diff --git a/.github/actions/fetchimages/action.yml b/.github/actions/fetchimages/action.yml
new file mode 100644
index 00000000000..98b4c2b5bfc
--- /dev/null
+++ b/.github/actions/fetchimages/action.yml
@@ -0,0 +1,58 @@
+name: Pull images
+description: Fetches and loads images from the cache
+
+inputs:
+  version:
+    description: key of the cache and tag of the images
+    required: true
+    type: string
+  flavor:
+    description: flavor of the OS image
+    required: false
+    type: string
+  toolkit:
+    description: fetch toolkit image
+    required: false
+    default: 'true'
+    type: 'boolean'
+  os:
+    description: fetch OS image
+    required: false
+    default: 'true'
+    type: 'boolean'
+
+runs:
+  using: composite
+  steps:
+    - if: ${{ inputs.toolkit == 'true' }}
+      name: Fetch toolkit image
+      id: cache-toolkit
+      uses: actions/cache/restore@v4
+      env:
+        cache-name: toolkit-build-x86_64-${{ github.event_name }}
+      with:
+        path: /tmp/toolkit.tar
+        key: ${{ env.cache-name }}-${{ inputs.version }}
+        fail-on-cache-miss: true
+    - if: ${{ inputs.toolkit == 'true' }}
+      name: Load toolkit image
+      id: load-toolkit
+      shell: bash
+      run: |
+        docker load -i /tmp/toolkit.tar
+    - if: ${{ inputs.os == 'true' }}
+      name: Fetch OS image
+      id: cache-os
+      uses: actions/cache/restore@v4
+      env:
+        cache-name: os-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }}
+      with:
+        path: /tmp/os.tar
+        key: ${{ env.cache-name }}-${{ inputs.version }}
+        fail-on-cache-miss: true
+    - if: ${{ inputs.os == 'true' }}
+      name: Load OS image
+      id: load-os
+      shell: bash
+      run: |
+        docker load -i /tmp/os.tar
diff --git a/.github/actions/version/action.yml b/.github/actions/version/action.yml
new file mode 100644
index 00000000000..51f09ac6085
--- /dev/null
+++ b/.github/actions/version/action.yml
@@ -0,0 +1,20 @@
+name: Version hash
+description: Computes the version hash from the current context
+
+outputs:
+  version:
+    description: computed hash from current context
+    value: ${{ steps.version.outputs.version }}
+
+runs:
+  using: composite
+  steps:
+    - name: Define version
+      id: version
+      shell: bash
+      env:
+        hash: ${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }}
+      run: |
+        version="${{ env.hash }}"
+        version=${version::16}
+        echo "version=${version}" >> $GITHUB_OUTPUT
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index ad308101f30..5f6abc0d171 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -1,26 +1,18 @@
 name: Build
 on:
+  pull_request:
   push:
-    tags:
-      - v**
-  pull_request_target:
-    types:
-      - opened
-      - synchronize
-      - reopened
-    paths:
-      - tests/**
-      - make/**
-      - Makefile
-      - .github/**
-      - pkg/**
-      - cmd/**
-      - go.mod
-      - go.sum
-      - examples/**
+    branches:
+      - main
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }}
+  cancel-in-progress: true
 
 jobs:
   detect:
+    permissions:
+      contents: read
     runs-on: ubuntu-latest
     outputs:
       flavor: ${{ steps.set-matrix.outputs.flavor }}
@@ -42,29 +34,49 @@
           fi
 
   build-toolkit:
+    permissions:
+      contents: read
     needs:
       - detect
-    permissions:
-      packages: write
     runs-on: ubuntu-latest
     env:
       PLATFORM: ${{ needs.detect.outputs.platform }}
-      TOOLKIT_REPO: ghcr.io/${{github.repository}}/elemental-cli
+    outputs:
+      version: ${{ steps.version.outputs.version }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: "${{ github.event.pull_request.head.sha }}"
      - run: |
          git fetch --prune --unshallow
-      - name: Log in to ghcr.io
-        uses: docker/login-action@v3
+      -
name: Define version + id: version + uses: ./.github/actions/version + - name: Check cache for Toolkit image + id: cache-toolkit + uses: actions/cache/restore@v4 + env: + cache-name: toolkit-build-x86_64-${{ github.event_name }} + lookup-only: true with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Build toolkit + path: /tmp/toolkit.tar + key: ${{ env.cache-name }}-${{ steps.version.outputs.version }} + - if: ${{ steps.cache-toolkit.outputs.cache-hit != 'true' }} + name: Build toolkit + env: + VERSION: ${{ steps.version.outputs.version }} run: | - make DOCKER_ARGS=--push build + make build-save + mv build/elemental-toolkit*.tar /tmp/toolkit.tar + - if: ${{ steps.cache-toolkit.outputs.cache-hit != 'true' }} + name: Save toolkit image in cache + id: save-toolkit + uses: actions/cache/save@v4 + env: + cache-name: toolkit-build-x86_64-${{ github.event_name }} + with: + path: /tmp/toolkit.tar + key: ${{ env.cache-name }}-${{ steps.version.outputs.version }} build-matrix: needs: @@ -77,3 +89,4 @@ jobs: uses: ./.github/workflows/build_and_test_x86.yaml with: flavor: ${{ matrix.flavor }} + version: ${{ needs.build-toolkit.outputs.version }} diff --git a/.github/workflows/build_and_test_arm.yaml b/.github/workflows/build_and_test_arm.yaml index a8d77ba2d96..5474fc71db0 100644 --- a/.github/workflows/build_and_test_arm.yaml +++ b/.github/workflows/build_and_test_arm.yaml @@ -11,11 +11,13 @@ on: type: string concurrency: - group: ci-${{ inputs.flavor }}-aarch64-${{ github.head_ref || github.ref }}-${{ github.repository }} + group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }}-${{ inputs.flavor }}-aarch64 cancel-in-progress: true -jobs: +permissions: + contents: read +jobs: build-iso: needs: detect runs-on: [self-hosted, arm64] diff --git a/.github/workflows/build_and_test_x86.yaml b/.github/workflows/build_and_test_x86.yaml index 48c22b81acd..7aeedea2242 100644 --- a/.github/workflows/build_and_test_x86.yaml +++ b/.github/workflows/build_and_test_x86.yaml @@ -6,50 +6,69 @@ on: flavor: required: true type: string + version: + required: true + type: string concurrency: - group: ci-${{ inputs.flavor }}-x86_64-${{ github.head_ref || github.ref }}-${{ github.repository }} + group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }}-${{ inputs.flavor }}-x86_64 cancel-in-progress: true jobs: build-os: permissions: - packages: write + contents: read runs-on: ubuntu-latest env: FLAVOR: ${{ inputs.flavor }} ARCH: x86_64 - TOOLKIT_REPO: ghcr.io/${{ github.repository }}/elemental-cli - REPO: ghcr.io/${{ github.repository }}/elemental-${{ inputs.flavor }} + VERSION: ${{ inputs.version }} steps: - uses: actions/checkout@v4 with: ref: "${{ github.event.pull_request.head.sha }}" - run: | git fetch --prune --unshallow - - name: Log in to registry - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin - - name: Log in to the Container registry - uses: docker/login-action@v3 + - name: Check cache for OS image + id: cache-os + uses: actions/cache/restore@v4 + env: + cache-name: os-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} + lookup-only: true + with: + path: /tmp/os.tar + key: ${{ env.cache-name }}-${{ inputs.version }} + - if: ${{ steps.cache-os.outputs.cache-hit != 'true' }} + name: Pull images + uses: ./.github/actions/fetchimages with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Build OS + os: 'false' + version: ${{ 
inputs.version }} + - if: ${{ steps.cache-os.outputs.cache-hit != 'true' }} + name: Build OS run: | - make pull-toolkit - make ARCH=${{ env.ARCH }} DOCKER_ARGS=--load build-os - make ARCH=${{ env.ARCH }} DOCKER_ARGS=--push build-os + make ARCH=${{ env.ARCH }} build-os-save + mv build/elemental-${{ inputs.flavor }}*.tar /tmp/os.tar + - if: ${{ steps.cache-os.outputs.cache-hit != 'true' }} + name: Save OS image in cache + id: save-os + uses: actions/cache/save@v4 + env: + cache-name: os-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} + with: + path: /tmp/os.tar + key: ${{ env.cache-name }}-${{ inputs.version }} build-iso: + permissions: + contents: read needs: - build-os runs-on: ubuntu-latest env: FLAVOR: ${{ inputs.flavor }} ARCH: x86_64 - TOOLKIT_REPO: ghcr.io/${{github.repository}}/elemental-cli - REPO: ghcr.io/${{ github.repository }}/elemental-${{ inputs.flavor }} + VERSION: ${{ inputs.version }} steps: - uses: actions/checkout@v4 with: @@ -60,16 +79,20 @@ jobs: id: cache-iso uses: actions/cache/restore@v4 env: - cache-name: pr-iso-build-x86_64-${{ inputs.flavor }} - enableCrossOsArchive: true + cache-name: iso-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} lookup-only: true with: path: /tmp/*.iso - key: ${{ env.cache-name }}-${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }} + key: ${{ env.cache-name }}-${{ inputs.version }} + - if: ${{ steps.cache-iso.outputs.cache-hit != 'true' }} + name: Pull images + uses: ./.github/actions/fetchimages + with: + flavor: ${{ inputs.flavor }} + version: ${{ inputs.version }} - if: ${{ steps.cache-iso.outputs.cache-hit != 'true' }} name: Build ISO run: | - make pull-toolkit pull-os make build-iso sudo mv build/elemental-${{ env.FLAVOR }}.${{ env.ARCH}}.iso /tmp/ - if: ${{ steps.cache-iso.outputs.cache-hit != 'true' }} @@ -77,21 +100,21 @@ jobs: id: save-iso uses: actions/cache/save@v4 env: - cache-name: pr-iso-build-x86_64-${{ inputs.flavor }} + cache-name: iso-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} with: path: /tmp/*.iso - key: ${{ env.cache-name }}-${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }} - enableCrossOsArchive: true + key: ${{ env.cache-name }}-${{ inputs.version }} build-disk: + permissions: + contents: read needs: - build-os runs-on: ubuntu-latest env: FLAVOR: ${{ inputs.flavor }} ARCH: x86_64 - TOOLKIT_REPO: ghcr.io/${{github.repository}}/elemental-cli - REPO: ghcr.io/${{ github.repository }}/elemental-${{ inputs.flavor }} + VERSION: ${{ inputs.version }} steps: - uses: actions/checkout@v4 with: @@ -100,44 +123,47 @@ jobs: git fetch --prune --unshallow - name: Checks cached Disk uses: actions/cache/restore@v4 - id: cache-check + id: cache-disk env: - cache-name: pr-disk-build-x86_64-${{ inputs.flavor }} + cache-name: disk-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} with: path: /tmp/*.qcow2 - key: ${{ env.cache-name }}-${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }} - enableCrossOsArchive: true + key: ${{ env.cache-name }}-${{ inputs.version }} lookup-only: true - - if: ${{ steps.cache-check.outputs.cache-hit != 'true' }} + - if: ${{ steps.cache-disk.outputs.cache-hit != 'true' }} name: Cleanup worker run: | sudo rm -rf /usr/local/lib/android # will release about 10 GB if you don't need Android sudo rm -rf /usr/share/dotnet # will 
release about 20GB if you don't need .NET sudo df -h - - if: ${{ steps.cache-check.outputs.cache-hit != 'true' }} + - if: ${{ steps.cache-disk.outputs.cache-hit != 'true' }} + name: Pull images + uses: ./.github/actions/fetchimages + with: + flavor: ${{ inputs.flavor }} + version: ${{ inputs.version }} + - if: ${{ steps.cache-disk.outputs.cache-hit != 'true' }} name: Install to disk run: | sudo apt-get update && sudo apt-get install -y --no-install-recommends qemu-utils coreutils - make pull-toolkit pull-os sudo -E make ARCH=${{ env.ARCH }} build-disk sudo mv build/elemental-${{ env.FLAVOR }}.${{ env.ARCH}}.qcow2 /tmp/ - - if: ${{ steps.cache-check.outputs.cache-hit != 'true' }} + - if: ${{ steps.cache-disk.outputs.cache-hit != 'true' }} name: Save cached disk - id: cache-disk + id: save-disk uses: actions/cache/save@v4 env: - cache-name: pr-disk-build-x86_64-${{ inputs.flavor }} + cache-name: disk-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} with: path: /tmp/*.qcow2 - key: ${{ env.cache-name }}-${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }} - enableCrossOsArchive: true + key: ${{ env.cache-name }}-${{ inputs.version }} detect: + permissions: + contents: read runs-on: ubuntu-latest outputs: tests: ${{ steps.detect.outputs.tests }} - upgrade: ${{ steps.image.outputs.upgrade }} - toolkit: ${{ steps.image.outputs.toolkit }} steps: - id: detect env: @@ -148,21 +174,10 @@ jobs: else echo "tests=['test-active']" >> $GITHUB_OUTPUT fi - - uses: actions/checkout@v4 - with: - ref: "${{ github.event.pull_request.head.sha }}" - - id: image - env: - FLAVOR: ${{ inputs.flavor }} - run: | - git fetch --prune --unshallow - export TAG=$(git describe --tags --candidates=50 --abbrev=0) - export COMMIT=$(git rev-parse --short HEAD) - export VERSION=${TAG}-g${COMMIT} - echo "upgrade=ghcr.io/rancher/elemental-toolkit/elemental-${FLAVOR}:${VERSION}" >> $GITHUB_OUTPUT - echo "toolkit=ghcr.io/rancher/elemental-toolkit/elemental-cli:${VERSION}" >> $GITHUB_OUTPUT tests-matrix: + permissions: + contents: read needs: - build-disk - detect @@ -171,7 +186,7 @@ jobs: FLAVOR: ${{ inputs.flavor }} ARCH: x86_64 COS_TIMEOUT: 1600 - UPGRADE_ARGS: --toolkit-image=${{ needs.detect.outputs.toolkit }} --upgrade-image=${{ needs.detect.outputs.upgrade }} + VERSION: ${{ inputs.version }} strategy: matrix: test: ${{ fromJson(needs.detect.outputs.tests) }} @@ -190,19 +205,20 @@ jobs: - run: | git fetch --prune --unshallow - if: ${{ matrix.test == 'test-upgrade' }} - name: Download specific disk - run: | - wget -q --tries=3 https://github.com/rancher/elemental-toolkit/releases/download/v1.1.4/elemental-${{ inputs.flavor }}-v1.1.4.${{ env.ARCH }}.qcow2 -O /tmp/elemental-${{ inputs.flavor }}.${{ env.ARCH }}.qcow2 + name: Pull images + uses: ./.github/actions/fetchimages + with: + flavor: ${{ inputs.flavor }} + version: ${{ inputs.version }} - if: ${{ matrix.test != 'test-upgrade' }} name: Cached Disk id: cache-disk uses: actions/cache/restore@v4 env: - cache-name: pr-disk-build-x86_64-${{ inputs.flavor }} + cache-name: disk-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} with: path: /tmp/*.qcow2 - key: ${{ env.cache-name }}-${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }} - enableCrossOsArchive: true + key: ${{ env.cache-name }}-${{ inputs.version }} fail-on-cache-miss: true - name: Enable KVM group perms run: | @@ -236,6 +252,8 @@ jobs: make 
test-clean test-installer: + permissions: + contents: read needs: - build-iso - detect @@ -244,6 +262,7 @@ jobs: FLAVOR: ${{ inputs.flavor }} ARCH: x86_64 COS_TIMEOUT: 1600 + VERSION: ${{ inputs.version }} steps: - uses: actions/checkout@v4 with: @@ -261,11 +280,10 @@ jobs: id: cache-iso uses: actions/cache/restore@v4 env: - cache-name: pr-iso-build-x86_64-${{ inputs.flavor }} + cache-name: iso-build-x86_64-${{ inputs.flavor }}-${{ github.event_name }} with: path: /tmp/*.iso - key: ${{ env.cache-name }}-${{ hashFiles('Dockerfile', '**/go.sum', '**/pkg/**', '**/examples/**', '**/cmd/**', '**/vendor/**', '**/Makefile', '**/main.go') }} - enableCrossOsArchive: true + key: ${{ env.cache-name }}-${{ inputs.version }} fail-on-cache-miss: true - name: Enable KVM group perms run: | diff --git a/.github/workflows/cache-cleanup.yaml b/.github/workflows/cache-cleanup.yaml index 942a3729a92..d3aab6bf34f 100644 --- a/.github/workflows/cache-cleanup.yaml +++ b/.github/workflows/cache-cleanup.yaml @@ -5,32 +5,62 @@ name: Cleanup caches by a branch on: pull_request_target: types: + - synchronize - closed -permissions: - # `actions:write` permission is required to delete caches - # See also: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-a-github-actions-cache-for-a-repository-using-a-cache-id - actions: write - contents: read + # After running tests in the main branch pushes we want to clean the cache + workflow_run: + workflows: [Build] + types: [completed] + branches: [main] jobs: + version: + permissions: + contents: read + runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.version }} + steps: + - uses: actions/checkout@v4 + with: + ref: "${{ github.event.pull_request.head.sha }}" + - run: | + git fetch --prune --unshallow + - name: Define version + id: version + uses: ./.github/actions/version + cleanup: + permissions: + # `actions:write` permission is required to delete caches. See: + # https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-a-github-actions-cache-for-a-repository-using-a-cache-id + actions: write + contents: read + needs: + - version runs-on: ubuntu-latest - steps: + steps: - name: Cleanup run: | gh extension install actions/gh-actions-cache echo "Fetching list of cache key" - cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 ) - + cacheKeysForPR=$(gh actions-cache list -R "${REPO}" -B "${BRANCH}" -L 100 | cut -f 1 ) echo "Deleting caches..." for cacheKey in $cacheKeysForPR do - gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + if [[ "${DELETE_ALL}" == "yes" ]] || [[ ! 
"${cacheKey}" =~ "${VERSION}" ]]; then + echo "Deleting $cacheKey as this is a leftover from a previous run" + gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + else + echo "Not deleting $cacheKey as this does not match the criteria" + fi done echo "Done" env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} REPO: ${{ github.repository }} - BRANCH: refs/pull/${{ github.event.pull_request.number }}/merge + BRANCH: ${{ github.event_name == 'workflow_run' && github.ref_name || format('refs/pull/{0}/merge', github.event.pull_request.number) }} + VERSION: ${{ needs.version.outputs.version }} + DELETE_ALL: ${{ github.event_name == 'workflow_run' && 'yes' || github.event_name == 'pull_request_target' && github.event.action == 'closed' && 'yes' || 'no' }} diff --git a/.github/workflows/cli.yaml b/.github/workflows/cli.yaml index 2bd65287cea..15c108a255b 100644 --- a/.github/workflows/cli.yaml +++ b/.github/workflows/cli.yaml @@ -1,19 +1,13 @@ name: Build CLI on: pull_request: - paths: - - cmd/** - - pkg/** - - main.go - - make/** - - .github/** - - Makefile - - tests/** - - go.mod - - go.sum push: branches: - main + +permissions: + contents: read + jobs: build: runs-on: ubuntu-latest diff --git a/.github/workflows/docs-publish.yaml b/.github/workflows/docs-publish.yaml index cf13d1bb160..61bb20ab4f8 100644 --- a/.github/workflows/docs-publish.yaml +++ b/.github/workflows/docs-publish.yaml @@ -8,6 +8,10 @@ on: - main schedule: - cron: 0 20 * * * + +permissions: + contents: read + jobs: build-deploy: runs-on: ubuntu-latest diff --git a/Dockerfile b/Dockerfile index da72461bf73..687ea447faa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,13 +11,15 @@ WORKDIR /src/ ADD go.mod . ADD go.sum . ADD vendor vendor -RUN go mod download ADD cmd cmd ADD internal internal ADD pkg pkg ADD main.go . -# Set arg/env after go mod download, otherwise we invalidate the cached layers due to the commit changing easily +# Ensure vendor folder is consistent +#RUN go mod verify + +# Set arg/env after go mod download, otherwise we invalidate the cached layers due commit hash changes ARG ELEMENTAL_VERSION=0.0.1 ARG ELEMENTAL_COMMIT="" ENV ELEMENTAL_VERSION=${ELEMENTAL_VERSION} diff --git a/Makefile b/Makefile index e5983a9bc00..b0007ebd6b3 100644 --- a/Makefile +++ b/Makefile @@ -1,18 +1,21 @@ # Directory of Makefile export ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) -DISK?=$(shell ls $(ROOT_DIR)/build/*.qcow2 2> /dev/null) +DISK?=$(shell ls $(ROOT_DIR)/build/elemental*.qcow2 2> /dev/null) DISKSIZE?=20G -ISO?=$(shell ls $(ROOT_DIR)/build/*.iso 2> /dev/null) +ISO?=$(shell ls $(ROOT_DIR)/build/elemental*.iso 2> /dev/null) FLAVOR?=green ARCH?=$(shell uname -m) +UPGRADE_DISK_URL?=https://github.com/rancher/elemental-toolkit/releases/download/v1.1.4/elemental-$(FLAVOR)-v1.1.4.$(ARCH).qcow2 +UPGRADE_DISK?=upgrade-test-elemental-disk.qcow2 +UPGRADE_DISK_CHECK?=$(shell ls $(ROOT_DIR)/build/$(UPGRADE_DISK) 2> /dev/null) PLATFORM?=linux/$(ARCH) IMAGE_SIZE?=20G REPO?=local/elemental-$(FLAVOR) TOOLKIT_REPO?=local/elemental-toolkit DOCKER?=docker -BASE_OS_IMAGE?=registry.opensuse.org/opensuse/leap -BASE_OS_VERSION?=15.5 +BASE_OS_IMAGE?=registry.opensuse.org/opensuse/tumbleweed +BASE_OS_VERSION?=latest DOCKER_SOCK?=/var/run/docker.sock GIT_COMMIT?=$(shell git rev-parse HEAD) @@ -55,13 +58,11 @@ build: --build-arg BASE_OS_VERSION=$(BASE_OS_VERSION) \ --target elemental-toolkit -t $(TOOLKIT_REPO):$(VERSION) . 
-.PHONY: push-toolkit -push-toolkit: - $(DOCKER) push $(TOOLKIT_REPO):$(VERSION) - -.PHONY: pull-toolkit -pull-toolkit: - $(DOCKER) pull $(TOOLKIT_REPO):$(VERSION) +.PHONY: build-save +build-save: build + mkdir -p $(ROOT_DIR)/build + $(DOCKER) save --output build/elemental-toolkit-image-$(VERSION).tar \ + $(TOOLKIT_REPO):$(VERSION) .PHONY: build-cli build-cli: @@ -75,31 +76,29 @@ build-os: --build-arg VERSION=$(VERSION) \ --build-arg REPO=$(REPO) -t $(REPO):$(VERSION) \ $(BUILD_OPTS) examples/$(FLAVOR) - -.PHONY: push-os -push-os: - $(DOCKER) push $(REPO):$(VERSION) - -.PHONY: pull-os -pull-os: - $(DOCKER) pull $(REPO):$(VERSION) +.PHONY: build-os-save +build-os-save: build-os + mkdir -p $(ROOT_DIR)/build + $(DOCKER) save --output build/elemental-$(FLAVOR)-image-$(VERSION).tar \ + $(REPO):$(VERSION) .PHONY: build-iso build-iso: @echo Building $(ARCH) ISO mkdir -p $(ROOT_DIR)/build $(DOCKER) run --rm -v $(DOCKER_SOCK):$(DOCKER_SOCK) -v $(ROOT_DIR)/build:/build \ - --entrypoint /usr/bin/elemental $(TOOLKIT_REPO):$(VERSION) --debug build-iso --bootloader-in-rootfs -n elemental-$(FLAVOR).$(ARCH) \ + -v $(ROOT_DIR)/tests/assets/remote_login.yaml:/overlay-iso/iso-config/remote_login.yaml \ + --entrypoint /usr/bin/elemental $(TOOLKIT_REPO):$(VERSION) --debug build-iso --bootloader-in-rootfs \ + -n elemental-$(FLAVOR).$(ARCH) --overlay-iso /overlay-iso \ --local --platform $(PLATFORM) -o /build $(REPO):$(VERSION) .PHONY: build-disk build-disk: @echo Building $(ARCH) disk mkdir -p $(ROOT_DIR)/build - $(DOCKER) run --rm -v $(DOCKER_SOCK):$(DOCKER_SOCK) -v $(ROOT_DIR)/build:/build \ - --entrypoint /usr/bin/elemental \ - $(TOOLKIT_REPO):$(VERSION) --debug build-disk --platform $(PLATFORM) --expandable -n elemental-$(FLAVOR).$(ARCH) --local \ - -o /build --system $(REPO):$(VERSION) + $(DOCKER) run --rm -v $(DOCKER_SOCK):$(DOCKER_SOCK) -v $(ROOT_DIR)/build:/build -v $(ROOT_DIR)/tests/assets:/assets \ + --entrypoint /usr/bin/elemental $(TOOLKIT_REPO):$(VERSION) --debug build-disk --platform $(PLATFORM) \ + --expandable -n elemental-$(FLAVOR).$(ARCH) --local --cloud-init /assets/remote_login.yaml -o /build --system $(REPO):$(VERSION) qemu-img convert -O qcow2 $(ROOT_DIR)/build/elemental-$(FLAVOR).$(ARCH).raw $(ROOT_DIR)/build/elemental-$(FLAVOR).$(ARCH).qcow2 qemu-img resize $(ROOT_DIR)/build/elemental-$(FLAVOR).$(ARCH).qcow2 $(DISKSIZE) diff --git a/go.mod b/go.mod index 35500073807..70768bc4e76 100644 --- a/go.mod +++ b/go.mod @@ -14,132 +14,122 @@ require ( github.com/jaypipes/ghw v0.9.1-0.20220511134554-dac2f19e1c76 github.com/joho/godotenv v1.5.1 github.com/mitchellh/mapstructure v1.5.0 - github.com/onsi/ginkgo/v2 v2.15.0 - github.com/onsi/gomega v1.30.0 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 - github.com/rancher/yip v1.4.10 + github.com/rancher/yip v1.9.2 github.com/sanity-io/litter v1.5.5 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - golang.org/x/crypto v0.21.0 + github.com/twpayne/go-vfs/v4 v4.3.0 + golang.org/x/crypto v0.23.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/mount-utils v0.23.0 ) require ( dario.cat/mergo v1.0.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // 
indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/ProtonMail/go-crypto v1.0.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect - github.com/containerd/log v0.1.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect - github.com/denisbrodbeck/machineid v1.0.1 // indirect - github.com/diskfs/go-diskfs v1.3.0 // indirect - github.com/emirpasic/gods v1.18.1 // indirect - github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-git/go-git/v5 v5.11.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/itchyny/gojq v0.12.12 // indirect - github.com/itchyny/timefmt-go v0.1.5 // indirect - github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c // indirect - github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/moby v25.0.2+incompatible // indirect - github.com/moby/sys/mountinfo v0.7.1 // indirect - github.com/mudler/entities v0.0.0-20220905203055-68348bae0f49 // indirect - github.com/packethost/packngo v0.31.0 // indirect - github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/xattr v0.4.9 // indirect - github.com/rancher-sandbox/linuxkit v1.0.2 // indirect - github.com/samber/lo v1.37.0 // indirect - github.com/satori/go.uuid v1.2.0 // indirect - github.com/sergi/go-diff v1.3.1 // indirect - github.com/shopspring/decimal v1.3.1 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect - github.com/spectrocloud-labs/herd v0.4.2 // indirect - github.com/tredoe/osutil/v2 v2.0.0-rc.16 // indirect - github.com/ulikunitz/xz v0.5.11 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect - github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 // indirect - github.com/willdonnelly/passwd v0.0.0-20141013001024-7935dab3074c // indirect - github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/zcalusic/sysinfo v0.9.5 // indirect - golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/grpc v1.61.0 // indirect - gopkg.in/djherbis/times.v1 v1.3.0 // indirect - gopkg.in/warnings.v0 v0.1.2 // indirect - pault.ag/go/modprobe v0.1.2 // indirect - pault.ag/go/topsort v0.0.0-20160530003732-f98d2ad46e1a // indirect -) - -require ( - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/StackExchange/wmi v1.2.1 // indirect + github.com/cloudflare/circl v1.3.8 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/continuity v0.4.3 // indirect + github.com/containerd/continuity v0.4.2 // indirect + github.com/containerd/log 
v0.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cyphar/filepath-securejoin v0.2.5 // indirect + github.com/denisbrodbeck/machineid v1.0.1 // indirect + github.com/diskfs/go-diskfs v1.4.0 // indirect github.com/docker/cli v24.0.0+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v24.0.9+incompatible // indirect + github.com/docker/docker v24.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab // indirect + github.com/emirpasic/gods v1.18.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-git/go-git/v5 v5.12.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-task/slim-sprig v2.20.0+incompatible // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/itchyny/gojq v0.12.16 // indirect + github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/jaypipes/pcidb v1.0.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.16.5 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mauromorales/xpasswd v0.4.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect + github.com/mudler/entities v0.8.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc6 // indirect - github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/opencontainers/image-spec v1.1.0-rc3 // indirect + github.com/packethost/packngo v0.31.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/xattr v0.4.9 // indirect + 
github.com/rancher-sandbox/linuxkit v1.0.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/spf13/afero v1.9.5 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/satori/go.uuid v1.2.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect + github.com/spf13/afero v1.9.3 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect - github.com/twpayne/go-vfs/v4 v4.3.0 + github.com/tredoe/osutil v1.5.0 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect github.com/vbatts/tar-split v0.11.3 // indirect + github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/vishvananda/netns v0.0.4 // indirect + github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/zcalusic/sysinfo v1.0.2 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.17.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/tools v0.21.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/djherbis/times.v1 v1.3.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect howett.net/plist v1.0.0 // indirect k8s.io/klog/v2 v2.90.1 // indirect diff --git a/go.sum b/go.sum index edcd0a7b003..fa4b04ba5f0 100644 --- a/go.sum +++ b/go.sum @@ -38,47 +38,37 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 
h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= -github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= -github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bramvdbogaerde/go-scp v1.2.1 
h1:BKTqrqXiQYovrDlfuVFaEGz0r4Ou6EED8L7jCXw6Buw= github.com/bramvdbogaerde/go-scp v1.2.1/go.mod h1:s4ZldBoRAOgUg8IrRP2Urmq5qqd2yPXQTPshACY8vQ0= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/canonical/go-efilib v0.9.5 h1:zRpWG4z61GiYsEmFYvXYuj+8xV2eJ200YY5Ht9EjrRU= github.com/canonical/go-efilib v0.9.5/go.mod h1:tHjv3Mni7hEpNSUNd1KJEV/AZJsFSH6LX/EQ0I75AZE= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= @@ -86,14 +76,12 @@ github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYpt github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI= +github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -101,33 +89,26 @@ github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaD github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= -github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= -github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ= github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/diskfs/go-diskfs v1.3.0 h1:D3IVe1y7ybB5SjCO0pOmkWThL9lZEWeanp8rRa0q0sk= -github.com/diskfs/go-diskfs v1.3.0/go.mod h1:3pUpCAz75Q11om5RsGpVKUgXp2Z+ATw1xV500glmCP0= +github.com/diskfs/go-diskfs v1.4.0 h1:MAybY6TPD+fmhY+a2qFhmdvMeIKvCqlgh4QIc1uCmBs= +github.com/diskfs/go-diskfs v1.4.0/go.mod h1:G8cyy+ngM+3yKlqjweMmtqvE+TxsnIo1xumbJX1AeLg= github.com/distribution/distribution v2.8.1+incompatible h1:8iXUoOqRPx30bhzIEPUmNIqlmBlWdrieW1bqr6LrX30= github.com/distribution/distribution v2.8.1+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= @@ -135,8 +116,8 @@ github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qe github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= -github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.0+incompatible h1:z4bf8HvONXX9Tde5lGBMQ7yCJgNahmJumdrStZAbeY4= +github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 
h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -145,6 +126,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY= +github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -153,48 +136,41 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 
h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v2.20.0+incompatible h1:4Xh3bDzO29j4TWNOI+24ubc0vbVFMg2PMnXKxK54/CA= -github.com/go-task/slim-sprig v2.20.0+incompatible/go.mod h1:N/mhXZITr/EQAOErEHciKvO1bFei2Lld2Ym6h96pdy0= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -238,7 +214,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ 
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= @@ -256,8 +231,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de h1:6bMcLOeKoNo0+mTOb1ee3McF6CCKGixjLR3EDQY1Jik= -github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -268,11 +243,6 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -297,10 +267,10 @@ github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/itchyny/gojq v0.12.12 h1:x+xGI9BXqKoJQZkr95ibpe3cdrTbY8D9lonrK433rcA= -github.com/itchyny/gojq v0.12.12/go.mod 
h1:j+3sVkjxwd7A7Z5jrbKibgOLn0ZfLWkV+Awxr/pyzJE= -github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= -github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= +github.com/itchyny/gojq v0.12.16 h1:yLfgLxhIr/6sJNVmYfQjTIv0jGctu6/DgDoivmxTr7g= +github.com/itchyny/gojq v0.12.16/go.mod h1:6abHbdC2uB9ogMS38XsErnfqJ94UlngIJGlRAIj4jTM= +github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= +github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= github.com/jaypipes/ghw v0.9.1-0.20220511134554-dac2f19e1c76 h1:ZbHBKyBTqelK2yYAvBt0CbdNY4Q0HCVoM1//avrnXCU= github.com/jaypipes/ghw v0.9.1-0.20220511134554-dac2f19e1c76/go.mod h1:dXMo19735vXOjpIBDyDYSp31sB2u4hrtRCMxInqQ64k= github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8= @@ -310,53 +280,41 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c h1:eKb4PqwAMhlqwXw0W3atpKaYaPGlXE/Fwh+xpCEYaPk= github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c/go.mod h1:VOfm8h1NySetVlpHDSnbpCMsvCgYaU+YDn4XezUy2+4= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mauromorales/xpasswd v0.4.0 h1:Jf6mfA8lwQsYzwgfQADPDGV7l/liAvRrnG+nQTPy0j8= +github.com/mauromorales/xpasswd v0.4.0/go.mod h1:Z3+aY19mhNfcGi3st0+RAVSz2vC+pyoju2S/FPN8kEg= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/moby v25.0.2+incompatible h1:g2oKRI7vgWkiPHZbBghaPbcV/SuKP1g/YLx0I2nxFT4= -github.com/moby/moby v25.0.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= -github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= @@ -364,117 +322,79 @@ github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbD github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mudler/entities v0.0.0-20220905203055-68348bae0f49 h1:P1QgHLh0hX935j6m9K6rlSxc0mkD1UuIAOQEu+1VCW4= -github.com/mudler/entities v0.0.0-20220905203055-68348bae0f49/go.mod h1:qquFT9tYp+/NO7tTotto4BT9zSRYSMDxo2PGZwujpFA= 
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mudler/entities v0.8.0 h1:U+KD0hn1v7MEkiOv60zjLLUj3kEsgj8IMFMN+No3D0Q= +github.com/mudler/entities v0.8.0/go.mod h1:exnXZF6qVnu4b9dEiH3sLEyxYBTknfkcJ3UCxyc/dwE= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= -github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= +github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/packethost/packngo v0.31.0 h1:LLH90ardhULWbagBIc3I3nl2uU75io0a7AwY6hyi0S4= github.com/packethost/packngo v0.31.0/go.mod h1:Io6VJqzkiqmIEQbpOjeIw9v8q9PfcTEq8TEY/tMQsfw= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= -github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ymZTxl7CxS73JC99x3ukk+DBkgQGQs= github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee/go.mod h1:3uODdxMgOaPYeWU7RzZLxVtJHZ/x1f/iHkBZuKJDzuY= -github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 
v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs= github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rancher-sandbox/linuxkit v1.0.2 h1:mUFPL2Mgl1XZ5H82ABR57t5H2G2Qd+lu3gMYvUGmeZo= github.com/rancher-sandbox/linuxkit v1.0.2/go.mod h1:n6Fkjc5qoMeWrnLSA5oqUF8ZzFKMrM960CtBwfvH1ZM= -github.com/rancher/yip v1.4.10 h1:Q8w/olZPpzS9a1rvhROk0CIo/qiW6ZM/5rkGpaXJ5oo= -github.com/rancher/yip v1.4.10/go.mod h1:6ClCCblwo/9ccDfOxcqZ/w0F3SaGPRiXMwyjPJfmbkw= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rancher/yip v1.9.2 h1:AddaE7/J5eUgeMeW5mlB7JtNCafqVt78aZgg1CE+cfk= +github.com/rancher/yip v1.9.2/go.mod h1:acOitsP+8zVDhxM36mDePxpT+SXhK66GxW5Vgg3lw2Y= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= -github.com/samber/lo v1.37.0/go.mod 
h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spectrocloud-labs/herd v0.4.2 h1:90cYZmW0yxNw4PEbqNGSerrtqKSx1nvRbSwAwjc/5AY= -github.com/spectrocloud-labs/herd v0.4.2/go.mod h1:WBlMIs1QZ7XtVrt9rAAFZpkh/fZYA4l2gGOCUS1LDHE= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod 
h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -487,43 +407,35 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tredoe/osutil/v2 v2.0.0-rc.16 h1:5A2SKvyB2c3lhPYUIHyFtu6jbaXlaA3Hu5gWIam8Pik= -github.com/tredoe/osutil/v2 v2.0.0-rc.16/go.mod h1:uLRVx/3pb7Y4RQhG8cQFbPE9ha5r81e6MXpBsxbTAYc= +github.com/tredoe/osutil v1.5.0 h1:UGVxbbHRoZi8xXVmbNZ2vgG6XoJ15ndE4LniiQ3rJKg= +github.com/tredoe/osutil v1.5.0/go.mod h1:TEzphzUUunysbdDRfdOgqkg10POQbnfIPV50ynqOfIg= github.com/twpayne/go-vfs/v4 v4.3.0 h1:rTqFzzOQ/6ESKTSiwVubHlCBedJDOhQyVSnw8rQNZhU= github.com/twpayne/go-vfs/v4 v4.3.0/go.mod 
h1:tq2UVhnUepesc0lSnPJH/jQ8HruGhzwZe2r5kDFpEIw= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 h1:v6jG/tdl4O07LNVp74Nt7/OyL+1JsIW1M2f/nSvQheY= github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk= -github.com/willdonnelly/passwd v0.0.0-20141013001024-7935dab3074c h1:4+NVyrLUuEmvE3r3Xst7gPuKhAP6X04ACpMmPvtK0M0= -github.com/willdonnelly/passwd v0.0.0-20141013001024-7935dab3074c/go.mod h1:xcvfY9pOw6s4wyrhilFSbMthL6KzgrfCIETHHUOQ/fQ= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zcalusic/sysinfo v0.9.5 h1:ivoHyj9aIAYkwzo1+8QgJ5s4oeE6Etx9FmZtqa4wJjQ= -github.com/zcalusic/sysinfo v0.9.5/go.mod h1:Z/gPVufBrFc8X5sef3m6kkw3r3nlNFp+I6bvASfvBZQ= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/zcalusic/sysinfo v1.0.2 h1:nwTTo2a+WQ0NXwo0BGRojOJvJ/5XKvQih+2RrtWqfxc= +github.com/zcalusic/sysinfo v1.0.2/go.mod h1:kluzTYflRWo6/tXVMJPdEjShsbPpsFRyy+p1mBQPC30= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -532,10 
+444,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -545,13 +453,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -562,8 +468,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= -golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -588,21 +492,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -630,10 +527,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -654,15 +549,9 @@ 
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -674,10 +563,8 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200102141924-c96a22e43c9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -701,7 +588,6 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -715,18 +601,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -736,23 +617,19 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -797,15 +674,14 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -868,11 +744,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -887,8 +762,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -903,32 +778,25 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o= gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 
v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -958,10 +826,6 @@ k8s.io/mount-utils v0.23.0/go.mod h1:9pFhzVjxle1osJUo++9MFDat9HPkQUOoHCn+eExZ3Ew k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -pault.ag/go/modprobe v0.1.2 h1:bblunaPhqpTxGDJ5TVFW/4gheohBPleF2dIV6j6sWkI= -pault.ag/go/modprobe v0.1.2/go.mod h1:afr2STC/2Maz/qi4+Bma1s0dszZgO/PcM8AKar9DWhM= -pault.ag/go/topsort v0.0.0-20160530003732-f98d2ad46e1a h1:WwS7vlB5H2AtwKj1jsGwp2ZLud1x6WXRXh2fXsRqrcA= -pault.ag/go/topsort v0.0.0-20160530003732-f98d2ad46e1a/go.mod h1:INqx0ClF7kmPAMk2zVTX8DRnhZ/yaA/Mg52g8KFKE7k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/make/Makefile.test b/make/Makefile.test index e9431be9fd5..3baa6fc7d54 100644 --- a/make/Makefile.test +++ b/make/Makefile.test @@ -1,6 +1,5 @@ GINKGO ?= "github.com/onsi/ginkgo/v2/ginkgo" GINKGO_ARGS ?= -v --fail-fast -r --timeout=3h -UPGRADE_ARGS ?= .PHONY: prepare-test prepare-test: @@ -11,9 +10,21 @@ endif @scripts/run_vm.sh start $(DISK) @echo "VM started from $(DISK)" +.PHONY: prepare-upgrade-test +prepare-upgrade-test: +ifeq ("$(UPGRADE_DISK_CHECK)","") + mkdir -p $(ROOT_DIR)/build + @echo "Downloading disk from $(UPGRADE_DISK_URL) ..." 
+ @wget -q --tries=3 $(UPGRADE_DISK_URL) -O $(ROOT_DIR)/build/$(UPGRADE_DISK) +endif + @scripts/run_registry.sh start + @scripts/run_vm.sh start $(ROOT_DIR)/build/$(UPGRADE_DISK) + @scripts/run_registry.sh push "$(TOOLKIT_REPO):$(VERSION)" "$(REPO):$(VERSION)" + @echo "VM started from $(ROOT_DIR)/build/$(UPGRADE_DISK)" + .PHONY: test-active test-active: prepare-test - VM_PID=$$(scripts/run_vm.sh vmpid) go run $(GINKGO) $(GINKGO_ARGS) ./tests/wait-active + VM_PID=$$(scripts/run_vm.sh vmpid) COS_PASS=cos go run $(GINKGO) $(GINKGO_ARGS) ./tests/wait-active .PHONY: prepare-installer-test prepare-installer-test: @@ -26,6 +37,7 @@ endif .PHONY: test-clean test-clean: test-stop + @scripts/run_registry.sh stop @scripts/run_vm.sh clean .PHONY: test-stop @@ -62,8 +74,8 @@ test-downgrade: test-active VM_PID=$$(scripts/run_vm.sh vmpid) go run $(GINKGO) $(GINKGO_ARGS) ./tests/downgrade -- $(UPGRADE_ARGS) .PHONY: test-upgrade -test-upgrade: test-active - VM_PID=$$(scripts/run_vm.sh vmpid) go run $(GINKGO) $(GINKGO_ARGS) ./tests/upgrade -- $(UPGRADE_ARGS) +test-upgrade: prepare-upgrade-test + @scripts/run_upgradetest.sh start $(GINKGO) "$(GINKGO_ARGS)" "$(TOOLKIT_REPO):$(VERSION)" "$(REPO):$(VERSION)" .PHONY: test-deps test-deps: @@ -84,5 +96,3 @@ ifneq ($(shell id -u), 0) else go run $(GINKGO) run --label-filter root --fail-fast --race --covermode=atomic --coverprofile=coverage_root.txt --coverpkg=github.com/rancher/elemental-toolkit/... -procs=1 -r ${PKG} endif - - diff --git a/pkg/cloudinit/cloudinit.go b/pkg/cloudinit/cloudinit.go index 3614c3721fd..3aa04a66504 100644 --- a/pkg/cloudinit/cloudinit.go +++ b/pkg/cloudinit/cloudinit.go @@ -63,7 +63,6 @@ func NewYipCloudInitRunner(l types.Logger, r types.Runner, fs vfs.FS) *YipCloudI plugins.Sysctl, plugins.User, plugins.SSH, - plugins.LoadModules, plugins.Timesyncd, plugins.Systemctl, plugins.Environment, diff --git a/scripts/run_registry.sh b/scripts/run_registry.sh new file mode 100755 index 00000000000..a22fe6e41a8 --- /dev/null +++ b/scripts/run_registry.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +set -e + +SCRIPT=$(realpath -s "${0}") +SCRIPTS_PATH=$(dirname "${SCRIPT}") +TESTS_PATH=$(realpath -s "${SCRIPTS_PATH}/../tests") + +: "${ELMNTL_PREFIX:=}" +: "${ELMNTL_REGCONF:=${TESTS_PATH}/${ELMNTL_PREFIX}test-registry.yaml}" +: "${ELMNTL_IPADDR=$(ip -4 addr show $(ip route | awk '/default/ { print $5 }') | grep -oP '(?<=inet\s)\d+(\.\d+){3}')}" +: "${ELMNTL_REGPORT=5000}" + +function _abort { + echo "$@" && exit 1 +} + +function start { + if [ "$( docker container inspect -f '{{.State.Status}}' elmntl_registry )" = "running" ]; then + return + fi + + docker run --rm -d \ + --name elmntl_registry \ + -p "${ELMNTL_REGPORT}:${ELMNTL_REGPORT}" \ + registry:2 +} + +function stop { + docker stop elmntl_registry || true + ([ -f "${ELMNTL_REGCONF}" ] && rm "${ELMNTL_REGCONF}") || true +} + +function push { + local reg_img + + for img in "$@"; do + reg_img="${ELMNTL_IPADDR}.sslip.io:${ELMNTL_REGPORT}/${img}" + echo "Pushing '${img}' to '${reg_img}'" + + # Ugly hack around podman to circumvent the need of adding insecure registries + # at /etc/docker/daemon.json when using the docker client + docker save "${img}" | podman load + tag=${img##*:} + imgID=$(podman images -n | grep "${tag}" | awk '{print $3}' | head -n1) + podman tag "${imgID}" "${reg_img}" + podman push --tls-verify=false "${reg_img}" + podman rmi -f "${imgID}" + done +} + + +function url { + echo "${ELMNTL_IPADDR}.sslip.io:${ELMNTL_REGPORT}" +} + +cmd=$1 + +case $cmd in + start) + start + ;; + stop) + stop 
+ ;; + push) + shift + push $@ + ;; + url) + url + ;; + *) + _abort "Unknown command: ${cmd}" + ;; +esac + +exit 0 diff --git a/scripts/run_upgradetest.sh b/scripts/run_upgradetest.sh new file mode 100755 index 00000000000..9bd476a70e0 --- /dev/null +++ b/scripts/run_upgradetest.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +set -e + +SCRIPT=$(realpath -s "${0}") +SCRIPTS_PATH=$(dirname "${SCRIPT}") +ROOT_PATH=$(realpath -s "${SCRIPTS_PATH}/..") + +: "${ELMNTL_OLDPASS:=cos}" + +function _abort { + echo "$@" && exit 1 +} + +function start { + local ginkgo="$1" + local ginkgo_args="$2" + local toolkit_img="$3" + local upgrade_img="$4" + local reg_url + + export VM_PID=$(${SCRIPTS_PATH}/run_vm.sh vmpid) + export COS_PASS=${ELMNTL_OLDPASS} + + reg_url=$(${SCRIPTS_PATH}/run_registry.sh url) + + pushd "${ROOT_PATH}" > /dev/null + go run ${ginkgo} ${ginkgo_args} ./tests/wait-active + go run ${ginkgo} ${ginkgo_args} ./tests/upgrade -- \ + --toolkit-image=docker://${reg_url}/${toolkit_img} --upgrade-image=docker://${reg_url}/${upgrade_img} + popd > /dev/null +} + +cmd=$1 + +case $cmd in + start) + shift + if [[ $# -ne 4 ]]; then + _abort "Wrong number of arguments" + fi + start "$1" "$2" "$3" "$4" + ;; + *) + _abort "Unknown command: ${cmd}" + ;; +esac + +exit 0 diff --git a/tests/assets/remote_login.yaml b/tests/assets/remote_login.yaml new file mode 100644 index 00000000000..80053549be5 --- /dev/null +++ b/tests/assets/remote_login.yaml @@ -0,0 +1,11 @@ +stages: + fs: + - name: "Adding authorized SSH key" + authorized_keys: + root: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDGIU1yprgpMg8Z82mBshjJGS8s5iK1RLu2jR6QEUcLDRa+At0/5zLLS0UMHlnqzRDSSgKMMjURB73TCVUiQ2hyCUNfuY3Pr9nQXQsPSPjd6FuBzhsUPPxW8QUkQLwQkyNELTDRYjucpPrYa2rpfdoE3mmG8QJ+CW7g699iuCpTltF265uyGltqQKBFrsyxfqqzp+I6L0A04BMuvDop2K79yArJD671uAlsIDHv4hhKTnBo7cS+EUi66XxjnHUsQ59v6B7SkdYfjtyoEDX5aGU0ijy+t3lhFrB2iEtKg9R12ZjK50fWdVeGPTFVjzQfcIuIgZEo00d0dxp438JTHyv4ZMC1wRcT/ZMxm8CivQD9HNWX/qadISrxgbYQnM9tjO3b6QprHiY5eMWrNI/SAzlFZbcu8muV6zJ/3RcwkILgUVmlA3RyOZKngsC6imRtzyhEsnyIo1+mvgWhXbYfqunMgI+4K9a4HVMoVOk7+CwPyb5GuuVWXUE3q+AUNPQQBFJqkylzl9E13GOHNzm3sDNZSm8bkNjZtinJD0nCyLrP5tOrLm7sXblQ28N+yHEcbh7VOMsK1O0Ed+hM651bxveU0/ZnZkQYhe/asf7RyVxkTPwPIXBqCGF/c4EsfcCRy8EjTyUk3mNfPScgfenQFxwqsYF6p3hViw6p9n4kSk3gAw== (none)" + install-after: + - if: '[ -e "/run/initramfs/live/iso-config/remote_login.yaml" ]' + name: "Adding authorized SSH key on install" + commands: + - cp /run/initramfs/live/iso-config/remote_login.yaml /run/elemental/oem diff --git a/tests/assets/testkey b/tests/assets/testkey new file mode 100644 index 00000000000..2b682d1dd93 --- /dev/null +++ b/tests/assets/testkey @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAxiFNcqa4KTIPGfNpgbIYyRkvLOYitUS7to0ekBFHCw0WvgLd +P+cyy0tFDB5Z6s0Q0koCjDI1EQe90wlVIkNocglDX7mNz6/Z0F0LD0j43ehbgc4b +FDz8VvEFJEC8EJMjRC0w0WI7nKT62Gtq6X3aBN5phvECfglu4OvfYrgqU5bRduub +shpbakCgRa7MsX6qs6fiOi9ANOATLrw6Kdiu/cgKyQ+u9bgJbCAx7+IYSk5waO3E +vhFIuul8Y5x1LEOfb+ge0pHWH47cqBA1+WhlNIo8vrd5YRawdohLSoPUddmYyudH +1nVXhj0xVY80H3CLiIGRKNNHdHcaeN/CUx8r+GTAtcEXE/2TMZvAor0A/RzVl/6m +nSEq8YG2EJzPbYzt2+kKax4mOXjFqzSP0gM5RWW3LvJrlesyf90XMJCC4FFZpQN0 +cjmSp4LAuopkbc8oRLJ8iKNfpr4FoV22H6rpzICPuCvWuB1TKFTpO/gsD8m+Rrrl +Vl1BN6vgFDT0EARSapMpc5fRNdxjhzc5t7AzWUpvG5DY2bYpyQ9Jwsi6z+bTqy5u +7F25UNvDfshxHG4e1TjLCtTtBHfoTOudW8b3lNP2Z2ZEGIXv2rH+0clcZEz8DyFw +aghhf3OBLH3AkcvBI08lJN5jXz0nIH3p0BccKrGBeqd4VYsOqfZ+JEpN4AMCAwEA +AQKCAgAjEaHzuS6/m+ywWzVBj5Rzg9Gl0NYz34aaOUA/n89DVX3BBCDFPtbuxNPj +4TtpJALmNdNdnUVnTkd7Ixu+hM/n31vdhapkKGwVRWK/wdrK7GXGBAUm/+RqPx75 
+27tt8506ZAVQCmMixtDfnaO+0FfWpMdo6x/VzzWg+h8ACjjzsq5ArlNQovmrROUZ +cGcqw5DTW9PfGJ30vfSnQQnKfl/66319ORluxLRmPwyXuR0zic67w9ofClAlRU6P +ExJqU03VMPDDtKyByXelB7mF+8RuGEnIOBFz7eAuGtG4zry8X1d8EpPUkdB8Ijip +PD+tkO4hq01WPI9ZPfuhiXmL3OibLs1p/HU04ZxzX0CUpr44+4qDxA1LMPv/B8ex +knUyMpInC6WRBduTmXl2jAdvZXEZ8897YgK2drXmn3uEdsasElC5ae8r+rGp3o0z +2K8CFMEaQI6dS4Qlb7FyeFE5NT0d+al6WpI0zEDSJBkB59QsUdvtGOuouP5lDrLz +nVP/a9Fz28h0Lxdi+AFTQmEw7X7JQh5Yt72VC/b2pkPVXibg9QcavYvqBTOzw+9F +tk0QGXhtzABRMPTdrrrSLGnqp5R9PKffW83aF4eIietPY5XUtGMKl6DhzQUSKBpe +PClOC432QjRek15nc5yZgFKX3JME3FsoYxpwHbBKLWx1ozdNgQKCAQEA8aqWXrcd +AgDUqXQGGiUQn+hGT0bKyIEcKdDq9f3ulNLH3dit6A/mrbC3WUUYSV3xAmPwF3CQ +ST2xzbs+HedTh6C/+cObg52zuYwcVMiFoLuj6Hfs4B2jf43cGKncYAGD15ouXlTm +mcT1/vLmWsKHtbQt43Whqze2n4PoPW1mxMo7lSNbFzGo+uXXozTpzyorZjlPAdBy +MyoFuhtIPYw+B7IAP7G4ovmQPJxqDFResPi9DSFpeVjjcCfc3Eqbur/Fa7wvjAdN +WgZzi5jIqj/wuQwK8n7zYgOJ7F6qdOoN/gdkIvNQrnB9g918jUmqOfOMY7Ee8aeK +rGL4DFQ2J0G6NQKCAQEA0eGrWTZE3jwYigvoZEvdFs2OGO3JNCKYLFCjIebuu0am +oVaJDY4rfMH5CC45nk2/n39muR/pLSjFHRU9QUfNNSLrgvgI/KEqgyKOiv8+m9Du +b22HDySvHuPFwqvU+1HKc9RCCI0hs85/NCvfqO4090GOq8+2th4G8EnCYf7JvN1J +459X5QCbuX7ZhAfovnAi7N6yS3N3MNBjs+9ems0iBzUX8FuUGiMhNBhkD2IpSo4Z +fDY6OTGkqJRHpXp+sMh57ktBTh3xioHxLm9dLz0bbfwLXPJQj53jlPuG1Xi3mkK1 +/P9RXkWfS7x2dSAk0q206pdnZqh3tg4JiOnzcMw4VwKCAQBdaFUUqOxCYPXRxXGy +W08wr4O1kqRAmX68Pu8CQ7MavNnZyq11O+gqjFph284GVZQZG22qcayjUoBOB6h7 +wHTNwHlTXaLXXXYhKqGst5DGzqAM84WQHoSagcdnlQgD57IA/Kwdc+1p0/JNJ6+X +twbjWZA/EbF6T198Cm0SiR2mxJABFhhYP66Tw5D0mTgiBaeerFn4Vow9cGmkRHVI +rNMxAEEOXXADNjjk5IQs3dN3zbysvZRZ/dxy9kVstD27EfKJsMQj+JKSUFoKw/QT +sgEmVRxKHH2QDdQak4CJDgp4fHzc8XTm573r4QkKR0XPfm5t/gD64Io+FUbQ5R/F +TqddAoIBAQCIKdZWvISVgXuSJAOS/jzjjFxCBBLjBgafyXjsh7weN8nnJhMofjHs +cJO2IPc0otBNFAR62h8XMzoekib5cTHK5WrRv0VlUNIZ3ZOIzeu6fyQRmodi5bPP +YIpur7gvjHLCDtUAYz+YPcH9APHL0S7/ssJy6QFqybRKtC+uD/Sl2RMU383QPkLJ +Z2iK5e6zjmC6OAiINtL/ElN86zD/UVJgRqvPPjqpcl0aRTC1YkPToLkcArpnXLOs +wvZwfzYuTwH7/UrS+U45Q/BK5ekYupZPFpPwqyxk4FRD1aM2X2kOZXVYO/R4Kscc +BZr8hewd2qBjCfMFgQqZTCYXc2CCr5tlAoIBAQDC2+W0OkRKcTDqGupYWONhmz4e +XRY9sJ/NDChfB72VimJjDqMxTij36rkFOMlIiKplX2YpBG1ht2aoRrkX7i+euQhX +2e6IspXj4Gpvr+dkgVtxNnxRIBc5N7NM+8UuUIDA0D5hbJo6L3CYvyJ7y0NDaMyX +xb7DsHaG5wSXkil/RW2Wu1fiHLC+tu+qi2WcWYWJ7k/KN/zETLDVbcR7RhPaH4Ue +VEs0Jy7hZSRx/4jZKOsMHMCion9N7wppmc8LU+BQGdKmtYiKIMMPf10FtYioSJFa +7GV7nfFok6IilDhNnF4gcfhD7IMYDISzmY+F3fDOspuxnPg866nnAkQzqvrn +-----END RSA PRIVATE KEY----- diff --git a/tests/assets/testkey.pub b/tests/assets/testkey.pub new file mode 100644 index 00000000000..b7c9627d4d9 --- /dev/null +++ b/tests/assets/testkey.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDGIU1yprgpMg8Z82mBshjJGS8s5iK1RLu2jR6QEUcLDRa+At0/5zLLS0UMHlnqzRDSSgKMMjURB73TCVUiQ2hyCUNfuY3Pr9nQXQsPSPjd6FuBzhsUPPxW8QUkQLwQkyNELTDRYjucpPrYa2rpfdoE3mmG8QJ+CW7g699iuCpTltF265uyGltqQKBFrsyxfqqzp+I6L0A04BMuvDop2K79yArJD671uAlsIDHv4hhKTnBo7cS+EUi66XxjnHUsQ59v6B7SkdYfjtyoEDX5aGU0ijy+t3lhFrB2iEtKg9R12ZjK50fWdVeGPTFVjzQfcIuIgZEo00d0dxp438JTHyv4ZMC1wRcT/ZMxm8CivQD9HNWX/qadISrxgbYQnM9tjO3b6QprHiY5eMWrNI/SAzlFZbcu8muV6zJ/3RcwkILgUVmlA3RyOZKngsC6imRtzyhEsnyIo1+mvgWhXbYfqunMgI+4K9a4HVMoVOk7+CwPyb5GuuVWXUE3q+AUNPQQBFJqkylzl9E13GOHNzm3sDNZSm8bkNjZtinJD0nCyLrP5tOrLm7sXblQ28N+yHEcbh7VOMsK1O0Ed+hM651bxveU0/ZnZkQYhe/asf7RyVxkTPwPIXBqCGF/c4EsfcCRy8EjTyUk3mNfPScgfenQFxwqsYF6p3hViw6p9n4kSk3gAw== (none) diff --git a/tests/assets/testusr_id b/tests/assets/testusr_id deleted file mode 100644 index c1a2c0fd2c0..00000000000 --- a/tests/assets/testusr_id +++ /dev/null @@ -1,7 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- 
-b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW -QyNTUxOQAAACB9HbITkCgqxf3QRDcftuFLSCfEtLGYG7tNiwaybCuYMQAAAKCvkH85r5B/ -OQAAAAtzc2gtZWQyNTUxOQAAACB9HbITkCgqxf3QRDcftuFLSCfEtLGYG7tNiwaybCuYMQ -AAAEDSbdE9SzVbCtCedQOLAxpzm7VNYDF2M4cVfDwsbxsxkH0dshOQKCrF/dBENx+24UtI -J8S0sZgbu02LBrJsK5gxAAAAG2RhdmlkQGxvY2FsaG9zdC5sb2NhbGRvbWFpbgEC ------END OPENSSH PRIVATE KEY----- diff --git a/tests/assets/testusr_id.pub b/tests/assets/testusr_id.pub deleted file mode 100644 index 8b920c622e2..00000000000 --- a/tests/assets/testusr_id.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH0dshOQKCrF/dBENx+24UtIJ8S0sZgbu02LBrJsK5gx david@localhost.localdomain diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index d1b0974ad52..128b2f5a2ac 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -44,9 +44,10 @@ var _ = Describe("Elemental Feature tests", func() { By(fmt.Sprintf("and upgrading to %s", comm.UpgradeImage())) - upgradeCmd := s.ElementalCmd("upgrade", "--bootloader", "--system", comm.UpgradeImage()) + upgradeCmd := s.ElementalCmd("upgrade", "--tls-verify=false", "--bootloader", "--system", comm.UpgradeImage()) out, err := s.NewPodmanRunCommand(comm.ToolkitImage(), fmt.Sprintf("-c \"mount --rbind /host/run /run && %s\"", upgradeCmd)). Privileged(). + NoTLSVerify(). WithMount("/", "/host"). Run() diff --git a/tests/vm/podman.go b/tests/vm/podman.go index 527b868222c..02c1b2def7a 100644 --- a/tests/vm/podman.go +++ b/tests/vm/podman.go @@ -19,6 +19,8 @@ package vm import ( "fmt" "strings" + + . "github.com/onsi/ginkgo/v2" //nolint:revive ) type PodmanRunCommand struct { @@ -62,6 +64,6 @@ func (p *PodmanRunCommand) Run() (string, error) { } cmd := fmt.Sprintf("podman run %s %s %s %s %s", priv, strings.Join(mounts, " "), entrypoint, p.image, p.command) - fmt.Printf("Running podman command: '%s'", cmd) + By(fmt.Sprintf("Running podman command: '%s'", cmd)) return p.sut.Command(cmd) } diff --git a/tests/vm/sut.go b/tests/vm/sut.go index 9987a27b747..81eae22c705 100644 --- a/tests/vm/sut.go +++ b/tests/vm/sut.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "log" "os" "os/exec" "path/filepath" @@ -76,6 +77,7 @@ type SUT struct { Host string Username string Password string + SSHKey []byte Timeout int artifactsRepo string TestVersion string @@ -85,10 +87,24 @@ type SUT struct { } func NewSUT() *SUT { + var sshKey []byte + var err error + user := os.Getenv("COS_USER") if user == "" { user = "root" } + + sshKeyFile := os.Getenv("COS_SSHKEY") + if sshKeyFile == "" { + sshKeyFile = "../assets/testkey" + } + + sshKey, err = os.ReadFile(sshKeyFile) + if err != nil { + fmt.Printf("failed reading ssh key file: %s\n", sshKeyFile) + } + pass := os.Getenv("COS_PASS") if pass == "" { pass = Cos @@ -123,6 +139,7 @@ func NewSUT() *SUT { Host: host, Username: user, Password: pass, + SSHKey: sshKey, MachineID: "test", Timeout: timeout, artifactsRepo: "", @@ -347,12 +364,24 @@ func (s *SUT) Reboot(t ...int) { } func (s *SUT) clientConfig() *ssh.ClientConfig { + var signer ssh.Signer + var err error + auths := []ssh.AuthMethod{} + + if s.SSHKey != nil { + signer, err = ssh.ParsePrivateKey(s.SSHKey) + if err != nil { + log.Fatalf("unable to parse private key: %v", err) + } + auths = append(auths, ssh.PublicKeys(signer)) + } + sshConfig := &ssh.ClientConfig{ - User: s.Username, - Auth: []ssh.AuthMethod{ssh.Password(s.Password)}, - Timeout: 30 * time.Second, // max time to establish connection + User: s.Username, + Auth: 
append(auths, ssh.Password(s.Password)), + Timeout: 15 * time.Second, // max time to establish connection + HostKeyCallback: ssh.InsecureIgnoreHostKey(), } - sshConfig.HostKeyCallback = ssh.InsecureIgnoreHostKey() return sshConfig } @@ -382,7 +411,7 @@ func (s *SUT) SendFile(src, dst, permission string) error { func (s *SUT) connectToHost() (*ssh.Client, error) { sshConfig := s.clientConfig() - client, err := SSHDialTimeout("tcp", s.Host, sshConfig, s.clientConfig().Timeout) + client, err := SSHDialTimeout("tcp", s.Host, sshConfig, sshConfig.Timeout) if err != nil { return nil, err } diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml deleted file mode 100644 index 096369d44d9..00000000000 --- a/vendor/github.com/Masterminds/semver/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go - -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - tip - -# Setting sudo access to false will let Travis CI use containers rather than -# VMs to run the tests. For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup - - make test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md deleted file mode 100644 index e405c9a84d9..00000000000 --- a/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ /dev/null @@ -1,109 +0,0 @@ -# 1.5.0 (2019-09-11) - -## Added - -- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) - -## Changed - -- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) -- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) -- #72: Adding docs comment pointing to vert for a cli -- #71: Update the docs on pre-release comparator handling -- #89: Test with new go versions (thanks @thedevsaddam) -- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) - -## Fixed - -- #78: Fix unchecked error in example code (thanks @ravron) -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case -- #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num -- #109: Fixed where Validate sometimes returns wrong message on error - -# 1.4.2 (2018-04-10) - -## Changed -- #72: Updated the docs to point to vert for a console appliaction -- #71: Update the docs on pre-release comparator handling - -## Fixed -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case - -# 1.4.1 (2018-04-02) - -## Fixed -- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) - -# 1.4.0 (2017-10-04) - -## Changed -- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) - -# 1.3.1 (2017-07-10) - -## Fixed -- Fixed #57: number comparisons in prerelease sometimes inaccurate - -# 1.3.0 (2017-05-02) - -## Added -- #45: Added json (un)marshaling support (thanks @mh-cbon) -- Stability marker. 
See https://masterminds.github.io/stability/ - -## Fixed -- #51: Fix handling of single digit tilde constraint (thanks @dgodd) - -## Changed -- #55: The godoc icon moved from png to svg - -# 1.2.3 (2017-04-03) - -## Fixed -- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * - -# Release 1.2.2 (2016-12-13) - -## Fixed -- #34: Fixed issue where hyphen range was not working with pre-release parsing. - -# Release 1.2.1 (2016-11-28) - -## Fixed -- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" - properly. - -# Release 1.2.0 (2016-11-04) - -## Added -- #20: Added MustParse function for versions (thanks @adamreese) -- #15: Added increment methods on versions (thanks @mh-cbon) - -## Fixed -- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and - might not satisfy the intended compatibility. The change here ignores pre-releases - on constraint checks (e.g., ~ or ^) when a pre-release is not part of the - constraint. For example, `^1.2.3` will ignore pre-releases while - `^1.2.3-alpha` will include them. - -# Release 1.1.1 (2016-06-30) - -## Changed -- Issue #9: Speed up version comparison performance (thanks @sdboyer) -- Issue #8: Added benchmarks (thanks @sdboyer) -- Updated Go Report Card URL to new location -- Updated Readme to add code snippet formatting (thanks @mh-cbon) -- Updating tagging to v[SemVer] structure for compatibility with other tools. - -# Release 1.1.0 (2016-03-11) - -- Issue #2: Implemented validation to provide reasons a versions failed a - constraint. - -# Release 1.0.1 (2015-12-31) - -- Fixed #1: * constraint failing on valid versions. - -# Release 1.0.0 (2015-10-20) - -- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt deleted file mode 100644 index 9ff7da9c48b..00000000000 --- a/vendor/github.com/Masterminds/semver/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014-2019, Matt Butcher and Matt Farina - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile deleted file mode 100644 index a7a1b4e36de..00000000000 --- a/vendor/github.com/Masterminds/semver/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -.PHONY: setup -setup: - go get -u gopkg.in/alecthomas/gometalinter.v1 - gometalinter.v1 --install - -.PHONY: test -test: validate lint - @echo "==> Running tests" - go test -v - -.PHONY: validate -validate: - @echo "==> Running static validations" - @gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1 - -.PHONY: lint -lint: - @echo "==> Running linters" - @gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md deleted file mode 100644 index 1b52d2f4362..00000000000 --- a/vendor/github.com/Masterminds/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -# SemVer - -The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: - -* Parse semantic versions -* Sort semantic versions -* Check if a semantic version fits within a set of constraints -* Optionally work with a `v` prefix - -[![Stability: -Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) -[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) - -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - -## Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - -```go - v, err := semver.NewVersion("1.2.3-beta.1+build345") -``` - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the [documentation](https://godoc.org/github.com/Masterminds/semver). - -## Sorting Semantic Versions - -A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) -package from the standard library. For example, - -```go - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) -``` - -## Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. 
- -```go - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) -``` - -## Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. - -The basic comparisons are: - -* `=`: equal (aliased to no operator) -* `!=`: not equal -* `>`: greater than -* `<`: less than -* `>=`: greater than or equal to -* `<=`: less than or equal to - -## Working With Pre-release Versions - -Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of pre-releases include -development, alpha, beta, and release candidate releases. A pre-release may be -a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precidence, pre-releases come before their associated releases. In this -example `1.2.3-beta.1 < 1.2.3`. - -According to the Semantic Version specification pre-releases may not be -API compliant with their release counterpart. It says, - -> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. - -SemVer comparisons without a pre-release comparator will skip pre-release versions. -For example, `>=1.2.3` will skip pre-releases when looking at a list of releases -while `>=1.2.3-0` will evaluate and find pre-releases. - -The reason for the `0` as a pre-release version in the example comparison is -because pre-releases can only contain ASCII alphanumerics and hyphens (along with -`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) - -Understanding ASCII sort ordering is important because A-Z comes before a-z. That -means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case -sensitivity doesn't apply here. This is due to ASCII sort ordering which is what -the spec specifies. - -## Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - -* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -## Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - -* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `< 3` -* `*` is equivalent to `>= 0.0.0` - -## Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. 
-For example, - -* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` -* `~1` is equivalent to `>= 1, < 2` -* `~2.3` is equivalent to `>= 2.3, < 2.4` -* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `~1.x` is equivalent to `>= 1, < 2` - -## Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. For example, - -* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0` -* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -* `^2.3` is equivalent to `>= 2.3, < 3` -* `^2.x` is equivalent to `>= 2.0.0, < 3` - -# Validation - -In addition to testing a version against a constraint, a version can be validated -against a constraint. When validation fails a slice of errors containing why a -version didn't meet the constraint is returned. For example, - -```go - c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - - // Validate a version against a constraint. - a, msgs := c.Validate(v) - // a is false - for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" - } -``` - -# Fuzzing - - [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing. - -1. `go-fuzz-build` -2. `go-fuzz -workdir=fuzz` - -# Contribute - -If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) -or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml deleted file mode 100644 index b2778df15a4..00000000000 --- a/vendor/github.com/Masterminds/semver/appveyor.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\semver -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go version - - go env - - go get -u gopkg.in/alecthomas/gometalinter.v1 - - set PATH=%PATH%;%GOPATH%\bin - - gometalinter.v1.exe --install - -build_script: - - go install -v ./... - -test_script: - - "gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1" - - "gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... || :" - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go deleted file mode 100644 index a78235895fd..00000000000 --- a/vendor/github.com/Masterminds/semver/collection.go +++ /dev/null @@ -1,24 +0,0 @@ -package semver - -// Collection is a collection of Version instances and implements the sort -// interface. See the sort package for more details. -// https://golang.org/pkg/sort/ -type Collection []*Version - -// Len returns the length of a collection. The number of Version instances -// on the slice. -func (c Collection) Len() int { - return len(c) -} - -// Less is needed for the sort interface to compare two Version objects on the -// slice. If checks if one is less than the other. 
-func (c Collection) Less(i, j int) bool { - return c[i].LessThan(c[j]) -} - -// Swap is needed for the sort interface to replace the Version objects -// at two different positions in the slice. -func (c Collection) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go deleted file mode 100644 index b94b93413f3..00000000000 --- a/vendor/github.com/Masterminds/semver/constraints.go +++ /dev/null @@ -1,423 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "regexp" - "strings" -) - -// Constraints is one or more constraint that a semantic version can be -// checked against. -type Constraints struct { - constraints [][]*constraint -} - -// NewConstraint returns a Constraints instance that a Version instance can -// be checked against. If there is a parse error it will be returned. -func NewConstraint(c string) (*Constraints, error) { - - // Rewrite - ranges into a comparison operation. - c = rewriteRange(c) - - ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) - for k, v := range ors { - cs := strings.Split(v, ",") - result := make([]*constraint, len(cs)) - for i, s := range cs { - pc, err := parseConstraint(s) - if err != nil { - return nil, err - } - - result[i] = pc - } - or[k] = result - } - - o := &Constraints{constraints: or} - return o, nil -} - -// Check tests if a version satisfies the constraints. -func (cs Constraints) Check(v *Version) bool { - // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { - joy := true - for _, c := range o { - if !c.check(v) { - joy = false - break - } - } - - if joy { - return true - } - } - - return false -} - -// Validate checks if a version satisfies a constraint. If not a slice of -// reasons for the failure are returned in addition to a bool. -func (cs Constraints) Validate(v *Version) (bool, []error) { - // loop over the ORs and check the inner ANDs - var e []error - - // Capture the prerelease message only once. When it happens the first time - // this var is marked - var prerelesase bool - for _, o := range cs.constraints { - joy := true - for _, c := range o { - // Before running the check handle the case there the version is - // a prerelease and the check is not searching for prereleases. 
- if c.con.pre == "" && v.pre != "" { - if !prerelesase { - em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - e = append(e, em) - prerelesase = true - } - joy = false - - } else { - - if !c.check(v) { - em := fmt.Errorf(c.msg, v, c.orig) - e = append(e, em) - joy = false - } - } - } - - if joy { - return true, []error{} - } - } - - return false, e -} - -var constraintOps map[string]cfunc -var constraintMsg map[string]string -var constraintRegex *regexp.Regexp - -func init() { - constraintOps = map[string]cfunc{ - "": constraintTildeOrEqual, - "=": constraintTildeOrEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "=>": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "=<": constraintLessThanEqual, - "~": constraintTilde, - "~>": constraintTilde, - "^": constraintCaret, - } - - constraintMsg = map[string]string{ - "": "%s is not equal to %s", - "=": "%s is not equal to %s", - "!=": "%s is equal to %s", - ">": "%s is less than or equal to %s", - "<": "%s is greater than or equal to %s", - ">=": "%s is less than %s", - "=>": "%s is less than %s", - "<=": "%s is greater than %s", - "=<": "%s is greater than %s", - "~": "%s does not have same major and minor version as %s", - "~>": "%s does not have same major and minor version as %s", - "^": "%s does not have same major version as %s", - } - - ops := make([]string, 0, len(constraintOps)) - for k := range constraintOps { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegex = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - cvRegex)) - - constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( - `\s*(%s)\s+-\s+(%s)\s*`, - cvRegex, cvRegex)) -} - -// An individual constraint -type constraint struct { - // The callback function for the restraint. It performs the logic for - // the constraint. - function cfunc - - msg string - - // The version used in the constraint check. For example, if a constraint - // is '<= 2.0.0' the con a version instance representing 2.0.0. - con *Version - - // The original parsed version (e.g., 4.x from != 4.x) - orig string - - // When an x is used as part of the version (e.g., 1.x) - minorDirty bool - dirty bool - patchDirty bool -} - -// Check if a version meets the constraint -func (c *constraint) check(v *Version) bool { - return c.function(v, c) -} - -type cfunc func(v *Version, c *constraint) bool - -func parseConstraint(c string) (*constraint, error) { - m := constraintRegex.FindStringSubmatch(c) - if m == nil { - return nil, fmt.Errorf("improper constraint: %s", c) - } - - ver := m[2] - orig := ver - minorDirty := false - patchDirty := false - dirty := false - if isX(m[3]) { - ver = "0.0.0" - dirty = true - } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { - minorDirty = true - dirty = true - ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) - } else if isX(strings.TrimPrefix(m[5], ".")) { - dirty = true - patchDirty = true - ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) - } - - con, err := NewVersion(ver) - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. 
- return nil, errors.New("constraint Parser Error") - } - - cs := &constraint{ - function: constraintOps[m[1]], - msg: constraintMsg[m[1]], - con: con, - orig: orig, - minorDirty: minorDirty, - patchDirty: patchDirty, - dirty: dirty, - } - return cs, nil -} - -// Constraint functions -func constraintNotEqual(v *Version, c *constraint) bool { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if c.con.Major() != v.Major() { - return true - } - if c.con.Minor() != v.Minor() && !c.minorDirty { - return true - } else if c.minorDirty { - return false - } - - return false - } - - return !v.Equal(c.con) -} - -func constraintGreaterThan(v *Version, c *constraint) bool { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - return v.Compare(c.con) == 1 -} - -func constraintLessThan(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if !c.dirty { - return v.Compare(c.con) < 0 - } - - if v.Major() > c.con.Major() { - return false - } else if v.Minor() > c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -func constraintGreaterThanEqual(v *Version, c *constraint) bool { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - return v.Compare(c.con) >= 0 -} - -func constraintLessThanEqual(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if !c.dirty { - return v.Compare(c.con) <= 0 - } - - if v.Major() > c.con.Major() { - return false - } else if v.Minor() > c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -// ~*, ~>* --> >= 0.0.0 (any) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if v.LessThan(c.con) { - return false - } - - // ~0.0.0 is a special case where all constraints are accepted. It's - // equivalent to >= 0.0.0. 
- if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && - !c.minorDirty && !c.patchDirty { - return true - } - - if v.Major() != c.con.Major() { - return false - } - - if v.Minor() != c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -// When there is a .x (dirty) status it automatically opts in to ~. Otherwise -// it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if c.dirty { - c.msg = constraintMsg["~"] - return constraintTilde(v, c) - } - - return v.Equal(c.con) -} - -// ^* --> (any) -// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 -// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 -// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 -// ^1.2.3 --> >=1.2.3, <2.0.0 -// ^1.2.0 --> >=1.2.0, <2.0.0 -func constraintCaret(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if v.LessThan(c.con) { - return false - } - - if v.Major() != c.con.Major() { - return false - } - - return true -} - -var constraintRangeRegex *regexp.Regexp - -const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -func isX(x string) bool { - switch x { - case "x", "*", "X": - return true - default: - return false - } -} - -func rewriteRange(i string) string { - m := constraintRangeRegex.FindAllStringSubmatch(i, -1) - if m == nil { - return i - } - o := i - for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) - o = strings.Replace(o, v[0], t, 1) - } - - return o -} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go deleted file mode 100644 index 6a6c24c6d6e..00000000000 --- a/vendor/github.com/Masterminds/semver/doc.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. - -Specifically it provides the ability to: - - * Parse semantic versions - * Sort semantic versions - * Check if a semantic version fits within a set of constraints - * Optionally work with a `v` prefix - -Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - - v, err := semver.NewVersion("1.2.3-beta.1+build345") - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the documentation at https://godoc.org/github.com/Masterminds/semver. - -Sorting Semantic Versions - -A set of versions can be sorted using the `sort` package from the standard library. 
-For example, - - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) - -Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. - - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, err := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) - -Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. - -The basic comparisons are: - - * `=`: equal (aliased to no operator) - * `!=`: not equal - * `>`: greater than - * `<`: less than - * `>=`: greater than or equal to - * `<=`: less than or equal to - -Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - - * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` - * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - - * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` - * `>= 1.2.x` is equivalent to `>= 1.2.0` - * `<= 2.x` is equivalent to `<= 3` - * `*` is equivalent to `>= 0.0.0` - -Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - - * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` - * `~1` is equivalent to `>= 1, < 2` - * `~2.3` is equivalent to `>= 2.3, < 2.4` - * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` - * `~1.x` is equivalent to `>= 1, < 2` - -Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. 
For example, - - * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` - * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` - * `^2.3` is equivalent to `>= 2.3, < 3` - * `^2.x` is equivalent to `>= 2.0.0, < 3` -*/ -package semver diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml index c87d1c4b90e..fbc6332592f 100644 --- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -5,12 +5,9 @@ linters: disable-all: true enable: - misspell - - structcheck - govet - staticcheck - - deadcode - errcheck - - varcheck - unparam - ineffassign - nakedret diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile index eac19178fbd..0e7b5c7138e 100644 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -1,7 +1,5 @@ GOPATH=$(shell go env GOPATH) GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint -GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -GOFUZZ = $(GOPATH)/bin/go-fuzz .PHONY: lint lint: $(GOLANGCI_LINT) @@ -19,19 +17,14 @@ test-cover: GO111MODULE=on go test -cover . .PHONY: fuzz -fuzz: $(GOFUZZBUILD) $(GOFUZZ) - @echo "==> Fuzz testing" - $(GOFUZZBUILD) - $(GOFUZZ) -workdir=_fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . $(GOLANGCI_LINT): # Install golangci-lint. The configuration for it is in the .golangci.yml # file in the root of the repository echo ${GOPATH} curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 - -$(GOFUZZBUILD): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build - -$(GOFUZZ): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index d8f54dcbd3c..eab8cac3b7f 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -18,18 +18,20 @@ If you are looking for a command line tool for version comparisons please see ## Package Versions +Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. + There are three major versions fo the `semver` package. -* 3.x.x is the new stable and active version. This version is focused on constraint +* 3.x.x is the stable and active version. This version is focused on constraint compatibility for range handling in other tools from other languages. It has a similar API to the v1 releases. The development of this version is on the master branch. The documentation for this version is below. * 2.x was developed primarily for [dep](https://github.com/golang/dep). There are no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -* 1.x.x is the most widely used version with numerous tagged releases. This is the - previous stable and is still maintained for bug fixes. The development, to fix - bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). +* 1.x.x is the original release. 
It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). ## Parsing Semantic Versions @@ -242,3 +244,15 @@ for _, m := range msgs { If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://github.com/Masterminds/semver) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 00000000000..a30a66b1f74 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go index 203072e4646..8461c7ed903 100644 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -586,7 +586,7 @@ func rewriteRange(i string) string { } o := i for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) o = strings.Replace(o, v[0], t, 1) } diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go deleted file mode 100644 index a242ad70587..00000000000 --- a/vendor/github.com/Masterminds/semver/v3/fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - d := string(data) - - // Test NewVersion - _, _ = NewVersion(d) - - // Test StrictNewVersion - _, _ = StrictNewVersion(d) - - // Test NewConstraint - _, _ = NewConstraint(d) - - // The return value should be 0 normally, 1 if the priority in future tests - // should be increased, and -1 if future tests should skip passing in that - // data. We do not have a reason to change priority so 0 is always returned. - // There are example tests that do this. - return 0 -} diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go deleted file mode 100644 index 400d4f93412..00000000000 --- a/vendor/github.com/Masterminds/semver/version.go +++ /dev/null @@ -1,425 +0,0 @@ -package semver - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -// The compiled version of the regex created at init() is cached here so it -// only needs to be created once. 
-var versionRegex *regexp.Regexp -var validPrereleaseRegex *regexp.Regexp - -var ( - // ErrInvalidSemVer is returned a version is found to be invalid when - // being parsed. - ErrInvalidSemVer = errors.New("Invalid Semantic Version") - - // ErrInvalidMetadata is returned when the metadata is an invalid format - ErrInvalidMetadata = errors.New("Invalid Metadata string") - - // ErrInvalidPrerelease is returned when the pre-release is an invalid format - ErrInvalidPrerelease = errors.New("Invalid Prerelease string") -) - -// SemVerRegex is the regular expression used to parse a semantic version. -const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -// ValidPrerelease is the regular expression which validates -// both prerelease and metadata values. -const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` - -// Version represents a single semantic version. -type Version struct { - major, minor, patch int64 - pre string - metadata string - original string -} - -func init() { - versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") - validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) -} - -// NewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. -func NewVersion(v string) (*Version, error) { - m := versionRegex.FindStringSubmatch(v) - if m == nil { - return nil, ErrInvalidSemVer - } - - sv := &Version{ - metadata: m[8], - pre: m[5], - original: v, - } - - var temp int64 - temp, err := strconv.ParseInt(m[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - sv.major = temp - - if m[2] != "" { - temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - sv.minor = temp - } else { - sv.minor = 0 - } - - if m[3] != "" { - temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - sv.patch = temp - } else { - sv.patch = 0 - } - - return sv, nil -} - -// MustParse parses a given version and panics on error. -func MustParse(v string) *Version { - sv, err := NewVersion(v) - if err != nil { - panic(err) - } - return sv -} - -// String converts a Version object to a string. -// Note, if the original version contained a leading v this version will not. -// See the Original() method to retrieve the original value. Semantic Versions -// don't contain a leading v per the spec. Instead it's optional on -// implementation. -func (v *Version) String() string { - var buf bytes.Buffer - - fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original value passed in to be parsed. -func (v *Version) Original() string { - return v.original -} - -// Major returns the major version. -func (v *Version) Major() int64 { - return v.major -} - -// Minor returns the minor version. -func (v *Version) Minor() int64 { - return v.minor -} - -// Patch returns the patch version. -func (v *Version) Patch() int64 { - return v.patch -} - -// Prerelease returns the pre-release version. -func (v *Version) Prerelease() string { - return v.pre -} - -// Metadata returns the metadata on the version. 
-func (v *Version) Metadata() string { - return v.metadata -} - -// originalVPrefix returns the original 'v' prefix if any. -func (v *Version) originalVPrefix() string { - - // Note, only lowercase v is supported as a prefix by the parser. - if v.original != "" && v.original[:1] == "v" { - return v.original[:1] - } - return "" -} - -// IncPatch produces the next patch version. -// If the current version does not have prerelease/metadata information, -// it unsets metadata and prerelease values, increments patch number. -// If the current version has any of prerelease or metadata information, -// it unsets both values and keeps curent patch value -func (v Version) IncPatch() Version { - vNext := v - // according to http://semver.org/#spec-item-9 - // Pre-release versions have a lower precedence than the associated normal version. - // according to http://semver.org/#spec-item-10 - // Build metadata SHOULD be ignored when determining version precedence. - if v.pre != "" { - vNext.metadata = "" - vNext.pre = "" - } else { - vNext.metadata = "" - vNext.pre = "" - vNext.patch = v.patch + 1 - } - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMinor produces the next minor version. -// Sets patch to 0. -// Increments minor number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMinor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = v.minor + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMajor produces the next major version. -// Sets patch to 0. -// Sets minor to 0. -// Increments major number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMajor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = 0 - vNext.major = v.major + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// SetPrerelease defines the prerelease value. -// Value must not include the required 'hypen' prefix. -func (v Version) SetPrerelease(prerelease string) (Version, error) { - vNext := v - if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { - return vNext, ErrInvalidPrerelease - } - vNext.pre = prerelease - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// SetMetadata defines metadata value. -// Value must not include the required 'plus' prefix. -func (v Version) SetMetadata(metadata string) (Version, error) { - vNext := v - if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { - return vNext, ErrInvalidMetadata - } - vNext.metadata = metadata - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// LessThan tests if one version is less than another one. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// GreaterThan tests if one version is greater than another one. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// Equal tests if two versions are equal to each other. -// Note, versions can be equal with different metadata since metadata -// is not considered part of the comparable version. -func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// Compare compares this version to another one. It returns -1, 0, or 1 if -// the version smaller, equal, or larger than the other version. -// -// Versions are compared by X.Y.Z. Build metadata is ignored. 
Prerelease is -// lower than the version without a prerelease. -func (v *Version) Compare(o *Version) int { - // Compare the major, minor, and patch version for differences. If a - // difference is found return the comparison. - if d := compareSegment(v.Major(), o.Major()); d != 0 { - return d - } - if d := compareSegment(v.Minor(), o.Minor()); d != 0 { - return d - } - if d := compareSegment(v.Patch(), o.Patch()); d != 0 { - return d - } - - // At this point the major, minor, and patch versions are the same. - ps := v.pre - po := o.Prerelease() - - if ps == "" && po == "" { - return 0 - } - if ps == "" { - return 1 - } - if po == "" { - return -1 - } - - return comparePrerelease(ps, po) -} - -// UnmarshalJSON implements JSON.Unmarshaler interface. -func (v *Version) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - temp, err := NewVersion(s) - if err != nil { - return err - } - v.major = temp.major - v.minor = temp.minor - v.patch = temp.patch - v.pre = temp.pre - v.metadata = temp.metadata - v.original = temp.original - temp = nil - return nil -} - -// MarshalJSON implements JSON.Marshaler interface. -func (v *Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -func compareSegment(v, o int64) int { - if v < o { - return -1 - } - if v > o { - return 1 - } - - return 0 -} - -func comparePrerelease(v, o string) int { - - // split the prelease versions by their part. The separator, per the spec, - // is a . - sparts := strings.Split(v, ".") - oparts := strings.Split(o, ".") - - // Find the longer length of the parts to know how many loop iterations to - // go through. - slen := len(sparts) - olen := len(oparts) - - l := slen - if olen > slen { - l = olen - } - - // Iterate over each part of the prereleases to compare the differences. - for i := 0; i < l; i++ { - // Since the lentgh of the parts can be different we need to create - // a placeholder. This is to avoid out of bounds issues. - stemp := "" - if i < slen { - stemp = sparts[i] - } - - otemp := "" - if i < olen { - otemp = oparts[i] - } - - d := comparePrePart(stemp, otemp) - if d != 0 { - return d - } - } - - // Reaching here means two versions are of equal value but have different - // metadata (the part following a +). They are not identical in string form - // but the version comparison finds them to be equal. - return 0 -} - -func comparePrePart(s, o string) int { - // Fastpath if they are equal - if s == o { - return 0 - } - - // When s or o are empty we can use the other in an attempt to determine - // the response. - if s == "" { - if o != "" { - return -1 - } - return 1 - } - - if o == "" { - if s != "" { - return 1 - } - return -1 - } - - // When comparing strings "99" is greater than "103". To handle - // cases like this we need to detect numbers and compare them. According - // to the semver spec, numbers are always positive. If there is a - at the - // start like -99 this is to be evaluated as an alphanum. numbers always - // have precedence over alphanum. Parsing as Uints because negative numbers - // are ignored. 
- - oi, n1 := strconv.ParseUint(o, 10, 64) - si, n2 := strconv.ParseUint(s, 10, 64) - - // The case where both are strings compare the strings - if n1 != nil && n2 != nil { - if s > o { - return 1 - } - return -1 - } else if n1 != nil { - // o is a string and s is a number - return -1 - } else if n2 != nil { - // s is a string and o is a number - return 1 - } - // Both are numbers - if si > oi { - return 1 - } - return -1 - -} diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go deleted file mode 100644 index b42bcd62b95..00000000000 --- a/vendor/github.com/Masterminds/semver/version_fuzz.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - if _, err := NewVersion(string(data)); err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index 7b503d26a36..faedfe937a7 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -1,7 +1,3 @@ -run: - skip-dirs: - - pkg/etw/sample - linters: enable: # style @@ -20,9 +16,13 @@ linters: - gofmt # files are gofmt'ed - gosec # security - nilerr # returns nil even with non-nil error + - thelper # test helpers without t.Helper() - unparam # unused function params issues: + exclude-dirs: + - pkg/etw/sample + exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -69,9 +69,7 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true nolintlint: - allow-leading-space: false require-explanation: true require-specific: true revive: diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go index 09621c88463..b54341daacb 100644 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -10,14 +10,14 @@ import ( "io" "os" "runtime" - "syscall" "unicode/utf16" + "github.com/Microsoft/go-winio/internal/fs" "golang.org/x/sys/windows" ) -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite +//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite const ( BackupData = uint32(iota + 1) @@ -104,7 +104,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { return nil, err } - hdr.Name = syscall.UTF16ToString(name) + hdr.Name = windows.UTF16ToString(name) } if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { @@ -205,7 +205,7 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { // Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } @@ -220,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -244,7 +244,7 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { // Write restores a portion of the file using the provided backup stream. func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } @@ -259,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,17 +271,14 @@ func (w *BackupFileWriter) Close() error { // // If the file opened was a directory, it cannot be used with Readdir(). func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], - access, - share, + h, err := fs.CreateFile(path, + fs.AccessMask(access), + fs.FileShareMode(share), nil, - createmode, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, - 0) + fs.FileCreationDisposition(createmode), + fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, + 0, + ) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index 6b3b0cd5198..7f852bbf81b 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -11,7 +11,6 @@ import ( "path/filepath" "strconv" "strings" - "syscall" "time" "github.com/Microsoft/go-winio" @@ -106,7 +105,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + if (fileInfo.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY) != 0 { hdr.Mode |= cISDIR hdr.Size = 0 hdr.Typeflag = tar.TypeDir @@ -378,7 +377,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size // WriteTarFileFromBackupStream. 
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { name = hdr.Name - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + if hdr.Typeflag == tar.TypeReg { size = hdr.Size } fileInfo = &winio.FileBasicInfo{ @@ -396,7 +395,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win fileInfo.FileAttributes = uint32(attr) } else { if hdr.Typeflag == tar.TypeDir { - fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY + fileInfo.FileAttributes |= windows.FILE_ATTRIBUTE_DIRECTORY } } if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok { @@ -469,7 +468,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( } } - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 175a99d3f42..fe82a180dbd 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -15,26 +15,11 @@ import ( "golang.org/x/sys/windows" ) -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } - -//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} +//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult var ( ErrFileClosed = errors.New("file has already been closed") @@ -50,7 +35,7 @@ func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle +var ioCompletionPort windows.Handle // ioResult contains the result of an asynchronous IO operation. 
type ioResult struct { @@ -60,12 +45,12 @@ type ioResult struct { // ioOperation represents an outstanding asynchronous Win32 IO. type ioOperation struct { - o syscall.Overlapped + o windows.Overlapped ch chan ioResult } func initIO() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) } @@ -76,10 +61,10 @@ func initIO() { // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. // It takes ownership of this handle and will close it if it is garbage collected. type win32File struct { - handle syscall.Handle + handle windows.Handle wg sync.WaitGroup wgLock sync.RWMutex - closing atomicBool + closing atomic.Bool socket bool readDeadline deadlineHandler writeDeadline deadlineHandler @@ -90,11 +75,11 @@ type deadlineHandler struct { channel timeoutChan channelLock sync.RWMutex timer *time.Timer - timedout atomicBool + timedout atomic.Bool } // makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h syscall.Handle) (*win32File, error) { +func makeWin32File(h windows.Handle) (*win32File, error) { f := &win32File{handle: h} ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) @@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { return f, nil } +// Deprecated: use NewOpenFile instead. func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return NewOpenFile(windows.Handle(h)) +} + +func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { // If we return the result of makeWin32File directly, it can result in an // interface-wrapped nil, rather than a nil interface value. f, err := makeWin32File(h) @@ -124,13 +114,13 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { + if !f.closing.Swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start - syscall.Close(f.handle) + windows.Close(f.handle) f.handle = 0 } else { f.wgLock.Unlock() @@ -145,14 +135,14 @@ func (f *win32File) Close() error { // IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { - return f.closing.isSet() + return f.closing.Load() } // prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() - if f.closing.isSet() { + if f.closing.Load() { f.wgLock.RUnlock() return nil, ErrFileClosed } @@ -164,12 +154,12 @@ func (f *win32File) prepareIO() (*ioOperation, error) { } // ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h syscall.Handle) { +func ioCompletionProcessor(h windows.Handle) { for { var bytes uint32 var key uintptr var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) if op == nil { panic(err) } @@ -182,11 +172,11 @@ func ioCompletionProcessor(h syscall.Handle) { // asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. 
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } - if f.closing.isSet() { + if f.closing.Load() { _ = cancelIoEx(f.handle, &c.o) } @@ -201,8 +191,8 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.isSet() { + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.Load() { err = ErrFileClosed } } else if err != nil && f.socket { @@ -214,7 +204,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -235,23 +225,22 @@ func (f *win32File) Read(b []byte) (int, error) { } defer f.wg.Done() - if f.readDeadline.timedout.isSet() { + if f.readDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + err = windows.ReadFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF - } else { - return n, err } + return n, err } // Write writes to a file handle. @@ -262,12 +251,12 @@ func (f *win32File) Write(b []byte) (int, error) { } defer f.wg.Done() - if f.writeDeadline.timedout.isSet() { + if f.writeDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + err = windows.WriteFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err @@ -282,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error { } func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) + return windows.FlushFileBuffers(f.handle) } func (f *win32File) Fd() uintptr { @@ -299,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } d.timer = nil } - d.timedout.setFalse() + d.timedout.Store(false) select { case <-d.channel: @@ -314,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } timeoutIO := func() { - d.timedout.setTrue() + d.timedout.Store(true) close(d.channel) } diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index 702950e72a4..c860eb9917a 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -18,9 +18,18 @@ type FileBasicInfo struct { _ uint32 // padding } +// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing +// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 +// alignment is necessary to pass this as FILE_BASIC_INFO. 
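Editor's note: the alignment rationale in the comment above can be checked with a tiny standalone sketch (not part of the vendored change; the structs below only mirror the field layouts). A FILETIME-based layout carries 4-byte alignment, while the uint64-based layout carries the 8-byte alignment FILE_BASIC_INFO expects, at the same overall size.

package main

import (
	"fmt"
	"unsafe"
)

type filetime struct{ LowDateTime, HighDateTime uint32 } // mirrors windows.Filetime

type fileBasicInfo struct { // mirrors winio.FileBasicInfo
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime filetime
	FileAttributes                                          uint32
	_                                                       uint32 // padding
}

type alignedFileBasicInfo struct { // mirrors the new vendored type
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
	FileAttributes                                          uint32
	_                                                       uint32 // padding
}

func main() {
	// Same size, different alignment guarantees (prints "40 4" then "40 8" on amd64).
	fmt.Println(unsafe.Sizeof(fileBasicInfo{}), unsafe.Alignof(fileBasicInfo{}))
	fmt.Println(unsafe.Sizeof(alignedFileBasicInfo{}), unsafe.Alignof(alignedFileBasicInfo{}))
}

The vendored struct definition and the pointer casts in Get/SetFileBasicInfo continue below.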
+type alignedFileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 + FileAttributes uint32 + _ uint32 // padding +} + // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} + bi := &alignedFileBasicInfo{} if err := windows.GetFileInformationByHandleEx( windows.Handle(f.Fd()), windows.FileBasicInfo, @@ -30,16 +39,21 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) - return bi, nil + // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the + // public API of this module. The data may be unnecessarily aligned. + return (*FileBasicInfo)(unsafe.Pointer(bi)), nil } // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is + // suitable to pass to GetFileInformationByHandleEx. + biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) if err := windows.SetFileInformationByHandle( windows.Handle(f.Fd()), windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), + (*byte)(unsafe.Pointer(&biAligned)), + uint32(unsafe.Sizeof(biAligned)), ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index c881916583e..c4fdd9d4aec 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -10,7 +10,6 @@ import ( "io" "net" "os" - "syscall" "time" "unsafe" @@ -181,13 +180,13 @@ type HvsockConn struct { var _ net.Conn = &HvsockConn{} func newHVSocket() (*win32File, error) { - fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) + fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } f, err := makeWin32File(fd) if err != nil { - syscall.Close(fd) + windows.Close(fd) return nil, err } f.socket = true @@ -197,16 +196,24 @@ func newHVSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address. 
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHVSocket() + + var sock *win32File + sock, err = newHVSocket() if err != nil { return nil, l.opErr("listen", err) } + defer func() { + if err != nil { + _ = sock.Close() + } + }() + sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } - err = syscall.Listen(sock.handle, 16) + err = windows.Listen(sock.handle, 16) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("listen", err)) } @@ -246,7 +253,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) + err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } @@ -263,7 +270,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(windows.Handle(sock.handle), + if err = windows.Setsockopt(sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) @@ -334,7 +341,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock }() sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("bind", err)) } @@ -347,7 +354,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock var bytes uint32 for i := uint(0); i <= d.Retries; i++ { err = socket.ConnectEx( - windows.Handle(sock.handle), + sock.handle, &sa, nil, // sendBuf 0, // sendDataLen @@ -367,7 +374,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // update the connection properties, so shutdown can be used if err = windows.Setsockopt( - windows.Handle(sock.handle), + sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_CONNECT_CONTEXT, nil, // optvalue @@ -378,7 +385,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // get the local name var sal rawHvsockAddr - err = socket.GetSockName(windows.Handle(sock.handle), &sal) + err = socket.GetSockName(sock.handle, &sal) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) } @@ -421,7 +428,7 @@ func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { return ctx.Err() } -// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. +// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
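Editor's note: the "plain, unwrapped windows.Errno" assumption documented above matters because canRedial (below) compares errors with ==; once an Errno has been wrapped (for example by os.NewSyscallError), only errors.Is still matches. A minimal illustration, not taken from the diff:

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

func main() {
	plain := error(windows.WSAECONNREFUSED)
	wrapped := os.NewSyscallError("connectex", windows.WSAECONNREFUSED)

	fmt.Println(plain == error(windows.WSAECONNREFUSED))     // true: direct comparison works
	fmt.Println(wrapped == error(windows.WSAECONNREFUSED))   // false: the wrapper hides the Errno
	fmt.Println(errors.Is(wrapped, windows.WSAECONNREFUSED)) // true: errors.Is unwraps
}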
func canRedial(err error) bool { //nolint:errorlint // guaranteed to be an Errno switch err { @@ -447,9 +454,9 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { return 0, conn.opErr("read", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -482,9 +489,9 @@ func (conn *HvsockConn) write(b []byte) (int, error) { return 0, conn.opErr("write", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -511,7 +518,7 @@ func (conn *HvsockConn) shutdown(how int) error { return socket.ErrSocketClosed } - err := syscall.Shutdown(conn.sock.handle, how) + err := windows.Shutdown(conn.sock.handle, how) if err != nil { // If the connection was closed, shutdowns fail with "not connected" if errors.Is(err, windows.WSAENOTCONN) || @@ -525,7 +532,7 @@ func (conn *HvsockConn) shutdown(how int) error { // CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) + err := conn.shutdown(windows.SHUT_RD) if err != nil { return conn.opErr("closeread", err) } @@ -535,7 +542,7 @@ func (conn *HvsockConn) CloseRead() error { // CloseWrite shuts down the write end of the socket, preventing future write operations and // notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) + err := conn.shutdown(windows.SHUT_WR) if err != nil { return conn.opErr("closewrite", err) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go index 509b3ec6410..0cd9621df78 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -11,12 +11,14 @@ import ( //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW const NullHandle windows.Handle = 0 // AccessMask defines standard, specific, and generic rights. // +// Used with CreateFile and NtCreateFile (and co.). 
+// // Bitmask: // 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 // 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 @@ -47,6 +49,12 @@ const ( // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters FILE_ANY_ACCESS AccessMask = 0 + GENERIC_READ AccessMask = 0x8000_0000 + GENERIC_WRITE AccessMask = 0x4000_0000 + GENERIC_EXECUTE AccessMask = 0x2000_0000 + GENERIC_ALL AccessMask = 0x1000_0000 + ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 + // Specific Object Access // from ntioapi.h @@ -124,14 +132,32 @@ const ( TRUNCATE_EXISTING FileCreationDisposition = 0x05 ) +// Create disposition values for NtCreate* +type NTFileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_SUPERSEDE NTFileCreationDisposition = 0x00 + FILE_OPEN NTFileCreationDisposition = 0x01 + FILE_CREATE NTFileCreationDisposition = 0x02 + FILE_OPEN_IF NTFileCreationDisposition = 0x03 + FILE_OVERWRITE NTFileCreationDisposition = 0x04 + FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 + FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 +) + // CreateFile and co. take flags or attributes together as one parameter. // Define alias until we can use generics to allow both - +// // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants type FileFlagOrAttribute uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winnt.h +const ( + // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 @@ -145,17 +171,51 @@ const ( // from winnt.h FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 ) +// NtCreate* functions take a dedicated CreateOptions parameter. +// +// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile +// +// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file +type NTCreateOptions uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 + FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 + FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 + FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 + + FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 + FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 + FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 + FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 + + FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 + FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 + FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 + FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 + + FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 + FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 + FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 + FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 +) + type FileSQSFlag = FileFlagOrAttribute //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( // from winbase.h +const ( + // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 + SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 ) // GetFinalPathNameByHandle flags diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go index e2f7bb24e5f..a94e234c706 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,7 +42,7 @@ var ( procCreateFileW = modkernel32.NewProc("CreateFileW") ) -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -54,8 +51,8 @@ func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall. 
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) } -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = windows.Handle(r0) if handle == windows.InvalidHandle { err = errnoErr(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index aeb7b7250f5..88580d974ec 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -156,9 +156,7 @@ func connectEx( bytesSent *uint32, overlapped *windows.Overlapped, ) (err error) { - // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, - 7, + r1, _, e1 := syscall.SyscallN(connectExFunc.addr, uintptr(s), uintptr(name), uintptr(namelen), @@ -166,8 +164,8 @@ func connectEx( uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), - 0, - 0) + ) + if r1 == 0 { if e1 != 0 { err = error(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go index 6d2e1a9e443..e1504126aa6 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -48,7 +45,7 @@ var ( ) func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { err = errnoErr(e1) } @@ -56,7 +53,7 @@ func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { } func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } @@ -64,7 +61,7 @@ func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err err } func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go index 7ad50570240..42ebc019fcb 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -62,7 +62,7 @@ func (b *WString) Free() { // ResizeTo grows the buffer to at least c and returns the new capacity, freeing the // previous buffer back into pool. 
func (b *WString) ResizeTo(c uint32) uint32 { - // allready sufficient (or n is 0) + // already sufficient (or n is 0) if c <= b.Cap() { return b.Cap() } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 25cc811031b..a2da6639d00 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -11,7 +11,6 @@ import ( "net" "os" "runtime" - "syscall" "time" "unsafe" @@ -20,20 +19,44 @@ import ( "github.com/Microsoft/go-winio/internal/fs" ) -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW +//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe +//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile //sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb //sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U //sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl +type PipeConn interface { + net.Conn + Disconnect() error + Flush() error +} + +// type aliases for mkwinsyscall code +type ( + ntAccessMask = fs.AccessMask + ntFileShareMode = fs.FileShareMode + ntFileCreationDisposition = fs.NTFileCreationDisposition + ntFileOptions = fs.NTCreateOptions +) + type ioStatusBlock struct { 
Status, Information uintptr } +// typedef struct _OBJECT_ATTRIBUTES { +// ULONG Length; +// HANDLE RootDirectory; +// PUNICODE_STRING ObjectName; +// ULONG Attributes; +// PVOID SecurityDescriptor; +// PVOID SecurityQualityOfService; +// } OBJECT_ATTRIBUTES; +// +// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes type objectAttributes struct { Length uintptr RootDirectory uintptr @@ -49,6 +72,17 @@ type unicodeString struct { Buffer uintptr } +// typedef struct _SECURITY_DESCRIPTOR { +// BYTE Revision; +// BYTE Sbz1; +// SECURITY_DESCRIPTOR_CONTROL Control; +// PSID Owner; +// PSID Group; +// PACL Sacl; +// PACL Dacl; +// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; +// +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor type securityDescriptor struct { Revision byte Sbz1 byte @@ -80,6 +114,8 @@ type win32Pipe struct { path string } +var _ PipeConn = (*win32Pipe)(nil) + type win32MessageBytePipe struct { win32Pipe writeClosed bool @@ -103,6 +139,10 @@ func (f *win32Pipe) SetDeadline(t time.Time) error { return f.SetWriteDeadline(t) } +func (f *win32Pipe) Disconnect() error { + return disconnectNamedPipe(f.win32File.handle) +} + // CloseWrite closes the write side of a message pipe in byte mode. func (f *win32MessageBytePipe) CloseWrite() error { if f.writeClosed { @@ -146,7 +186,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -164,21 +204,20 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { for { select { case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() + return windows.Handle(0), ctx.Err() default: - wh, err := fs.CreateFile(*path, + h, err := fs.CreateFile(*path, access, 0, // mode nil, // security attributes fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), 0, // template file handle ) - h := syscall.Handle(wh) if err == nil { return h, nil } @@ -214,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { // DialPipeContext attempts to connect to a named pipe by `path` until `ctx` // cancellation or timeout. func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) + return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) } +// PipeImpLevel is an enumeration of impersonation levels that may be set +// when calling DialPipeAccessImpersonation. 
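Editor's note: the impersonation-level plumbing introduced just below (PipeImpLevel plus the new DialPipeAccessImpLevel entry point) lets a client pick something other than the anonymous default used by the existing DialPipe* helpers. A hedged sketch with a hypothetical pipe name, not part of the diff:

package main

import (
	"context"
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := winio.DialPipeAccessImpLevel(ctx,
		`\\.\pipe\example`, // hypothetical pipe name
		uint32(windows.GENERIC_READ|windows.GENERIC_WRITE),
		winio.PipeImpLevelImpersonation, // instead of the PipeImpLevelAnonymous default
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	_, _ = conn.Write([]byte("ping"))
}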
+type PipeImpLevel uint32 + +const ( + PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) + PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) + PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) + PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) +) + // DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` // cancellation or timeout. func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) +} + +// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with +// `access` at `impLevel` until `ctx` cancellation or timeout. The other +// DialPipe* implementations use PipeImpLevelAnonymous. +func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) + var h windows.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) if err != nil { return nil, err } @@ -235,7 +292,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } @@ -255,7 +312,7 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle + firstHandle windows.Handle path string config PipeConfig acceptCh chan (chan acceptResponse) @@ -263,8 +320,8 @@ type win32PipeListener struct { doneCh chan int } -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { + path16, err := windows.UTF16FromString(path) if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -280,16 +337,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } - defer localFree(ntPath.Buffer) + defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck oa.ObjectName = &ntPath oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. if first { if sd != nil { + //todo: does `sdb` need to be allocated on the heap, or can go allocate it? 
l := uint32(len(sd)) - sdb := localAlloc(0, l) - defer localFree(sdb) + sdb, err := windows.LocalAlloc(0, l) + if err != nil { + return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) + } + defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) } else { @@ -298,7 +359,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } - defer localFree(dacl) + defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck sdb := &securityDescriptor{ Revision: 1, @@ -314,27 +375,27 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(windows.FILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + disposition := fs.FILE_OPEN + access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE if first { - disposition = windows.FILE_CREATE + disposition = fs.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. - access = syscall.SYNCHRONIZE + access = fs.SYNCHRONIZE } timeout := int64(-50 * 10000) // 50ms var ( - h syscall.Handle + h windows.Handle iosb ioStatusBlock ) err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, disposition, 0, typ, @@ -359,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) { } f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } return f, nil @@ -418,7 +479,7 @@ func (l *win32PipeListener) listenerRoutine() { closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } - syscall.Close(l.firstHandle) + windows.Close(l.firstHandle) l.firstHandle = 0 // Notify Close() and Accept() callers that the handle has been closed. 
close(l.doneCh) diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 0ff9dac906d..d9b90b6e861 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -9,7 +9,6 @@ import ( "fmt" "runtime" "sync" - "syscall" "unicode/utf16" "golang.org/x/sys/windows" @@ -18,8 +17,8 @@ import ( //sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges //sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf //sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h windows.Handle) = GetCurrentThread //sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW //sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW @@ -29,7 +28,7 @@ const ( SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" @@ -177,7 +176,7 @@ func newThreadToken() (windows.Token, error) { } var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) if err != nil { rerr := revertToSelf() if rerr != nil { diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go index 5550ef6b61e..c3685e98e14 100644 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -5,7 +5,7 @@ package winio import ( "errors" - "syscall" + "fmt" "unsafe" "golang.org/x/sys/windows" @@ -15,10 +15,6 @@ import ( //sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW //sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len 
uint32) = advapi32.GetSecurityDescriptorLength type AccountLookupError struct { Name string @@ -64,7 +60,7 @@ func LookupSidByName(name string) (sid string, err error) { var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -78,8 +74,8 @@ func LookupSidByName(name string) (sid string, err error) { if err != nil { return "", &AccountLookupError{name, err} } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) + sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) return sid, nil } @@ -100,7 +96,7 @@ func LookupNameBySid(sid string) (name string, err error) { if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { return "", &AccountLookupError{sid, err} } - defer localFree(uintptr(unsafe.Pointer(sidPtr))) + defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck var nameSize, refDomainSize, sidNameUse uint32 err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) @@ -120,25 +116,18 @@ func LookupNameBySid(sid string) (name string, err error) { } func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + sd, err := windows.SecurityDescriptorFromString(sddl) if err != nil { - return nil, &SddlConversionError{sddl, err} + return nil, &SddlConversionError{Sddl: sddl, Err: err} } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil } func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to include an arbitrary number of terminating NULs. - // Don't use it. 
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s.String(), nil } diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go deleted file mode 100644 index 2aa045843ea..00000000000 --- a/vendor/github.com/Microsoft/go-winio/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package winio - -import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 469b16f6398..89b66eda8cc 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,38 +42,34 @@ var ( modntdll = windows.NewLazySystemDLL("ntdll.dll") modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = 
modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -84,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou if releaseAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) success = r0 != 0 if true { err = errnoErr(e1) @@ -92,33 +85,8 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) if r1 == 0 { err = errnoErr(e1) } @@ -126,21 +94,15 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } return } -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) if r1 == 0 { err = errnoErr(e1) } @@ -157,7 +119,7 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz } func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -165,7 +127,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS } func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, 
uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -182,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, } func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) if r1 == 0 { err = errnoErr(e1) } @@ -199,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size * } func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -221,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err } func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { +func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { var _p0 uint32 if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -241,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, } func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, 
processSecurity bool, context *uintptr) (err error) { +func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -261,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { +func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -281,39 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) +func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) +func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) + newport = windows.Handle(r0) if newport == 0 { err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize 
uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -322,96 +284,93 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func disconnectNamedPipe(pipe windows.Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getCurrentThread() (h windows.Handle) { + r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr()) + h = windows.Handle(r0) return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), 
uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) +func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) + if r1 == 0 { + err = errnoErr(e1) + } return } -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) +func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) + if r1 == 0 { + err = errnoErr(e1) + } return } -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) +func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) +func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) status = ntStatus(r0) return } func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) status = ntStatus(r0) return } func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := 
syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) status = ntStatus(r0) return } func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) if r0 != 0 { winerr = syscall.Errno(r0) } return } -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { +func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { var _p0 uint32 if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go index 1a6f73502e6..5022285b441 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -18,8 +18,9 @@ import ( "crypto/cipher" "crypto/subtle" "errors" - "github.com/ProtonMail/go-crypto/internal/byteutil" "math/bits" + + "github.com/ProtonMail/go-crypto/internal/byteutil" ) type ocb struct { @@ -153,7 +154,7 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { truncatedNonce := make([]byte, len(nonce)) copy(truncatedNonce, nonce) truncatedNonce[len(truncatedNonce)-1] &= 192 - Ktop := make([]byte, blockSize) + var Ktop []byte if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { Ktop = o.reusableKtop.Ktop } else { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index d7af9141e36..e0a677f2843 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -23,7 +23,7 @@ import ( // Headers // // base64-encoded Bytes -// '=' base64 encoded checksum +// '=' base64 encoded checksum (optional) not checked anymore // -----END Type----- // // where Headers is a possibly empty sequence of Key: Value lines. @@ -40,36 +40,15 @@ type Block struct { var ArmorCorrupt error = errors.StructuralError("armor invalid") -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - var armorStart = []byte("-----BEGIN ") var armorEnd = []byte("-----END ") var armorEndOfLine = []byte("-----") -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. +// lineReader wraps a line based reader. 
It watches for the end of an armor block type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool + in *bufio.Reader + buf []byte + eof bool } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -98,26 +77,9 @@ func (l *lineReader) Read(p []byte) (n int, err error) { if len(line) == 5 && line[0] == '=' { // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } + // Don't check the checksum l.eof = true - l.crcSet = true return 0, io.EOF } @@ -138,23 +100,14 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return } -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. +// openpgpReader passes Read calls to the underlying base64 decoder. type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 + lReader *lineReader + b64Reader io.Reader } func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - return } @@ -222,7 +175,6 @@ TryNextBlock: } p.lReader.in = r - p.oReader.currentCRC = crc24Init p.oReader.lReader = &p.lReader p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) p.Body = &p.oReader diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index 5b6e16c19d5..112f98b8351 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -14,6 +14,23 @@ var blockEnd = []byte("\n=") var newline = []byte("\n") var armorEndOfLineOut = []byte("-----\n") +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + // writeSlices writes its arguments to the given Writer. 
func writeSlices(out io.Writer, slices ...[]byte) (err error) { for _, s := range slices { @@ -99,15 +116,18 @@ func (l *lineBreaker) Close() (err error) { // // encoding -> base64 encoder -> lineBreaker -> out type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + crcEnabled bool + blockType []byte } func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) + if e.crcEnabled { + e.crc = crc24(e.crc, data) + } return e.b64.Write(data) } @@ -118,20 +138,21 @@ func (e *encoding) Close() (err error) { } e.breaker.Close() - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) + if e.crcEnabled { + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + } + return writeSlices(e.out, newline, armorEnd, e.blockType, armorEndOfLine) } -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { +func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) { bType := []byte(blockType) err = writeSlices(out, armorStart, bType, armorEndOfLineOut) if err != nil { @@ -151,11 +172,27 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr } e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, + out: out, + breaker: newLineBreaker(out, 64), + blockType: bType, + crc: crc24Init, + crcEnabled: checksum, } e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) return e, nil } + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, true) +} + +// EncodeWithChecksumOption returns a WriteCloser which will encode the data written to it in +// OpenPGP armor and provides the option to include a checksum. +// When forming ASCII Armor, the CRC24 footer SHOULD NOT be generated, +// unless interoperability with implementations that require the CRC24 footer +// to be present is a concern. 
+func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, doChecksum) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go index a94f6150c4b..5b40e1375de 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -30,8 +30,12 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { if c == '\r' { *s = 1 } else if c == '\n' { - cw.Write(buf[start:i]) - cw.Write(newline) + if _, err := cw.Write(buf[start:i]); err != nil { + return 0, err + } + if _, err := cw.Write(newline); err != nil { + return 0, err + } start = i + 1 } case 1: @@ -39,7 +43,9 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { } } - cw.Write(buf[start:]) + if _, err := cw.Write(buf[start:]); err != nil { + return 0, err + } return len(buf), nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go new file mode 100644 index 00000000000..6abdf7c4466 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go @@ -0,0 +1,115 @@ +// Package ed25519 implements the ed25519 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed25519 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed25519lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed25519lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed25519lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | pub key point. + Key []byte +} + +// NewPublicKey creates a new empty ed25519 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed25519 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying 32 byte seed of the private key. +func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed25519lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. 
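For illustration, a minimal sketch of how the armor.EncodeWithChecksumOption helper added in the hunk above might be used to omit the legacy CRC24 footer; this example is not part of the patch and assumes only the API shown in that hunk.

package main

import (
	"os"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	// Passing false omits the CRC24 checksum line, as the crypto refresh
	// recommends; passing true preserves the historical behaviour.
	w, err := armor.EncodeWithChecksumOption(os.Stdout, "PGP MESSAGE", nil, false)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello world")); err != nil {
		panic(err)
	}
	// Close writes the armor footer (and the checksum when enabled).
	if err := w.Close(); err != nil {
		panic(err)
	}
}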
+func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed25519lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed25519 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + return ed25519lib.Sign(priv.Key, message), nil +} + +// Verify verifies an ed25519 signature. +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + return ed25519lib.Verify(pub.Point, message, signature) +} + +// Validate checks if the ed25519 private key is valid. +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed25519 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed25519 signature from a reader. +func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go new file mode 100644 index 00000000000..b11fb4fb179 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go @@ -0,0 +1,119 @@ +// Package ed448 implements the ed448 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed448 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed448lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed448lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed448lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | public key point. + Key []byte +} + +// NewPublicKey creates a new empty ed448 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed448 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying seed of the private key. 
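For illustration, a minimal sketch of the new openpgp/ed25519 wrapper in use, assuming only the GenerateKey, Sign and Verify functions defined in the hunk above; it is not part of the patch.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
)

func main() {
	// Generate a fresh key pair backed by cloudflare/circl.
	priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("attack at dawn")
	// Sign expects a valid key; Validate can be used to check it first.
	sig, err := ed25519.Sign(priv, msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(ed25519.Verify(&priv.PublicKey, msg, sig)) // true
}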
+func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed448lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed448lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed448 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Sign(priv.Key, message, ""), nil +} + +// Verify verifies a ed448 signature +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Verify(pub.Point, message, signature, "") +} + +// Validate checks if the ed448 private key is valid +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed448 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed448 signature from a reader. +func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go index 17e2bcfed20..8d6969c0bf8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errors contains common error types for the OpenPGP packages. 
-package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors" +package errors // import "github.com/ProtonMail/go-crypto/v2/openpgp/errors" import ( "strconv" @@ -58,6 +58,14 @@ func (ke keyExpiredError) Error() string { return "openpgp: key expired" } +var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0) + +type signatureOlderThanKeyError int + +func (ske signatureOlderThanKeyError) Error() string { + return "openpgp: signature is older than the key" +} + var ErrKeyExpired error = keyExpiredError(0) type keyIncorrectError int @@ -92,12 +100,24 @@ func (keyRevokedError) Error() string { var ErrKeyRevoked error = keyRevokedError(0) +type WeakAlgorithmError string + +func (e WeakAlgorithmError) Error() string { + return "openpgp: weak algorithms are rejected: " + string(e) +} + type UnknownPacketTypeError uint8 func (upte UnknownPacketTypeError) Error() string { return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) } +type CriticalUnknownPacketTypeError uint8 + +func (upte CriticalUnknownPacketTypeError) Error() string { + return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte)) +} + // AEADError indicates that there is a problem when initializing or using a // AEAD instance, configuration struct, nonces or index values. type AEADError string @@ -114,3 +134,10 @@ type ErrDummyPrivateKey string func (dke ErrDummyPrivateKey) Error() string { return "openpgp: s2k GNU dummy key: " + string(dke) } + +// ErrMalformedMessage results when the packet sequence is incorrect +type ErrMalformedMessage string + +func (dke ErrMalformedMessage) Error() string { + return "openpgp: malformed message " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go index 5760cff80ea..c76a75bcda5 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -51,24 +51,14 @@ func (sk CipherFunction) Id() uint8 { return uint8(sk) } -var keySizeByID = map[uint8]int{ - TripleDES.Id(): 24, - CAST5.Id(): cast5.KeySize, - AES128.Id(): 16, - AES192.Id(): 24, - AES256.Id(): 32, -} - // KeySize returns the key size, in bytes, of cipher. 
func (cipher CipherFunction) KeySize() int { switch cipher { - case TripleDES: - return 24 case CAST5: return cast5.KeySize case AES128: return 16 - case AES192: + case AES192, TripleDES: return 24 case AES256: return 32 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go index 35751034dda..97f891ffc0f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -4,6 +4,7 @@ package ecc import ( "bytes" "crypto/elliptic" + "github.com/ProtonMail/go-crypto/bitcurves" "github.com/ProtonMail/go-crypto/brainpool" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" @@ -47,7 +48,7 @@ var Curves = []CurveInfo{ Curve: NewCurve25519(), }, { - // X448 + // x448 GenName: "Curve448", Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), Curve: NewX448(), diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0e71934cd90..a40e45beeef 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -15,11 +15,15 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" ) // NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a @@ -36,8 +40,8 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err return nil, err } primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) - if config != nil && config.V5Keys { - primary.UpgradeToV5() + if config.V6() { + primary.UpgradeToV6() } e := &Entity{ @@ -45,9 +49,25 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err PrivateKey: primary, Identities: make(map[string]*Identity), Subkeys: []Subkey{}, + Signatures: []*packet.Signature{}, } - err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) + if config.V6() { + // In v6 keys algorithm preferences should be stored in direct key signatures + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypeDirectSignature, config) + err = writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config) + if err != nil { + return nil, err + } + err = selfSignature.SignDirectKeyBinding(&primary.PublicKey, primary, config) + if err != nil { + return nil, err + } + e.Signatures = append(e.Signatures, selfSignature) + e.SelfSignature = selfSignature + } + + err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) if err != nil { return nil, err } @@ -65,27 +85,12 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { creationTime := config.Now() keyLifetimeSecs := config.KeyLifetime() - return t.addUserId(name, comment, email, config, 
creationTime, keyLifetimeSecs) + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) } -func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return errors.InvalidArgumentError("user id field contained invalid characters") - } - - if _, ok := t.Identities[uid.Id]; ok { - return errors.InvalidArgumentError("user id exist") - } - - primary := t.PrivateKey - - isPrimaryId := len(t.Identities) == 0 - - selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) +func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, keyLifetimeSecs uint32, config *packet.Config) error { selfSignature.CreationTime = creationTime selfSignature.KeyLifetimeSecs = &keyLifetimeSecs - selfSignature.IsPrimaryId = &isPrimaryId selfSignature.FlagsValid = true selfSignature.FlagSign = true selfSignature.FlagCertify = true @@ -131,6 +136,29 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) } } + return nil +} + +func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32, writeProperties bool) error { + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return errors.InvalidArgumentError("user id field contained invalid characters") + } + + if _, ok := t.Identities[uid.Id]; ok { + return errors.InvalidArgumentError("user id exist") + } + + primary := t.PrivateKey + isPrimaryId := len(t.Identities) == 0 + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) + if writeProperties { + err := writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config) + if err != nil { + return err + } + } + selfSignature.IsPrimaryId = &isPrimaryId // User ID binding signature err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) @@ -158,8 +186,8 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error { } sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() + if config.V6() { + sub.UpgradeToV6() } subkey := Subkey{ @@ -203,8 +231,8 @@ func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Ti } sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() + if config.V6() { + sub.UpgradeToV6() } subkey := Subkey{ @@ -242,6 +270,11 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { } return rsa.GenerateKey(config.Random(), bits) case packet.PubKeyAlgoEdDSA: + if config.V6() { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. 
+ return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys") + } curve := ecc.FindEdDSAByGenName(string(config.CurveName())) if curve == nil { return nil, errors.InvalidArgumentError("unsupported curve") @@ -263,6 +296,18 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { return nil, err } return priv, nil + case packet.PubKeyAlgoEd25519: + priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoEd448: + priv, err := ed448.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -285,6 +330,13 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey case packet.PubKeyAlgoECDH: + if config.V6() && + (config.CurveName() == packet.Curve25519 || + config.CurveName() == packet.Curve448) { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. + return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys") + } var kdf = ecdh.KDF{ Hash: algorithm.SHA512, Cipher: algorithm.AES256, @@ -294,6 +346,10 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { return nil, errors.InvalidArgumentError("unsupported curve") } return ecdh.GenerateKey(config.Random(), curve, kdf) + case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey + return x25519.GenerateKey(config.Random()) + case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey + return x448.GenerateKey(config.Random()) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -302,7 +358,7 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { var bigOne = big.NewInt(1) // generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the -// given bit size, using the given random source and prepopulated primes. +// given bit size, using the given random source and pre-populated primes. func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { priv := new(rsa.PrivateKey) priv.E = 65537 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index 2d7b0cf3737..a071353e2ec 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -6,6 +6,7 @@ package openpgp import ( goerrors "errors" + "fmt" "io" "time" @@ -24,11 +25,13 @@ var PrivateKeyType = "PGP PRIVATE KEY BLOCK" // (which must be a signing key), one or more identities claimed by that key, // and zero or more subkeys, which may be encryption keys. 
type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey + SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6) + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures } // An Identity represents an identity claimed by an Entity and zero or more @@ -120,12 +123,12 @@ func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // Fail to find any encryption key if the... - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() + if primarySelfSignature == nil || // no self-signature found + e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked + primarySelfSignature.SigExpired(now) || // user ID or or direct self-signature has expired + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) return Key{}, false } @@ -152,9 +155,9 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // If we don't have any subkeys for encryption and the primary key // is marked as OK to encrypt with, then we can use it. - if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications && + if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true + return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true } return Key{}, false @@ -186,12 +189,12 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { // Fail to find any signing key if the... - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() + if primarySelfSignature == nil || // no self-signature found + e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked + primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) return Key{}, false } @@ -220,12 +223,12 @@ func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, // If we don't have any subkeys for signing and the primary key // is marked as OK to sign with, then we can use it. 
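For illustration, a hypothetical sketch of generating a v6 entity with the V6Keys option and checking the direct-key self-signature stored in the new SelfSignature field; it is not part of the patch and assumes only identifiers visible in these hunks.

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// With V6Keys set, NewEntity stores the algorithm preferences on a
	// direct-key self-signature instead of the user ID binding signature.
	cfg := &packet.Config{V6Keys: true}
	entity, err := openpgp.NewEntity("Alice", "", "alice@example.com", cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(entity.SelfSignature != nil) // true for v6 keys
}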
- if i.SelfSignature.FlagsValid && - (flags&packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) && - (flags&packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign) && + if primarySelfSignature.FlagsValid && + (flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) && + (flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) && e.PrimaryKey.PubKeyAlgo.CanSign() && (id == 0 || e.PrimaryKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true + return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true } // No keys with a valid Signing Flag or no keys matched the id passed in @@ -259,7 +262,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er var keysToEncrypt []*packet.PrivateKey // Add entity private key to encrypt. if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted { - keysToEncrypt = append(keysToEncrypt, e.PrivateKey) + keysToEncrypt = append(keysToEncrypt, e.PrivateKey) } // Add subkeys to encrypt. @@ -271,7 +274,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config) } -// DecryptPrivateKeys decrypts all encrypted keys in the entitiy with the given passphrase. +// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase. // Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored, // and don't cause an error to be returned. func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { @@ -284,7 +287,7 @@ func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { // Add subkeys to decrypt. for _, sub := range e.Subkeys { if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted { - keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) + keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) } } return packet.DecryptPrivateKeys(keysToDecrypt, passphrase) @@ -318,8 +321,7 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { - ident := e.PrimaryIdentity() - selfSig := ident.SelfSignature + selfSig, _ := e.PrimarySelfSignature() keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } @@ -441,7 +443,6 @@ func readToNextPublicKey(packets *packet.Reader) (err error) { return } else if err != nil { if _, ok := err.(errors.UnsupportedError); ok { - err = nil continue } return @@ -479,6 +480,7 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) { } var revocations []*packet.Signature + var directSignatures []*packet.Signature EachPacket: for { p, err := packets.Next() @@ -497,9 +499,7 @@ EachPacket: if pkt.SigType == packet.SigTypeKeyRevocation { revocations = append(revocations, pkt) } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). + directSignatures = append(directSignatures, pkt) } // Else, ignoring the signature as it does not follow anything // we would know to attach it to. @@ -522,12 +522,39 @@ EachPacket: return nil, err } default: - // we ignore unknown packets + // we ignore unknown packets. 
} } - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") + if len(e.Identities) == 0 && e.PrimaryKey.Version < 6 { + return nil, errors.StructuralError(fmt.Sprintf("v%d entity without any identities", e.PrimaryKey.Version)) + } + + // An implementation MUST ensure that a valid direct-key signature is present before using a v6 key. + if e.PrimaryKey.Version == 6 { + if len(directSignatures) == 0 { + return nil, errors.StructuralError("v6 entity without a valid direct-key signature") + } + // Select main direct key signature. + var mainDirectKeySelfSignature *packet.Signature + for _, directSignature := range directSignatures { + if directSignature.SigType == packet.SigTypeDirectSignature && + directSignature.CheckKeyIdOrFingerprint(e.PrimaryKey) && + (mainDirectKeySelfSignature == nil || + directSignature.CreationTime.After(mainDirectKeySelfSignature.CreationTime)) { + mainDirectKeySelfSignature = directSignature + } + } + if mainDirectKeySelfSignature == nil { + return nil, errors.StructuralError("no valid direct-key self-signature for v6 primary key found") + } + // Check that the main self-signature is valid. + err = e.PrimaryKey.VerifyDirectKeySignature(mainDirectKeySelfSignature) + if err != nil { + return nil, errors.StructuralError("invalid direct-key self-signature for v6 primary key") + } + e.SelfSignature = mainDirectKeySelfSignature + e.Signatures = directSignatures } for _, revocation := range revocations { @@ -672,6 +699,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return err } } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -738,6 +771,12 @@ func (e *Entity) Serialize(w io.Writer) error { return err } } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -840,3 +879,23 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea sk.Revocations = append(sk.Revocations, revSig) return nil } + +func (e *Entity) primaryDirectSignature() *packet.Signature { + return e.SelfSignature +} + +// PrimarySelfSignature searches the entity for the self-signature that stores key preferences. +// For V4 keys, returns the self-signature of the primary identity, and the identity. +// For V6 keys, returns the latest valid direct-key self-signature, and no identity (nil). +// This self-signature is to be used to check the key expiration, +// algorithm preferences, and so on. 
+func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) { + if e.PrimaryKey.Version == 6 { + return e.primaryDirectSignature(), nil + } + primaryIdentity := e.PrimaryIdentity() + if primaryIdentity == nil { + return nil, nil + } + return primaryIdentity.SelfSignature, primaryIdentity +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go index cee83bdc7a1..2d1aeed65c0 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -88,17 +88,20 @@ func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { if errRead != nil && errRead != io.EOF { return 0, errRead } - decrypted, errChunk := ar.openChunk(cipherChunk) - if errChunk != nil { - return 0, errChunk - } - // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) + if len(cipherChunk) > 0 { + decrypted, errChunk := ar.openChunk(cipherChunk) + if errChunk != nil { + return 0, errChunk + } + + // Return decrypted bytes, buffering if necessary + if len(dst) < len(decrypted) { + n = copy(dst, decrypted[:len(dst)]) + ar.buffer.Write(decrypted[len(dst):]) + } else { + n = copy(dst, decrypted) + } } // Check final authentication tag @@ -116,6 +119,12 @@ func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { // checked in the last Read call. In the future, this function could be used to // wipe the reader and peeked, decrypted bytes, if necessary. func (ar *aeadDecrypter) Close() (err error) { + if !ar.eof { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return errChunk + } + } return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go index 2f5cad71dab..334de286b38 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -8,9 +8,11 @@ import ( "compress/bzip2" "compress/flate" "compress/zlib" - "github.com/ProtonMail/go-crypto/openpgp/errors" "io" + "io/ioutil" "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // Compressed represents a compressed OpenPGP packet. The decompressed contents @@ -39,6 +41,37 @@ type CompressionConfig struct { Level int } +// decompressionReader ensures that the whole compression packet is read. +type decompressionReader struct { + compressed io.Reader + decompressed io.ReadCloser + readAll bool +} + +func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader { + return &decompressionReader{ + compressed: r, + decompressed: decompressor, + } +} + +func (dr *decompressionReader) Read(data []byte) (n int, err error) { + if dr.readAll { + return 0, io.EOF + } + n, err = dr.decompressed.Read(data) + if err == io.EOF { + dr.readAll = true + // Close the decompressor. + if errDec := dr.decompressed.Close(); errDec != nil { + return n, errDec + } + // Consume all remaining data from the compressed packet. 
+ consumeAll(dr.compressed) + } + return n, err +} + func (c *Compressed) parse(r io.Reader) error { var buf [1]byte _, err := readFull(r, buf[:]) @@ -50,11 +83,15 @@ func (c *Compressed) parse(r io.Reader) error { case 0: c.Body = r case 1: - c.Body = flate.NewReader(r) + c.Body = newDecompressionReader(r, flate.NewReader(r)) case 2: - c.Body, err = zlib.NewReader(r) + decompressor, err := zlib.NewReader(r) + if err != nil { + return err + } + c.Body = newDecompressionReader(r, decompressor) case 3: - c.Body = bzip2.NewReader(r) + c.Body = newDecompressionReader(r, ioutil.NopCloser(bzip2.NewReader(r))) default: err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go index 04994bec97b..181d5d344ec 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -14,6 +14,21 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/s2k" ) +var ( + defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{ + PubKeyAlgoElGamal: true, + PubKeyAlgoDSA: true, + } + defaultRejectMessageHashAlgorithms = map[crypto.Hash]bool{ + crypto.SHA1: true, + crypto.MD5: true, + crypto.RIPEMD160: true, + } + defaultRejectCurves = map[Curve]bool{ + CurveSecP256k1: true, + } +) + // Config collects a number of parameters along with sensible defaults. // A nil *Config is valid and results in all default values. type Config struct { @@ -73,9 +88,15 @@ type Config struct { // **Note: using this option may break compatibility with other OpenPGP // implementations, as well as future versions of this library.** AEADConfig *AEADConfig - // V5Keys configures version 5 key generation. If false, this package still - // supports version 5 keys, but produces version 4 keys. - V5Keys bool + // V6Keys configures version 6 key generation. If false, this package still + // supports version 6 keys, but produces version 4 keys. + V6Keys bool + // Minimum RSA key size allowed for key generation and message signing, verification and encryption. + MinRSABits uint16 + // Reject insecure algorithms, only works with v2 api + RejectPublicKeyAlgorithms map[PublicKeyAlgorithm]bool + RejectMessageHashAlgorithms map[crypto.Hash]bool + RejectCurves map[Curve]bool // "The validity period of the key. This is the number of seconds after // the key creation time that the key expires. If this is not present // or has a value of zero, the key never expires. This is found only on @@ -110,6 +131,21 @@ type Config struct { KnownNotations map[string]bool // SignatureNotations is a list of Notations to be added to any signatures. SignatureNotations []*Notation + // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature + // should be enabled for encryption and decryption. + // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr). + // When the flag is set, encryption produces Intended Recipient Fingerprint signature sub-packets and decryption + // checks whether the key it was encrypted to is one of the included fingerprints in the signature. + // If the flag is disabled, no Intended Recipient Fingerprint sub-packets are created or checked. + // The default behavior, when the config or flag is nil, is to enable the feature. 
+ CheckIntendedRecipients *bool + // CacheSessionKey controls if decryption should return the session key used for decryption. + // If the flag is set, the session key is cached in the message details struct. + CacheSessionKey bool + // CheckPacketSequence is a flag that controls if the pgp message reader should strictly check + // that the packet sequence conforms with the grammar mandated by rfc4880. + // The default behavior, when the config or flag is nil, is to check the packet sequence. + CheckPacketSequence *bool } func (c *Config) Random() io.Reader { @@ -246,3 +282,71 @@ func (c *Config) Notations() []*Notation { } return c.SignatureNotations } + +func (c *Config) V6() bool { + if c == nil { + return false + } + return c.V6Keys +} + +func (c *Config) IntendedRecipients() bool { + if c == nil || c.CheckIntendedRecipients == nil { + return true + } + return *c.CheckIntendedRecipients +} + +func (c *Config) RetrieveSessionKey() bool { + if c == nil { + return false + } + return c.CacheSessionKey +} + +func (c *Config) MinimumRSABits() uint16 { + if c == nil || c.MinRSABits == 0 { + return 2047 + } + return c.MinRSABits +} + +func (c *Config) RejectPublicKeyAlgorithm(alg PublicKeyAlgorithm) bool { + var rejectedAlgorithms map[PublicKeyAlgorithm]bool + if c == nil || c.RejectPublicKeyAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectPublicKeyAlgorithms + } else { + rejectedAlgorithms = c.RejectPublicKeyAlgorithms + } + return rejectedAlgorithms[alg] +} + +func (c *Config) RejectMessageHashAlgorithm(hash crypto.Hash) bool { + var rejectedAlgorithms map[crypto.Hash]bool + if c == nil || c.RejectMessageHashAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectMessageHashAlgorithms + } else { + rejectedAlgorithms = c.RejectMessageHashAlgorithms + } + return rejectedAlgorithms[hash] +} + +func (c *Config) RejectCurve(curve Curve) bool { + var rejectedCurve map[Curve]bool + if c == nil || c.RejectCurves == nil { + // Default + rejectedCurve = defaultRejectCurves + } else { + rejectedCurve = c.RejectCurves + } + return rejectedCurve[curve] +} + +func (c *Config) StrictPacketSequence() bool { + if c == nil || c.CheckPacketSequence == nil { + return true + } + return *c.CheckPacketSequence +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go index eeff2902c12..e70f9d9411b 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -5,9 +5,11 @@ package packet import ( + "bytes" "crypto" "crypto/rsa" "encoding/binary" + "encoding/hex" "io" "math/big" "strconv" @@ -16,32 +18,85 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" ) -const encryptedKeyVersion = 3 - // EncryptedKey represents a public-key encrypted session key. See RFC 4880, // section 5.1. 
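// A version 6 packet identifies the recipient by key version and full
// fingerprint (or omits both for an anonymous recipient), whereas a version 3
// packet carries only the 8-byte key ID.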
type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet - Key []byte // only valid after a successful Decrypt + Version int + KeyId uint64 + KeyVersion int // v6 + KeyFingerprint []byte // v6 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet + Key []byte // only valid after a successful Decrypt encryptedMPI1, encryptedMPI2 encoding.Field + ephemeralPublicX25519 *x25519.PublicKey // used for x25519 + ephemeralPublicX448 *x448.PublicKey // used for x448 + encryptedSession []byte // used for x25519 and x448 } func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) + var buf [8]byte + _, err = readFull(r, buf[:versionSize]) if err != nil { return } - if buf[0] != encryptedKeyVersion { + e.Version = int(buf[0]) + if e.Version != 3 && e.Version != 6 { return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) + if e.Version == 6 { + //Read a one-octet size of the following two fields. + if _, err = readFull(r, buf[:1]); err != nil { + return + } + // The size may also be zero, and the key version and + // fingerprint omitted for an "anonymous recipient" + if buf[0] != 0 { + // non-anonymous case + _, err = readFull(r, buf[:versionSize]) + if err != nil { + return + } + e.KeyVersion = int(buf[0]) + if e.KeyVersion != 4 && e.KeyVersion != 6 { + return errors.UnsupportedError("unknown public key version " + strconv.Itoa(e.KeyVersion)) + } + var fingerprint []byte + if e.KeyVersion == 6 { + fingerprint = make([]byte, fingerprintSizeV6) + } else if e.KeyVersion == 4 { + fingerprint = make([]byte, fingerprintSize) + } + _, err = readFull(r, fingerprint) + if err != nil { + return + } + e.KeyFingerprint = fingerprint + if e.KeyVersion == 6 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[:keyIdSize]) + } else if e.KeyVersion == 4 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[fingerprintSize-keyIdSize : fingerprintSize]) + } + } + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + e.KeyId = binary.BigEndian.Uint64(buf[:keyIdSize]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + e.Algo = PublicKeyAlgorithm(buf[0]) + var cipherFunction byte switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: e.encryptedMPI1 = new(encoding.MPI) @@ -68,26 +123,39 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) { if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { return } + case PubKeyAlgoX25519: + e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6) + if err != nil { + return + } + case PubKeyAlgoX448: + e.ephemeralPublicX448, e.encryptedSession, cipherFunction, err = x448.DecodeFields(r, e.Version == 6) + if err != nil { + return + } + } + if e.Version < 6 { + switch e.Algo { + case PubKeyAlgoX25519, PubKeyAlgoX448: + e.CipherFunc = CipherFunction(cipherFunction) + // Check for validiy is in the Decrypt method + } } + _, err = consumeAll(r) return } -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - // Decrypt decrypts an encrypted session key with the given private key. The // private key must have been decrypted first. // If config is nil, sensible defaults will be used. 
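// On success e.Key holds the decrypted session key; for a v3 packet
// e.CipherFunc is populated as well.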
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - if e.KeyId != 0 && e.KeyId != priv.KeyId { + if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId { return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) } + if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint)) + } if e.Algo != priv.PubKeyAlgo { return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) } @@ -114,51 +182,110 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { m := e.encryptedMPI2.Bytes() oid := priv.PublicKey.oid.EncodedBytes() b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) + case PubKeyAlgoX25519: + b, err = x25519.Decrypt(priv.PrivateKey.(*x25519.PrivateKey), e.ephemeralPublicX25519, e.encryptedSession) + case PubKeyAlgoX448: + b, err = x448.Decrypt(priv.PrivateKey.(*x448.PrivateKey), e.ephemeralPublicX448, e.encryptedSession) default: err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) } - if err != nil { return err } - e.CipherFunc = CipherFunction(b[0]) - if !e.CipherFunc.IsSupported() { - return errors.UnsupportedError("unsupported encryption function") - } - - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") + var key []byte + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + keyOffset := 0 + if e.Version < 6 { + e.CipherFunc = CipherFunction(b[0]) + keyOffset = 1 + if !e.CipherFunc.IsSupported() { + return errors.UnsupportedError("unsupported encryption function") + } + } + key, err = decodeChecksumKey(b[keyOffset:]) + if err != nil { + return err + } + case PubKeyAlgoX25519, PubKeyAlgoX448: + if e.Version < 6 { + switch e.CipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.StructuralError("v3 PKESK mandates AES as cipher function for x25519 and x448") + } + } + key = b[:] + default: + return errors.UnsupportedError("unsupported algorithm for decryption") } - + e.Key = key return nil } // Serialize writes the encrypted key packet, e, to w. 
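// The wire format depends on e.Version: a v3 packet carries the 8-byte key ID,
// while a v6 packet carries the recipient key version and fingerprint; in
// either version the identifier may be zeroed or omitted to hide the intended
// recipient.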
func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int + var encodedLength int switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + encodedLength = int(e.encryptedMPI1.EncodedLength()) case PubKeyAlgoElGamal: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) case PubKeyAlgoECDH: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoX25519: + encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6) + case PubKeyAlgoX448: + encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6) default: return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) } - err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength + if e.Version == 6 { + packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */ + if e.KeyVersion == 6 { + packetLen += fingerprintSizeV6 + } else if e.KeyVersion == 4 { + packetLen += fingerprintSize + } + } + + err := serializeHeader(w, packetTypeEncryptedKey, packetLen) if err != nil { return err } - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) + _, err = w.Write([]byte{byte(e.Version)}) + if err != nil { + return err + } + if e.Version == 6 { + _, err = w.Write([]byte{byte(e.KeyVersion)}) + if err != nil { + return err + } + // The key version number may also be zero, + // and the fingerprint omitted + if e.KeyVersion != 0 { + _, err = w.Write(e.KeyFingerprint) + if err != nil { + return err + } + } + } else { + // Write KeyID + err = binary.Write(w, binary.BigEndian, e.KeyId) + if err != nil { + return err + } + } + _, err = w.Write([]byte{byte(e.Algo)}) + if err != nil { + return err + } switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: @@ -176,34 +303,113 @@ func (e *EncryptedKey) Serialize(w io.Writer) error { } _, err := w.Write(e.encryptedMPI2.EncodedBytes()) return err + case PubKeyAlgoX25519: + err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6) + return err + case PubKeyAlgoX448: + err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6) + return err default: panic("internal error") } } -// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains // key, encrypted to pub. +// If aeadSupported is set, PKESK v6 is used else v4. // If config is nil, sensible defaults will be used. 
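+// A rough usage sketch (illustrative only; w, pub, sessionKey and config are
+// assumed to be set up by the caller):
+//
+//	aead := config.AEAD() != nil
+//	err := SerializeEncryptedKeyAEAD(w, pub, CipherAES256, aead, sessionKey, config)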
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) +func SerializeEncryptedKeyAEAD(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, aeadSupported, key, false, config) +} + +// SerializeEncryptedKeyAEADwithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// Offers the hidden flag option to indicated if the PKESK packet should include a wildcard KeyID. +// If aeadSupported is set, PKESK v6 is used else v4. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, hidden bool, config *Config) error { + var buf [36]byte // max possible header size is v6 + lenHeaderWritten := versionSize + version := 3 + + if aeadSupported { + version = 6 + } + // An implementation MUST NOT generate ElGamal v6 PKESKs. + if version == 6 && pub.PubKeyAlgo == PubKeyAlgoElGamal { + return errors.InvalidArgumentError("ElGamal v6 PKESK are not allowed") + } + // In v3 PKESKs, for x25519 and x448, mandate using AES + if version == 3 && (pub.PubKeyAlgo == PubKeyAlgoX25519 || pub.PubKeyAlgo == PubKeyAlgoX448) { + switch cipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.InvalidArgumentError("v3 PKESK mandates AES for x25519 and x448") + } + } + + buf[0] = byte(version) + + // If hidden is set, the key should be hidden + // An implementation MAY accept or use a Key ID of all zeros, + // or a key version of zero and no key fingerprint, to hide the intended decryption key. + // See Section 5.1.8. in the open pgp crypto refresh + if version == 6 { + if !hidden { + // A one-octet size of the following two fields. + buf[1] = byte(keyVersionSize + len(pub.Fingerprint)) + // A one octet key version number. 
+ buf[2] = byte(pub.Version) + lenHeaderWritten += keyVersionSize + 1 + // The fingerprint of the public key + copy(buf[lenHeaderWritten:lenHeaderWritten+len(pub.Fingerprint)], pub.Fingerprint) + lenHeaderWritten += len(pub.Fingerprint) + } else { + // The size may also be zero, and the key version + // and fingerprint omitted for an "anonymous recipient" + buf[1] = 0 + lenHeaderWritten += 1 + } + } else { + if !hidden { + binary.BigEndian.PutUint64(buf[versionSize:(versionSize+keyIdSize)], pub.KeyId) + } + lenHeaderWritten += keyIdSize + } + buf[lenHeaderWritten] = byte(pub.PubKeyAlgo) + lenHeaderWritten += algorithmSize + + var keyBlock []byte + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + lenKeyBlock := len(key) + 2 + if version < 6 { + lenKeyBlock += 1 // cipher type included + } + keyBlock = make([]byte, lenKeyBlock) + keyOffset := 0 + if version < 6 { + keyBlock[0] = byte(cipherFunc) + keyOffset = 1 + } + encodeChecksumKey(keyBlock[keyOffset:], key) + case PubKeyAlgoX25519, PubKeyAlgoX448: + // algorithm is added in plaintext below + keyBlock = key + } switch pub.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + return serializeEncryptedKeyRSA(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*rsa.PublicKey), keyBlock) case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + return serializeEncryptedKeyElGamal(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*elgamal.PublicKey), keyBlock) case PubKeyAlgoECDH: - return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + return serializeEncryptedKeyECDH(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoX25519: + return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version) + case PubKeyAlgoX448: + return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version) case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) } @@ -211,14 +417,30 @@ func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunctio return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) } -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// PKESKv6 is used if config.AEAD() is not nil. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + return SerializeEncryptedKeyAEAD(w, pub, cipherFunc, config.AEAD() != nil, key, config) +} + +// SerializeEncryptedKeyWithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. PKESKv6 is used if config.AEAD() is not nil. +// The hidden option controls if the packet should be anonymous, i.e., omit key metadata. 
+// If config is nil, sensible defaults will be used. +func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, hidden bool, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, config.AEAD() != nil, key, hidden, config) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error { cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) if err != nil { return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) } cipherMPI := encoding.NewMPI(cipherText) - packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) + packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength()) err = serializeHeader(w, packetTypeEncryptedKey, packetLen) if err != nil { @@ -232,13 +454,13 @@ func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub return err } -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error { c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) if err != nil { return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) } - packetLen := 10 /* header length */ + packetLen := len(header) /* header length */ packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 @@ -257,7 +479,7 @@ func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, return err } -func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) if err != nil { return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) @@ -266,7 +488,7 @@ func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub g := encoding.NewMPI(vsG) m := encoding.NewOID(c) - packetLen := 10 /* header length */ + packetLen := len(header) /* header length */ packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) err = serializeHeader(w, packetTypeEncryptedKey, packetLen) @@ -284,3 +506,70 @@ func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub _, err = w.Write(m.EncodedBytes()) return err } + +func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x25519 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x25519.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x25519.EncodeFields(w, ephemeralPublicX25519, ciphertext, cipherFunc, version == 6) +} + +func serializeEncryptedKeyX448(w io.Writer, rand io.Reader, header []byte, pub *x448.PublicKey, keyBlock []byte, cipherFunc byte, version 
int) error { + ephemeralPublicX448, ciphertext, err := x448.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x448 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x448.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x448.EncodeFields(w, ephemeralPublicX448, ciphertext, cipherFunc, version == 6) +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +func decodeChecksumKey(msg []byte) (key []byte, err error) { + key = msg[:len(msg)-2] + expectedChecksum := uint16(msg[len(msg)-2])<<8 | uint16(msg[len(msg)-1]) + checksum := checksumKeyMaterial(key) + if checksum != expectedChecksum { + err = errors.StructuralError("session key checksum is incorrect") + } + return +} + +func encodeChecksumKey(buffer []byte, key []byte) { + copy(buffer, key) + checksum := checksumKeyMaterial(key) + buffer[len(key)] = byte(checksum >> 8) + buffer[len(key)+1] = byte(checksum) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go index 4be987609be..8a028c8a171 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -58,9 +58,9 @@ func (l *LiteralData) parse(r io.Reader) (err error) { // on completion. The fileName is truncated to 255 bytes. func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' + buf[0] = 'b' + if !isBinary { + buf[0] = 'u' } if len(fileName) > 255 { fileName = fileName[:255] diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go new file mode 100644 index 00000000000..1ee378ba3c1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go @@ -0,0 +1,33 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +type Marker struct{} + +const markerString = "PGP" + +// parse just checks if the packet contains "PGP". +func (m *Marker) parse(reader io.Reader) error { + var buffer [3]byte + if _, err := io.ReadFull(reader, buffer[:]); err != nil { + return err + } + if string(buffer[:]) != markerString { + return errors.StructuralError("invalid marker packet") + } + return nil +} + +// SerializeMarker writes a marker packet to writer. 
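+// Per RFC 4880, section 5.8, a Marker packet may appear at the start of a
+// message and must be ignored when received; SerializeMarker simply emits the
+// three octets "PGP" preceded by a packet header.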
+func SerializeMarker(writer io.Writer) error { + err := serializeHeader(writer, packetTypeMarker, len(markerString)) + if err != nil { + return err + } + _, err = writer.Write([]byte(markerString)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go index 033fb2d7e8c..f393c4063b8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -7,34 +7,37 @@ package packet import ( "crypto" "encoding/binary" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "io" "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" ) // OnePassSignature represents a one-pass signature packet. See RFC 4880, // section 5.4. type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool + Version int + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool + Salt []byte // v6 only + KeyFingerprint []byte // v6 only } -const onePassSignatureVersion = 3 - func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) + var buf [8]byte + // Read: version | signature type | hash algorithm | public-key algorithm + _, err = readFull(r, buf[:4]) if err != nil { return } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + if buf[0] != 3 && buf[0] != 6 { + return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) } + ops.Version = int(buf[0]) var ok bool ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) @@ -44,15 +47,69 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) { ops.SigType = SignatureType(buf[1]) ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 + + if ops.Version == 6 { + // Only for v6, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(ops.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + ops.Salt = salt + + // Only for v6 packets, 32 octets of the fingerprint of the signing key. + fingerprint := make([]byte, 32) + _, err = readFull(r, fingerprint) + if err != nil { + return + } + ops.KeyFingerprint = fingerprint + ops.KeyId = binary.BigEndian.Uint64(ops.KeyFingerprint[:8]) + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + ops.KeyId = binary.BigEndian.Uint64(buf[:8]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + ops.IsLast = buf[0] != 0 return } // Serialize marshals the given OnePassSignature to w. 
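// A v3 packet body is always 13 octets; a v6 packet replaces the 8-octet key
// ID with a salt (preceded by its one-octet length) and a 32-octet key
// fingerprint, giving 38+len(ops.Salt) octets in total.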
func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion + //v3 length 1+1+1+1+8+1 = + packetLength := 13 + if ops.Version == 6 { + // v6 length 1+1+1+1+1+len(salt)+32+1 = + packetLength = 38 + len(ops.Salt) + } + + if err := serializeHeader(w, packetTypeOnePassSignature, packetLength); err != nil { + return err + } + + var buf [8]byte + buf[0] = byte(ops.Version) buf[1] = uint8(ops.SigType) var ok bool buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash) @@ -60,14 +117,41 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error { return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) } buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + _, err := w.Write(buf[:4]) + if err != nil { return err } - _, err := w.Write(buf[:]) + + if ops.Version == 6 { + // write salt for v6 signatures + _, err := w.Write([]byte{uint8(len(ops.Salt))}) + if err != nil { + return err + } + _, err = w.Write(ops.Salt) + if err != nil { + return err + } + + // write fingerprint v6 signatures + _, err = w.Write(ops.KeyFingerprint) + if err != nil { + return err + } + } else { + binary.BigEndian.PutUint64(buf[:8], ops.KeyId) + _, err := w.Write(buf[:8]) + if err != nil { + return err + } + } + + isLast := []byte{byte(0)} + if ops.IsLast { + isLast[0] = 1 + } + + _, err = w.Write(isLast) return err } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go index 4f8204079f2..cef7c661d3f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -7,7 +7,6 @@ package packet import ( "bytes" "io" - "io/ioutil" "github.com/ProtonMail/go-crypto/openpgp/errors" ) @@ -26,7 +25,7 @@ type OpaquePacket struct { } func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) + op.Contents, err = io.ReadAll(r) return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index 4d86a7da826..da12fbce060 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -4,7 +4,7 @@ // Package packet implements parsing and serialization of OpenPGP packets, as // specified in RFC 4880. -package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" +package packet // import "github.com/ProtonMail/go-crypto/v2/openpgp/packet" import ( "bytes" @@ -311,12 +311,15 @@ const ( packetTypePrivateSubkey packetType = 7 packetTypeCompressed packetType = 8 packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeMarker packetType = 10 packetTypeLiteralData packetType = 11 + packetTypeTrust packetType = 12 packetTypeUserId packetType = 13 packetTypePublicSubkey packetType = 14 packetTypeUserAttribute packetType = 17 packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 packetTypeAEADEncrypted packetType = 20 + packetPadding packetType = 21 ) // EncryptedDataPacket holds encrypted data. It is currently implemented by @@ -328,7 +331,7 @@ type EncryptedDataPacket interface { // Read reads a single OpenPGP packet from the given io.Reader. 
If there is an // error parsing a packet, the whole packet is consumed from the input. func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) + tag, len, contents, err := readHeader(r) if err != nil { return } @@ -367,8 +370,93 @@ func Read(r io.Reader) (p Packet, err error) { p = se case packetTypeAEADEncrypted: p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume + err = errors.UnknownPacketTypeError(tag) default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. + if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// ReadWithCheck reads a single OpenPGP message packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +// ReadWithCheck additionally checks if the OpenPGP message packet sequence adheres +// to the packet composition rules in rfc4880, if not throws an error. +func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr error, err error) { + tag, len, contents, err := readHeader(r) + if err != nil { + return + } + switch tag { + case packetTypeEncryptedKey: + msgErr = sequence.Next(ESKSymbol) + p = new(EncryptedKey) + case packetTypeSignature: + msgErr = sequence.Next(SigSymbol) + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + msgErr = sequence.Next(ESKSymbol) + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + msgErr = sequence.Next(OPSSymbol) + p = new(OnePassSignature) + case packetTypeCompressed: + msgErr = sequence.Next(CompSymbol) + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + msgErr = sequence.Next(LDSymbol) + p = new(LiteralData) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + msgErr = sequence.Next(EncSymbol) + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume err = errors.UnknownPacketTypeError(tag) + case packetTypePrivateKey, + packetTypePrivateSubkey, + packetTypePublicKey, + packetTypePublicSubkey, + packetTypeUserId, + packetTypeUserAttribute: + msgErr = sequence.Next(UnknownSymbol) + consumeAll(contents) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. 
+ if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } } if p != nil { err = p.parse(contents) @@ -385,17 +473,17 @@ type SignatureType uint8 const ( SigTypeBinary SignatureType = 0x00 - SigTypeText = 0x01 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 - SigTypeCertificationRevocation = 0x30 + SigTypeText SignatureType = 0x01 + SigTypeGenericCert SignatureType = 0x10 + SigTypePersonaCert SignatureType = 0x11 + SigTypeCasualCert SignatureType = 0x12 + SigTypePositiveCert SignatureType = 0x13 + SigTypeSubkeyBinding SignatureType = 0x18 + SigTypePrimaryKeyBinding SignatureType = 0x19 + SigTypeDirectSignature SignatureType = 0x1F + SigTypeKeyRevocation SignatureType = 0x20 + SigTypeSubkeyRevocation SignatureType = 0x28 + SigTypeCertificationRevocation SignatureType = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for @@ -412,6 +500,11 @@ const ( PubKeyAlgoECDSA PublicKeyAlgorithm = 19 // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 + // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh + PubKeyAlgoX25519 PublicKeyAlgorithm = 25 + PubKeyAlgoX448 PublicKeyAlgorithm = 26 + PubKeyAlgoEd25519 PublicKeyAlgorithm = 27 + PubKeyAlgoEd448 PublicKeyAlgorithm = 28 // Deprecated in RFC 4880, Section 13.5. Use key flags instead. PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 @@ -422,7 +515,7 @@ const ( // key of the given type. func (pka PublicKeyAlgorithm) CanEncrypt() bool { switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448: return true } return false @@ -432,7 +525,7 @@ func (pka PublicKeyAlgorithm) CanEncrypt() bool { // sign a message. func (pka PublicKeyAlgorithm) CanSign() bool { switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448: return true } return false @@ -512,6 +605,11 @@ func (mode AEADMode) TagLength() int { return algorithm.AEADMode(mode).TagLength() } +// IsSupported returns true if the aead mode is supported from the library +func (mode AEADMode) IsSupported() bool { + return algorithm.AEADMode(mode).TagLength() > 0 +} + // new returns a fresh instance of the given mode. func (mode AEADMode) new(block cipher.Block) cipher.AEAD { return algorithm.AEADMode(mode).New(block) @@ -526,8 +624,17 @@ const ( KeySuperseded ReasonForRevocation = 1 KeyCompromised ReasonForRevocation = 2 KeyRetired ReasonForRevocation = 3 + UserIDNotValid ReasonForRevocation = 32 + Unknown ReasonForRevocation = 200 ) +func NewReasonForRevocation(value byte) ReasonForRevocation { + if value < 4 || value == 32 { + return ReasonForRevocation(value) + } + return Unknown +} + // Curve is a mapping to supported ECC curves for key generation. 
// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats type Curve string @@ -549,3 +656,20 @@ type TrustLevel uint8 // TrustAmount represents a trust amount per RFC4880 5.2.3.13 type TrustAmount uint8 + +const ( + // versionSize is the length in bytes of the version value. + versionSize = 1 + // algorithmSize is the length in bytes of the key algorithm value. + algorithmSize = 1 + // keyVersionSize is the length in bytes of the key version value + keyVersionSize = 1 + // keyIdSize is the length in bytes of the key identifier value. + keyIdSize = 8 + // timestampSize is the length in bytes of encoded timestamps. + timestampSize = 4 + // fingerprintSizeV6 is the length in bytes of the key fingerprint in v6. + fingerprintSizeV6 = 32 + // fingerprintSize is the length in bytes of the key fingerprint. + fingerprintSize = 20 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go new file mode 100644 index 00000000000..55a8a56c2d1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go @@ -0,0 +1,222 @@ +package packet + +// This file implements the pushdown automata (PDA) from PGPainless (Paul Schaub) +// to verify pgp packet sequences. See Paul's blogpost for more details: +// https://blog.jabberhead.tk/2022/10/26/implementing-packet-sequence-validation-using-pushdown-automata/ +import ( + "fmt" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +func NewErrMalformedMessage(from State, input InputSymbol, stackSymbol StackSymbol) errors.ErrMalformedMessage { + return errors.ErrMalformedMessage(fmt.Sprintf("state %d, input symbol %d, stack symbol %d ", from, input, stackSymbol)) +} + +// InputSymbol defines the input alphabet of the PDA +type InputSymbol uint8 + +const ( + LDSymbol InputSymbol = iota + SigSymbol + OPSSymbol + CompSymbol + ESKSymbol + EncSymbol + EOSSymbol + UnknownSymbol +) + +// StackSymbol defines the stack alphabet of the PDA +type StackSymbol int8 + +const ( + MsgStackSymbol StackSymbol = iota + OpsStackSymbol + KeyStackSymbol + EndStackSymbol + EmptyStackSymbol +) + +// State defines the states of the PDA +type State int8 + +const ( + OpenPGPMessage State = iota + ESKMessage + LiteralMessage + CompressedMessage + EncryptedMessage + ValidMessage +) + +// transition represents a state transition in the PDA +type transition func(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) + +// SequenceVerifier is a pushdown automata to verify +// PGP messages packet sequences according to rfc4880. +type SequenceVerifier struct { + stack []StackSymbol + state State +} + +// Next performs a state transition with the given input symbol. +// If the transition fails a ErrMalformedMessage is returned. +func (sv *SequenceVerifier) Next(input InputSymbol) error { + for { + stackSymbol := sv.popStack() + transitionFunc := getTransition(sv.state) + nextState, newStackSymbols, redo, err := transitionFunc(input, stackSymbol) + if err != nil { + return err + } + if redo { + sv.pushStack(stackSymbol) + } + for _, newStackSymbol := range newStackSymbols { + sv.pushStack(newStackSymbol) + } + sv.state = nextState + if !redo { + break + } + } + return nil +} + +// Valid returns true if RDA is in a valid state. 
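+// An illustrative sequence for a one-pass signed literal message
+// (one-pass signature, literal data, signature, end of stream):
+//
+//	sv := NewSequenceVerifier()
+//	_ = sv.Next(OPSSymbol)
+//	_ = sv.Next(LDSymbol)
+//	_ = sv.Next(SigSymbol)
+//	_ = sv.Next(EOSSymbol)
+//	// sv.Valid() now reports true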
+func (sv *SequenceVerifier) Valid() bool { + return sv.state == ValidMessage && len(sv.stack) == 0 +} + +func (sv *SequenceVerifier) AssertValid() error { + if !sv.Valid() { + return errors.ErrMalformedMessage("invalid message") + } + return nil +} + +func NewSequenceVerifier() *SequenceVerifier { + return &SequenceVerifier{ + stack: []StackSymbol{EndStackSymbol, MsgStackSymbol}, + state: OpenPGPMessage, + } +} + +func (sv *SequenceVerifier) popStack() StackSymbol { + if len(sv.stack) == 0 { + return EmptyStackSymbol + } + elemIndex := len(sv.stack) - 1 + stackSymbol := sv.stack[elemIndex] + sv.stack = sv.stack[:elemIndex] + return stackSymbol +} + +func (sv *SequenceVerifier) pushStack(stackSymbol StackSymbol) { + sv.stack = append(sv.stack, stackSymbol) +} + +func getTransition(from State) transition { + switch from { + case OpenPGPMessage: + return fromOpenPGPMessage + case LiteralMessage: + return fromLiteralMessage + case CompressedMessage: + return fromCompressedMessage + case EncryptedMessage: + return fromEncryptedMessage + case ESKMessage: + return fromESKMessage + case ValidMessage: + return fromValidMessage + } + return nil +} + +// fromOpenPGPMessage is the transition for the state OpenPGPMessage. +func fromOpenPGPMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + if stackSymbol != MsgStackSymbol { + return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol) + } + switch input { + case LDSymbol: + return LiteralMessage, nil, false, nil + case SigSymbol: + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, false, nil + case OPSSymbol: + return OpenPGPMessage, []StackSymbol{OpsStackSymbol, MsgStackSymbol}, false, nil + case CompSymbol: + return CompressedMessage, nil, false, nil + case ESKSymbol: + return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil + case EncSymbol: + return EncryptedMessage, nil, false, nil + } + return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol) +} + +// fromESKMessage is the transition for the state ESKMessage. +func fromESKMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + if stackSymbol != KeyStackSymbol { + return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol) + } + switch input { + case ESKSymbol: + return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil + case EncSymbol: + return EncryptedMessage, nil, false, nil + } + return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol) +} + +// fromLiteralMessage is the transition for the state LiteralMessage. +func fromLiteralMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return LiteralMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return 0, nil, false, NewErrMalformedMessage(LiteralMessage, input, stackSymbol) +} + +// fromLiteralMessage is the transition for the state CompressedMessage. 
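+// Any other input is replayed (redo=true) against the OpenPGPMessage state,
+// since a compressed packet wraps a complete nested OpenPGP message.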
+func fromCompressedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return CompressedMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil +} + +// fromEncryptedMessage is the transition for the state EncryptedMessage. +func fromEncryptedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return EncryptedMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil +} + +// fromValidMessage is the transition for the state ValidMessage. +func fromValidMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + return 0, nil, false, NewErrMalformedMessage(ValidMessage, input, stackSymbol) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go new file mode 100644 index 00000000000..2d714723cf8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go @@ -0,0 +1,24 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// UnsupportedPackage represents a OpenPGP packet with a known packet type +// but with unsupported content. +type UnsupportedPacket struct { + IncompletePacket Packet + Error errors.UnsupportedError +} + +// Implements the Packet interface +func (up *UnsupportedPacket) parse(read io.Reader) error { + err := up.IncompletePacket.parse(read) + if castedErr, ok := err.(errors.UnsupportedError); ok { + up.Error = castedErr + return nil + } + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go new file mode 100644 index 00000000000..06fa83740d8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go @@ -0,0 +1,27 @@ +package packet + +import ( + "io" + "io/ioutil" +) + +// Padding type represents a Padding Packet (Tag 21). +// The padding type is represented by the length of its padding. +// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-padding-packet-tag-21 +type Padding int + +// parse just ignores the padding content. +func (pad Padding) parse(reader io.Reader) error { + _, err := io.CopyN(ioutil.Discard, reader, int64(pad)) + return err +} + +// SerializePadding writes the padding to writer. 
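+// The body is int(pad) octets copied from rand, so the padding carries no
+// structure; for example (illustrative, with w and rand supplied by the
+// caller), Padding(32).SerializePadding(w, rand) emits a 32-byte padding
+// packet.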
+func (pad Padding) SerializePadding(writer io.Writer, rand io.Reader) error { + err := serializeHeader(writer, packetPadding, int(pad)) + if err != nil { + return err + } + _, err = io.CopyN(writer, rand, int64(pad)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go index 2fc4386437c..099b4d9ba08 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -9,22 +9,28 @@ import ( "crypto" "crypto/cipher" "crypto/dsa" - "crypto/rand" "crypto/rsa" "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "fmt" "io" - "io/ioutil" "math/big" "strconv" "time" "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" "github.com/ProtonMail/go-crypto/openpgp/s2k" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" + "golang.org/x/crypto/hkdf" ) // PrivateKey represents a possibly encrypted private key. See RFC 4880, @@ -35,14 +41,14 @@ type PrivateKey struct { encryptedData []byte cipher CipherFunction s2k func(out, in []byte) - // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or + aead AEADMode // only relevant if S2KAEAD is enabled + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or // crypto.Signer/crypto.Decrypter (Decryptor RSA only). - PrivateKey interface{} - sha1Checksum bool - iv []byte + PrivateKey interface{} + iv []byte // Type of encryption of the S2K packet - // Allowed values are 0 (Not encrypted), 254 (SHA1), or + // Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or // 255 (2-byte checksum) s2kType S2KType // Full parameters of the S2K packet @@ -55,6 +61,8 @@ type S2KType uint8 const ( // S2KNON unencrypt S2KNON S2KType = 0 + // S2KAEAD use authenticated encryption + S2KAEAD S2KType = 253 // S2KSHA1 sha1 sum check S2KSHA1 S2KType = 254 // S2KCHECKSUM sum check @@ -103,6 +111,34 @@ func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKe return pk } +func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewX448PrivateKey(creationTime time.Time, priv *x448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd25519PrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + // NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that // implements RSA, ECDSA or EdDSA. 
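// Ed25519 and Ed448 private keys from this module are accepted as well.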
func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey { @@ -122,6 +158,14 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) case eddsa.PrivateKey: pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case *ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case *ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) + case ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) default: panic("openpgp: unknown signer type in NewSignerPrivateKey") } @@ -129,7 +173,7 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey return pk } -// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey. func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { pk := new(PrivateKey) switch priv := decrypter.(type) { @@ -139,6 +183,10 @@ func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *Priv pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) case *ecdh.PrivateKey: pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + case *x25519.PrivateKey: + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + case *x448.PrivateKey: + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) default: panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") } @@ -152,6 +200,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { return } v5 := pk.PublicKey.Version == 5 + v6 := pk.PublicKey.Version == 6 var buf [1]byte _, err = readFull(r, buf[:]) @@ -160,7 +209,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { } pk.s2kType = S2KType(buf[0]) var optCount [1]byte - if v5 { + if v5 || (v6 && pk.s2kType != S2KNON) { if _, err = readFull(r, optCount[:]); err != nil { return } @@ -170,9 +219,9 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { case S2KNON: pk.s2k = nil pk.Encrypted = false - case S2KSHA1, S2KCHECKSUM: - if v5 && pk.s2kType == S2KCHECKSUM { - return errors.StructuralError("wrong s2k identifier for version 5") + case S2KSHA1, S2KCHECKSUM, S2KAEAD: + if (v5 || v6) && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version)) } _, err = readFull(r, buf[:]) if err != nil { @@ -182,6 +231,29 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { if pk.cipher != 0 && !pk.cipher.IsSupported() { return errors.UnsupportedError("unsupported cipher function in private key") } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. + if pk.s2kType == S2KAEAD { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.aead = AEADMode(buf[0]) + if !pk.aead.IsSupported() { + return errors.UnsupportedError("unsupported aead mode in private key") + } + } + + // [Optional] Only for a version 6 packet, + // and if string-to-key usage octet was 255, 254, or 253, + // an one-octet count of the following field. 
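+	// The count octet is read but not otherwise used here; s2k.ParseIntoParams
+	// below determines the specifier length from its own fields.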
+ if v6 { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + } + pk.s2kParams, err = s2k.ParseIntoParams(r) if err != nil { return @@ -194,23 +266,32 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { return } pk.Encrypted = true - if pk.s2kType == S2KSHA1 { - pk.sha1Checksum = true - } default: return errors.UnsupportedError("deprecated s2k function in private key") } if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { + var ivSize int + // If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode, + // unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size. + // For all other S2K modes, it's always the block size. + if !v5 && pk.s2kType == S2KAEAD { + ivSize = pk.aead.IvLength() + } else { + ivSize = pk.cipher.blockSize() + } + + if ivSize == 0 { return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) } - pk.iv = make([]byte, blockSize) + pk.iv = make([]byte, ivSize) _, err = readFull(r, pk.iv) if err != nil { return } + if v5 && pk.s2kType == S2KAEAD { + pk.iv = pk.iv[:pk.aead.IvLength()] + } } var privateKeyData []byte @@ -230,7 +311,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { return } } else { - privateKeyData, err = ioutil.ReadAll(r) + privateKeyData, err = io.ReadAll(r) if err != nil { return } @@ -239,16 +320,22 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { if len(privateKeyData) < 2 { return errors.StructuralError("truncated private key data") } - var sum uint16 - for i := 0; i < len(privateKeyData)-2; i++ { - sum += uint16(privateKeyData[i]) - } - if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || - privateKeyData[len(privateKeyData)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") + if pk.Version != 6 { + // checksum + var sum uint16 + for i := 0; i < len(privateKeyData)-2; i++ { + sum += uint16(privateKeyData[i]) + } + if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || + privateKeyData[len(privateKeyData)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + privateKeyData = privateKeyData[:len(privateKeyData)-2] + return pk.parsePrivateKey(privateKeyData) + } else { + // No checksum + return pk.parsePrivateKey(privateKeyData) } - privateKeyData = privateKeyData[:len(privateKeyData)-2] - return pk.parsePrivateKey(privateKeyData) } pk.encryptedData = privateKeyData @@ -280,18 +367,59 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) { optional := bytes.NewBuffer(nil) if pk.Encrypted || pk.Dummy() { - optional.Write([]byte{uint8(pk.cipher)}) - if err := pk.s2kParams.Serialize(optional); err != nil { + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a one-octet symmetric encryption algorithm. + if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil { + return + } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. + if pk.s2kType == S2KAEAD { + if _, err = optional.Write([]byte{uint8(pk.aead)}); err != nil { + return + } + } + + s2kBuffer := bytes.NewBuffer(nil) + if err := pk.s2kParams.Serialize(s2kBuffer); err != nil { return err } + // [Optional] Only for a version 6 packet, and if string-to-key + // usage octet was 255, 254, or 253, an one-octet + // count of the following field. 
+ if pk.Version == 6 { + if _, err = optional.Write([]byte{uint8(s2kBuffer.Len())}); err != nil { + return + } + } + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a string-to-key (S2K) specifier. The length of the string-to-key specifier + // depends on its type + if _, err = io.Copy(optional, s2kBuffer); err != nil { + return + } + + // IV if pk.Encrypted { - optional.Write(pk.iv) + if _, err = optional.Write(pk.iv); err != nil { + return + } + if pk.Version == 5 && pk.s2kType == S2KAEAD { + // Add padding for version 5 + padding := make([]byte, pk.cipher.blockSize()-len(pk.iv)) + if _, err = optional.Write(padding); err != nil { + return + } + } } } - if pk.Version == 5 { + if pk.Version == 5 || (pk.Version == 6 && pk.s2kType != S2KNON) { contents.Write([]byte{uint8(optional.Len())}) } - io.Copy(contents, optional) + + if _, err := io.Copy(contents, optional); err != nil { + return err + } if !pk.Dummy() { l := 0 @@ -303,8 +431,10 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) { return err } l = buf.Len() - checksum := mod64kHash(buf.Bytes()) - buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + if pk.Version != 6 { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } priv = buf.Bytes() } else { priv, l = pk.encryptedData, len(pk.encryptedData) @@ -370,6 +500,26 @@ func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { return err } +func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeX448PrivateKey(w io.Writer, priv *x448.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeEd25519PrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + +func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + // decrypt decrypts an encrypted private key using a decryption key. 
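Note: the two-octet checksum handled in parse above (and again in decrypt just below) is the legacy protection that v6 keys drop entirely: the 16-bit sum of all plaintext secret-key octets, stored big-endian after the key material. A minimal sketch of that computation, for orientation only:

package example

// checksum16 mirrors the legacy OpenPGP secret-key checksum: the sum of all
// plaintext secret-key octets modulo 65536, returned as two big-endian octets.
func checksum16(data []byte) [2]byte {
	var sum uint16
	for _, b := range data {
		sum += uint16(b)
	}
	return [2]byte{byte(sum >> 8), byte(sum)}
}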
func (pk *PrivateKey) decrypt(decryptionKey []byte) error { if pk.Dummy() { @@ -378,37 +528,51 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error { if !pk.Encrypted { return nil } - block := pk.cipher.new(decryptionKey) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") + var data []byte + switch pk.s2kType { + case S2KAEAD: + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) + // Decrypt the encrypted key material with aead + data, err = aead.Open(nil, pk.iv, pk.encryptedData, additionalData) + if err != nil { + return err } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") + case S2KSHA1, S2KCHECKSUM: + cfb := cipher.NewCFBDecrypter(block, pk.iv) + data = make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + if pk.s2kType == S2KSHA1 { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] } - data = data[:len(data)-2] + default: + return errors.InvalidArgumentError("invalid s2k type") } err := pk.parsePrivateKey(data) @@ -424,7 +588,6 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error { pk.s2k = nil pk.Encrypted = false pk.encryptedData = nil - return nil } @@ -440,6 +603,9 @@ func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) e if err != nil { return err } + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } return pk.decrypt(key) } @@ -454,11 +620,14 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error { key := make([]byte, pk.cipher.KeySize()) pk.s2k(key, passphrase) + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } return pk.decrypt(key) } // DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase. -// Avoids recomputation of similar s2k key derivations. +// Avoids recomputation of similar s2k key derivations. func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error { // Create a cache to avoid recomputation of key derviations for the same passphrase. s2kCache := &s2k.Cache{} @@ -474,7 +643,7 @@ func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error { } // encrypt encrypts an unencrypted private key. 
-func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error { +func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error { if pk.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } @@ -485,7 +654,7 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip if len(key) != cipherFunction.KeySize() { return errors.InvalidArgumentError("supplied encryption key has the wrong size") } - + priv := bytes.NewBuffer(nil) err := pk.serializePrivateKey(priv) if err != nil { @@ -497,35 +666,53 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip pk.s2k, err = pk.s2kParams.Function() if err != nil { return err - } + } privateKeyBytes := priv.Bytes() - pk.sha1Checksum = true + pk.s2kType = s2kType block := pk.cipher.new(key) - pk.iv = make([]byte, pk.cipher.blockSize()) - _, err = rand.Read(pk.iv) - if err != nil { - return err - } - cfb := cipher.NewCFBEncrypter(block, pk.iv) - - if pk.sha1Checksum { - pk.s2kType = S2KSHA1 - h := sha1.New() - h.Write(privateKeyBytes) - sum := h.Sum(nil) - privateKeyBytes = append(privateKeyBytes, sum...) - } else { - pk.s2kType = S2KCHECKSUM - var sum uint16 - for _, b := range privateKeyBytes { - sum += uint16(b) + switch s2kType { + case S2KAEAD: + if pk.aead == 0 { + return errors.StructuralError("aead mode is not set on key") + } + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err } - priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) + pk.iv = make([]byte, aead.NonceSize()) + _, err = io.ReadFull(rand, pk.iv) + if err != nil { + return err + } + // Decrypt the encrypted key material with aead + pk.encryptedData = aead.Seal(nil, pk.iv, privateKeyBytes, additionalData) + case S2KSHA1, S2KCHECKSUM: + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = io.ReadFull(rand, pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + if s2kType == S2KSHA1 { + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) + } else { + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + privateKeyBytes = append(privateKeyBytes, []byte{uint8(sum >> 8), uint8(sum)}...) + } + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + default: + return errors.InvalidArgumentError("invalid s2k type for encryption") } - pk.encryptedData = make([]byte, len(privateKeyBytes)) - cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) pk.Encrypted = true pk.PrivateKey = nil return err @@ -544,8 +731,15 @@ func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error return err } s2k(key, passphrase) + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + pk.aead = config.AEAD().Mode() + pk.cipher = config.Cipher() + key = pk.applyHKDF(key) + } // Encrypt the private key with the derived encryption key. - return pk.encrypt(key, params, config.Cipher()) + return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random()) } // EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase. 
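Note: before the AEAD protection above is applied, the passphrase-derived S2K output is expanded with HKDF-SHA256, with the packet tag, key version, cipher ID and AEAD mode ID as the info string (see applyHKDF further down in this file). A minimal sketch of that derivation, assuming a hypothetical v6 primary key protected with AES-256 (cipher ID 9) and OCB (AEAD mode ID 2); all concrete values are illustrative:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	// s2kOutput stands in for the key returned by the S2K function for the passphrase.
	s2kOutput := make([]byte, 32)
	// info mirrors applyHKDF: secret-key packet tag (0xc5), key version 6,
	// cipher ID 9 (AES-256), AEAD mode ID 2 (OCB).
	info := []byte{0xc5, 6, 9, 2}
	kdf := hkdf.New(sha256.New, s2kOutput, nil, info)
	aeadKey := make([]byte, 32) // AES-256 key size
	if _, err := io.ReadFull(kdf, aeadKey); err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte AEAD protection key\n", len(aeadKey))
}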
@@ -564,7 +758,16 @@ func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) e s2k(encryptionKey, passphrase) for _, key := range keys { if key != nil && !key.Dummy() && !key.Encrypted { - err = key.encrypt(encryptionKey, params, config.Cipher()) + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + key.aead = config.AEAD().Mode() + key.cipher = config.Cipher() + derivedKey := key.applyHKDF(encryptionKey) + err = key.encrypt(derivedKey, params, s2kType, config.Cipher(), config.Random()) + } else { + err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random()) + } if err != nil { return err } @@ -581,7 +784,7 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error { S2KMode: s2k.IteratedSaltedS2K, S2KCount: 65536, Hash: crypto.SHA256, - } , + }, DefaultCipher: CipherAES256, } return pk.EncryptWithConfig(passphrase, config) @@ -601,6 +804,14 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { err = serializeEdDSAPrivateKey(w, priv) case *ecdh.PrivateKey: err = serializeECDHPrivateKey(w, priv) + case *x25519.PrivateKey: + err = serializeX25519PrivateKey(w, priv) + case *x448.PrivateKey: + err = serializeX448PrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEd25519PrivateKey(w, priv) + case *ed448.PrivateKey: + err = serializeEd448PrivateKey(w, priv) default: err = errors.InvalidArgumentError("unknown private key type") } @@ -621,8 +832,18 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { return pk.parseECDHPrivateKey(data) case PubKeyAlgoEdDSA: return pk.parseEdDSAPrivateKey(data) + case PubKeyAlgoX25519: + return pk.parseX25519PrivateKey(data) + case PubKeyAlgoX448: + return pk.parseX448PrivateKey(data) + case PubKeyAlgoEd25519: + return pk.parseEd25519PrivateKey(data) + case PubKeyAlgoEd448: + return pk.parseEd448PrivateKey(data) + default: + err = errors.StructuralError("unknown private key type") + return } - panic("impossible") } func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { @@ -743,6 +964,86 @@ func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { return nil } +func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey) + privateKey := x25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x25519.KeySize) + + if len(data) != x25519.KeySize { + err = errors.StructuralError("wrong x25519 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x25519.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseX448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x448.PublicKey) + privateKey := x448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x448.KeySize) + + if len(data) != x448.KeySize { + err = errors.StructuralError("wrong x448 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x448.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed25519.PublicKey) + privateKey := ed25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed25519.SeedSize { + err = errors.StructuralError("wrong ed25519 key size") + 
return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed25519.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed448.PublicKey) + privateKey := ed448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed448.SeedSize { + err = errors.StructuralError("wrong ed448 key size") + return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed448.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) @@ -767,6 +1068,41 @@ func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { return nil } +func (pk *PrivateKey) additionalData() ([]byte, error) { + additionalData := bytes.NewBuffer(nil) + // Write additional data prefix based on packet type + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + // Write public key to additional data + _, err := additionalData.Write([]byte{packetByte}) + if err != nil { + return nil, err + } + err = pk.PublicKey.serializeWithoutHeaders(additionalData) + if err != nil { + return nil, err + } + return additionalData.Bytes(), nil +} + +func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte { + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + associatedData := []byte{packetByte, byte(pk.Version), byte(pk.cipher), byte(pk.aead)} + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + encryptionKey := make([]byte, pk.cipher.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + return encryptionKey +} + func validateDSAParameters(priv *dsa.PrivateKey) error { p := priv.P // group prime q := priv.Q // subgroup order diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 3402b8c140c..dd93c98702c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -5,7 +5,6 @@ package packet import ( - "crypto" "crypto/dsa" "crypto/rsa" "crypto/sha1" @@ -21,23 +20,24 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" ) -type kdfHashFunction byte -type kdfAlgorithm byte - // PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. 
type PublicKey struct { Version int CreationTime time.Time PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey Fingerprint []byte KeyId uint64 IsSubkey bool @@ -61,11 +61,18 @@ func (pk *PublicKey) UpgradeToV5() { pk.setFingerprintAndKeyId() } +// UpgradeToV6 updates the version of the key to v6, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV6() { + pk.Version = 6 + pk.setFingerprintAndKeyId() +} + // signingKey provides a convenient abstraction over signature verification // for v3 and v4 public keys. type signingKey interface { SerializeForHash(io.Writer) error - SerializeSignaturePrefix(io.Writer) + SerializeSignaturePrefix(io.Writer) error serializeWithoutHeaders(io.Writer) error } @@ -174,6 +181,54 @@ func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey return pk } +func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewX448PublicKey(creationTime time.Time, pub *x448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd25519PublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + func (pk *PublicKey) parse(r io.Reader) (err error) { // RFC 4880, section 5.5.2 var buf [6]byte @@ -181,12 +236,14 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { if err != nil { return } - if buf[0] != 4 && buf[0] != 5 { + if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 { return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) } pk.Version = int(buf[0]) - if pk.Version == 5 { + if pk.Version >= 5 { + // Read the four-octet scalar octet count + // The count is not used in this implementation var n [4]byte _, err = readFull(r, n[:]) if err != nil { @@ -195,6 +252,7 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { } pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + // Ignore four-ocet length switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: err = pk.parseRSA(r) @@ -208,6 +266,14 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { err = pk.parseECDH(r) case PubKeyAlgoEdDSA: err = pk.parseEdDSA(r) + case PubKeyAlgoX25519: + err = pk.parseX25519(r) + case PubKeyAlgoX448: + err = pk.parseX448(r) + case PubKeyAlgoEd25519: + err = pk.parseEd25519(r) + case PubKeyAlgoEd448: + err = pk.parseEd448(r) default: err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) } @@ -221,15 +287,21 @@ func (pk *PublicKey) parse(r io.Reader) (err error) { 
func (pk *PublicKey) setFingerprintAndKeyId() { // RFC 4880, section 12.2 - if pk.Version == 5 { + if pk.Version >= 5 { fingerprint := sha256.New() - pk.SerializeForHash(fingerprint) + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } pk.Fingerprint = make([]byte, 32) copy(pk.Fingerprint, fingerprint.Sum(nil)) pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) } else { fingerprint := sha1.New() - pk.SerializeForHash(fingerprint) + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } pk.Fingerprint = make([]byte, 20) copy(pk.Fingerprint, fingerprint.Sum(nil)) pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) @@ -324,16 +396,17 @@ func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { if _, err = pk.oid.ReadFrom(r); err != nil { return } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } curveInfo := ecc.FindByOid(pk.oid) if curveInfo == nil { return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + c, ok := curveInfo.Curve.(ecc.ECDSACurve) if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) @@ -353,6 +426,12 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { if _, err = pk.oid.ReadFrom(r); err != nil { return } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + pk.p = new(encoding.MPI) if _, err = pk.p.ReadFrom(r); err != nil { return @@ -362,12 +441,6 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { return } - curveInfo := ecc.FindByOid(pk.oid) - - if curveInfo == nil { - return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) - } - c, ok := curveInfo.Curve.(ecc.ECDHCurve) if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) @@ -400,6 +473,7 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { if _, err = pk.oid.ReadFrom(r); err != nil { return } + curveInfo := ecc.FindByOid(pk.oid) if curveInfo == nil { return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) @@ -435,75 +509,148 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { return } +func (pk *PublicKey) parseX25519(r io.Reader) (err error) { + point := make([]byte, x25519.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseX448(r io.Reader) (err error) { + point := make([]byte, x448.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd25519(r io.Reader) (err error) { + point := make([]byte, ed25519.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd448(r io.Reader) (err error) { + point := make([]byte, ed448.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + // SerializeForHash serializes the PublicKey to w with the special packet // header format needed for hashing. 
func (pk *PublicKey) SerializeForHash(w io.Writer) error { - pk.SerializeSignaturePrefix(w) + if err := pk.SerializeSignaturePrefix(w); err != nil { + return err + } return pk.serializeWithoutHeaders(w) } // SerializeSignaturePrefix writes the prefix for this public key to the given Writer. // The prefix is used when calculating a signature over this public key. See // RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { var pLength = pk.algorithmSpecificByteCount() - if pk.Version == 5 { - pLength += 10 // version, timestamp (4), algorithm, key octet count (4). - w.Write([]byte{ - 0x9A, + // version, timestamp, algorithm + pLength += versionSize + timestampSize + algorithmSize + if pk.Version >= 5 { + // key octet count (4). + pLength += 4 + _, err := w.Write([]byte{ + // When a v4 signature is made over a key, the hash data starts with the octet 0x99, followed by a two-octet length + // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts + // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet. + 0x95 + byte(pk.Version), byte(pLength >> 24), byte(pLength >> 16), byte(pLength >> 8), byte(pLength), }) - return + if err != nil { + return err + } + return nil + } + if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil { + return err } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return nil } func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header + length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header length += pk.algorithmSpecificByteCount() - if pk.Version == 5 { + if pk.Version >= 5 { length += 4 // octet key count } packetType := packetTypePublicKey if pk.IsSubkey { packetType = packetTypePublicSubkey } - err = serializeHeader(w, packetType, length) + err = serializeHeader(w, packetType, int(length)) if err != nil { return } return pk.serializeWithoutHeaders(w) } -func (pk *PublicKey) algorithmSpecificByteCount() int { - length := 0 +func (pk *PublicKey) algorithmSpecificByteCount() uint32 { + length := uint32(0) switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += int(pk.n.EncodedLength()) - length += int(pk.e.EncodedLength()) + length += uint32(pk.n.EncodedLength()) + length += uint32(pk.e.EncodedLength()) case PubKeyAlgoDSA: - length += int(pk.p.EncodedLength()) - length += int(pk.q.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.q.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) case PubKeyAlgoElGamal: - length += int(pk.p.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) case PubKeyAlgoECDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) case PubKeyAlgoECDH: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - length += int(pk.kdf.EncodedLength()) + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += 
uint32(pk.kdf.EncodedLength()) case PubKeyAlgoEdDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + case PubKeyAlgoX25519: + length += x25519.KeySize + case PubKeyAlgoX448: + length += x448.KeySize + case PubKeyAlgoEd25519: + length += ed25519.PublicKeySize + case PubKeyAlgoEd448: + length += ed448.PublicKeySize default: panic("unknown public key algorithm") } @@ -522,7 +669,7 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { return } - if pk.Version == 5 { + if pk.Version >= 5 { n := pk.algorithmSpecificByteCount() if _, err = w.Write([]byte{ byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), @@ -580,6 +727,22 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { } _, err = w.Write(pk.p.EncodedBytes()) return + case PubKeyAlgoX25519: + publicKey := pk.PublicKey.(*x25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoX448: + publicKey := pk.PublicKey.(*x448.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd25519: + publicKey := pk.PublicKey.(*ed25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd448: + publicKey := pk.PublicKey.(*ed448.PublicKey) + _, err = w.Write(publicKey.Point) + return } return errors.InvalidArgumentError("bad public-key algorithm") } @@ -600,7 +763,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro } signed.Write(sig.HashSuffix) hashBytes := signed.Sum(nil) - if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { + // see discussion https://github.com/ProtonMail/go-crypto/issues/107 + if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { return errors.SignatureError("hash tag doesn't match") } @@ -639,6 +803,18 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro return errors.SignatureError("EdDSA verification failure") } return nil + case PubKeyAlgoEd25519: + ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey) + if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("Ed25519 verification failure") + } + return nil + case PubKeyAlgoEd448: + ed448PublicKey := pk.PublicKey.(*ed448.PublicKey) + if !ed448.Verify(ed448PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("ed448 verification failure") + } + return nil default: return errors.SignatureError("Unsupported public key algorithm used in signature") } @@ -646,11 +822,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro // keySignatureHash returns a Hash of the message that needs to be signed for // pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() +func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) { + h = hashFunc // RFC 4880, section 5.2.4 err = pk.SerializeForHash(h) @@ -665,7 +838,11 @@ func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, // VerifyKeySignature returns nil iff sig is a valid signature, made by this // public key, of signed. 
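Note: the hashed prefix written by SerializeSignaturePrefix above changes with the key version: v4 keys hash 0x99 plus a two-octet body length, while v5 and v6 keys hash 0x9A/0x9B (0x95 plus the version octet) followed by a four-octet length. A small sketch of just that framing, with bodyLen standing in for the serialized key body length:

package example

// keyHashPrefix sketches the framing hashed before a key packet body when
// computing key signatures: 0x99 + 2-octet length for v4 keys, and
// 0x95+version (0x9A for v5, 0x9B for v6) + 4-octet length for newer keys.
func keyHashPrefix(version int, bodyLen uint32) []byte {
	if version >= 5 {
		return []byte{
			0x95 + byte(version),
			byte(bodyLen >> 24), byte(bodyLen >> 16), byte(bodyLen >> 8), byte(bodyLen),
		}
	}
	return []byte{0x99, byte(bodyLen >> 8), byte(bodyLen)}
}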
func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) if err != nil { return err } @@ -679,10 +856,14 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error if sig.EmbeddedSignature == nil { return errors.StructuralError("signing subkey is missing cross-signature") } + preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify() + if err != nil { + return err + } // Verify the cross-signature. This is calculated over the same // data as the main signature, so we cannot just recursively // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil { return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) } if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { @@ -693,32 +874,31 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error return nil } -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - err = pk.SerializeForHash(h) - - return +func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) { + return pk.SerializeForHash(hashFunc) } // VerifyRevocationSignature returns nil iff sig is a valid signature, made by this // public key. func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) + preparedHash, err := sig.PrepareVerify() if err != nil { return err } - return pk.VerifySignature(h, sig) + if keyRevocationHash(pk, preparedHash); err != nil { + return err + } + return pk.VerifySignature(preparedHash, sig) } // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, // made by this public key, of signed. func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) if err != nil { return err } @@ -727,15 +907,15 @@ func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *Pub // userIdSignatureHash returns a Hash of the message that needs to be signed // to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() +func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) { // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) + if err := pk.SerializeSignaturePrefix(h); err != nil { + return err + } + if err := pk.serializeWithoutHeaders(h); err != nil { + return err + } var buf [5]byte buf[0] = 0xb4 @@ -746,16 +926,37 @@ func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash h.Write(buf[:]) h.Write([]byte(id)) - return + return nil +} + +// directKeySignatureHash returns a Hash of the message that needs to be signed. 
+func directKeySignatureHash(pk *PublicKey, h hash.Hash) (err error) { + return pk.SerializeForHash(h) } // VerifyUserIdSignature returns nil iff sig is a valid signature, made by this // public key, that id is the identity of pub. func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) + h, err := sig.PrepareVerify() + if err != nil { + return err + } + if err := userIdSignatureHash(id, pub, h); err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyDirectKeySignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) { + h, err := sig.PrepareVerify() if err != nil { return err } + if err := directKeySignatureHash(pk, h); err != nil { + return err + } return pk.VerifySignature(h, sig) } @@ -786,21 +987,49 @@ func (pk *PublicKey) BitLength() (bitLength uint16, err error) { bitLength = pk.p.BitLength() case PubKeyAlgoEdDSA: bitLength = pk.p.BitLength() + case PubKeyAlgoX25519: + bitLength = x25519.KeySize * 8 + case PubKeyAlgoX448: + bitLength = x448.KeySize * 8 + case PubKeyAlgoEd25519: + bitLength = ed25519.PublicKeySize * 8 + case PubKeyAlgoEd448: + bitLength = ed448.PublicKeySize * 8 default: err = errors.InvalidArgumentError("bad public-key algorithm") } return } +// Curve returns the used elliptic curve of this public key. +// Returns an error if no elliptic curve is used. +func (pk *PublicKey) Curve() (curve Curve, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoECDSA, PubKeyAlgoECDH, PubKeyAlgoEdDSA: + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return "", errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + curve = Curve(curveInfo.GenName) + case PubKeyAlgoEd25519, PubKeyAlgoX25519: + curve = Curve25519 + case PubKeyAlgoEd448, PubKeyAlgoX448: + curve = Curve448 + default: + err = errors.InvalidArgumentError("public key does not operate with an elliptic curve") + } + return +} + // KeyExpired returns whether sig is a self-signature of a key that has // expired or is created in the future. func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { - if pk.CreationTime.After(currentTime) { + if pk.CreationTime.Unix() > currentTime.Unix() { return true } if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { return false } expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) + return currentTime.Unix() > expiry.Unix() } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go index 10215fe5f23..dd84092392a 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -10,6 +10,12 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/errors" ) +type PacketReader interface { + Next() (p Packet, err error) + Push(reader io.Reader) (err error) + Unread(p Packet) +} + // Reader reads packets from an io.Reader and allows packets to be 'unread' so // that they result from the next call to Next. type Reader struct { @@ -26,37 +32,81 @@ type Reader struct { const maxReaders = 32 // Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. +// the top-most io.Reader. 
Unknown/unsupported/Marker packet types are skipped. func (r *Reader) Next() (p Packet, err error) { + for { + p, err := r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return p, nil + } + } + return nil, io.EOF +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown/Marker packet types are skipped while unsupported +// packets are returned as UnsupportedPacket type. +func (r *Reader) NextWithUnsupported() (p Packet, err error) { + for { + p, err = r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if casteErr, ok := err.(errors.UnsupportedError); ok { + return &UnsupportedPacket{ + IncompletePacket: p, + Error: casteErr, + }, nil + } + return + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return + } + } + return nil, io.EOF +} + +func (r *Reader) read() (p Packet, err error) { if len(r.q) > 0 { p = r.q[len(r.q)-1] r.q = r.q[:len(r.q)-1] return } - for len(r.readers) > 0 { p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } if err == io.EOF { r.readers = r.readers[:len(r.readers)-1] continue } - // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. - if _, ok := err.(errors.UnknownPacketTypeError); ok { - continue - } - if _, ok := err.(errors.UnsupportedError); ok { - switch p.(type) { - case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: - return nil, err - } - continue - } - return nil, err + return p, err } - return nil, io.EOF } @@ -84,3 +134,76 @@ func NewReader(r io.Reader) *Reader { readers: []io.Reader{r}, } } + +// CheckReader is similar to Reader but additionally +// uses the pushdown automata to verify the read packet sequence. +type CheckReader struct { + Reader + verifier *SequenceVerifier + fullyRead bool +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +// If the read packet sequence does not conform to the packet composition +// rules in rfc4880, it returns an error. 
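Note: from the caller's side, the checked reader is driven exactly like Reader; only construction differs, and a malformed packet sequence surfaces as an error from Next. A rough usage sketch (readAllPackets is an illustrative helper, not part of the package):

package example

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// readAllPackets drains a packet stream through the checked reader so that
// grammar violations in the packet sequence are reported alongside parse errors.
func readAllPackets(msg io.Reader) ([]packet.Packet, error) {
	r := packet.NewCheckReader(msg)
	var packets []packet.Packet
	for {
		p, err := r.Next()
		if err == io.EOF {
			// EOF here also means the verifier accepted the full sequence.
			return packets, nil
		}
		if err != nil {
			return nil, err
		}
		packets = append(packets, p)
	}
}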
+func (r *CheckReader) Next() (p Packet, err error) { + if r.fullyRead { + return nil, io.EOF + } + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + var errMsg error + for len(r.readers) > 0 { + p, errMsg, err = ReadWithCheck(r.readers[len(r.readers)-1], r.verifier) + if errMsg != nil { + err = errMsg + return + } + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } + if errMsg = r.verifier.Next(EOSSymbol); errMsg != nil { + return nil, errMsg + } + if errMsg = r.verifier.AssertValid(); errMsg != nil { + return nil, errMsg + } + r.fullyRead = true + return nil, io.EOF +} + +func NewCheckReader(r io.Reader) *CheckReader { + return &CheckReader{ + Reader: Reader{ + q: nil, + readers: []io.Reader{r}, + }, + verifier: NewSequenceVerifier(), + fullyRead: false, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go new file mode 100644 index 00000000000..fb2e362e4a8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go @@ -0,0 +1,15 @@ +package packet + +// Recipient type represents a Intended Recipient Fingerprint subpacket +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr +type Recipient struct { + KeyVersion int + Fingerprint []byte +} + +func (r *Recipient) Serialize() []byte { + packet := make([]byte, len(r.Fingerprint)+1) + packet[0] = byte(r.KeyVersion) + copy(packet[1:], r.Fingerprint) + return packet +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 80d0bb98e0f..420625386b5 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -15,6 +15,8 @@ import ( "time" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" @@ -39,6 +41,9 @@ type Signature struct { SigType SignatureType PubKeyAlgo PublicKeyAlgorithm Hash crypto.Hash + // salt contains a random salt value for v6 signatures + // See RFC the crypto refresh Section 5.2.3. + salt []byte // HashSuffix is extra data that is hashed in after the signed data. HashSuffix []byte @@ -57,6 +62,7 @@ type Signature struct { DSASigR, DSASigS encoding.Field ECDSASigR, ECDSASigS encoding.Field EdDSASigR, EdDSASigS encoding.Field + EdSig []byte // rawSubpackets contains the unparsed subpackets, in order. 
rawSubpackets []outputSubpacket @@ -72,6 +78,7 @@ type Signature struct { SignerUserId *string IsPrimaryId *bool Notations []*Notation + IntendedRecipients []*Recipient // TrustLevel and TrustAmount can be set by the signer to assert that // the key is not only valid but also trustworthy at the specified @@ -113,26 +120,52 @@ type Signature struct { outSubpackets []outputSubpacket } +// VerifiableSignature internally keeps state if the +// the signature has been verified before. +type VerifiableSignature struct { + Valid *bool // nil if it has not been verified yet + Packet *Signature +} + +// NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature. +func NewVerifiableSig(signature *Signature) *VerifiableSignature { + return &VerifiableSignature{ + Packet: signature, + } +} + +// Salt returns the signature salt for v6 signatures. +func (sig *Signature) Salt() []byte { + if sig == nil { + return nil + } + return sig.salt +} + func (sig *Signature) parse(r io.Reader) (err error) { // RFC 4880, section 5.2.3 - var buf [5]byte + var buf [7]byte _, err = readFull(r, buf[:1]) if err != nil { return } - if buf[0] != 4 && buf[0] != 5 { + if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 { err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) return } sig.Version = int(buf[0]) - _, err = readFull(r, buf[:5]) + if sig.Version == 6 { + _, err = readFull(r, buf[:7]) + } else { + _, err = readFull(r, buf[:5]) + } if err != nil { return } sig.SigType = SignatureType(buf[0]) sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448: default: err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) return @@ -150,7 +183,17 @@ func (sig *Signature) parse(r io.Reader) (err error) { return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) } - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + var hashedSubpacketsLength int + if sig.Version == 6 { + // For a v6 signature, a four-octet length is used. 
+ hashedSubpacketsLength = + int(buf[3])<<24 | + int(buf[4])<<16 | + int(buf[5])<<8 | + int(buf[6]) + } else { + hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4]) + } hashedSubpackets := make([]byte, hashedSubpacketsLength) _, err = readFull(r, hashedSubpackets) if err != nil { @@ -166,11 +209,21 @@ func (sig *Signature) parse(r io.Reader) (err error) { return } - _, err = readFull(r, buf[:2]) + if sig.Version == 6 { + _, err = readFull(r, buf[:4]) + } else { + _, err = readFull(r, buf[:2]) + } + if err != nil { return } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + var unhashedSubpacketsLength uint32 + if sig.Version == 6 { + unhashedSubpacketsLength = uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3]) + } else { + unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1]) + } unhashedSubpackets := make([]byte, unhashedSubpacketsLength) _, err = readFull(r, unhashedSubpackets) if err != nil { @@ -186,6 +239,30 @@ func (sig *Signature) parse(r io.Reader) (err error) { return } + if sig.Version == 6 { + // Only for v6 signatures, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(sig.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + sig.salt = salt + } + switch sig.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: sig.RSASignature = new(encoding.MPI) @@ -216,6 +293,16 @@ func (sig *Signature) parse(r io.Reader) (err error) { if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { return } + case PubKeyAlgoEd25519: + sig.EdSig, err = ed25519.ReadSignature(r) + if err != nil { + return + } + case PubKeyAlgoEd448: + sig.EdSig, err = ed448.ReadSignature(r) + if err != nil { + return + } default: panic("unreachable") } @@ -260,6 +347,7 @@ const ( featuresSubpacket signatureSubpacketType = 30 embeddedSignatureSubpacket signatureSubpacketType = 32 issuerFingerprintSubpacket signatureSubpacketType = 33 + intendedRecipientSubpacket signatureSubpacketType = 35 prefCipherSuitesSubpacket signatureSubpacketType = 39 ) @@ -365,16 +453,18 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r copy(sig.PreferredSymmetric, subpacket) case issuerSubpacket: // Issuer, section 5.2.3.5 - if sig.Version > 4 { - err = errors.StructuralError("issuer subpacket found in v5 key") + if sig.Version > 4 && isHashed { + err = errors.StructuralError("issuer subpacket found in v6 key") return } if len(subpacket) != 8 { err = errors.StructuralError("issuer subpacket with bad length") return } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + if sig.Version <= 4 { + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + } case notationDataSubpacket: // Notation data, section 5.2.3.16 if len(subpacket) < 8 { @@ -453,7 +543,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r return } sig.RevocationReason = new(ReasonForRevocation) - *sig.RevocationReason = ReasonForRevocation(subpacket[0]) + *sig.RevocationReason = NewReasonForRevocation(subpacket[0]) sig.RevocationReasonText = string(subpacket[1:]) case featuresSubpacket: // Features subpacket, section 5.2.3.24 
specifies a very general @@ -495,17 +585,30 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r return } v, l := subpacket[0], len(subpacket[1:]) - if v == 5 && l != 32 || v != 5 && l != 20 { + if v >= 5 && l != 32 || v < 5 && l != 20 { return nil, errors.StructuralError("bad fingerprint length") } sig.IssuerFingerprint = make([]byte, l) copy(sig.IssuerFingerprint, subpacket[1:]) sig.IssuerKeyId = new(uint64) - if v == 5 { + if v >= 5 { *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) } else { *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) } + case intendedRecipientSubpacket: + // Intended Recipient Fingerprint + // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr + if len(subpacket) < 1 { + return nil, errors.StructuralError("invalid intended recipient fingerpring length") + } + version, length := subpacket[0], len(subpacket[1:]) + if version >= 5 && length != 32 || version < 5 && length != 20 { + return nil, errors.StructuralError("invalid fingerprint length") + } + fingerprint := make([]byte, length) + copy(fingerprint, subpacket[1:]) + sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint}) case prefCipherSuitesSubpacket: // Preferred AEAD cipher suites // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites @@ -550,6 +653,13 @@ func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId } +func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil { + return bytes.Equal(sig.IssuerFingerprint, fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == keyId +} + // serializeSubpacketLength marshals the given length into to. func serializeSubpacketLength(to []byte, length int) int { // RFC 4880, Section 4.2.2. @@ -598,20 +708,19 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { to = to[n:] } } - return } // SigExpired returns whether sig is a signature that has expired or is created // in the future. func (sig *Signature) SigExpired(currentTime time.Time) bool { - if sig.CreationTime.After(currentTime) { + if sig.CreationTime.Unix() > currentTime.Unix() { return true } if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { return false } expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) - return currentTime.After(expiry) + return currentTime.Unix() > expiry.Unix() } // buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. 
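Note: the Intended Recipient Fingerprint subpacket parsed above is the mirror image of Recipient.Serialize in recipient.go: one octet of key version followed by the raw fingerprint (20 octets for v4, 32 for v5/v6). A small sketch with an illustrative all-zero v6 fingerprint:

package example

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func exampleIntendedRecipient() {
	// Illustrative v6 recipient: 1 version octet + 32 fingerprint octets = 33 bytes.
	r := packet.Recipient{
		KeyVersion:  6,
		Fingerprint: make([]byte, 32),
	}
	body := r.Serialize()
	fmt.Printf("subpacket body: %d bytes, version octet %d\n", len(body), body[0])
}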
@@ -635,20 +744,36 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { uint8(sig.SigType), uint8(sig.PubKeyAlgo), uint8(hashId), - uint8(len(hashedSubpackets) >> 8), - uint8(len(hashedSubpackets)), }) + hashedSubpacketsLength := len(hashedSubpackets) + if sig.Version == 6 { + // v6 signatures store the length in 4 octets + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 24), + uint8(hashedSubpacketsLength >> 16), + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } else { + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } + lenPrefix := hashedFields.Len() hashedFields.Write(hashedSubpackets) - var l uint64 = uint64(6 + len(hashedSubpackets)) + var l uint64 = uint64(lenPrefix + len(hashedSubpackets)) if sig.Version == 5 { + // v5 case hashedFields.Write([]byte{0x05, 0xff}) hashedFields.Write([]byte{ uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), }) } else { - hashedFields.Write([]byte{0x04, 0xff}) + // v4 and v6 case + hashedFields.Write([]byte{byte(sig.Version), 0xff}) hashedFields.Write([]byte{ uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), }) @@ -676,6 +801,67 @@ func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { return } +// PrepareSign must be called to create a hash object before Sign for v6 signatures. +// The created hash object initially hashes a randomly generated salt +// as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6, +// the method returns an empty hash object. +// See RFC the crypto refresh Section 3.2.4. +func (sig *Signature) PrepareSign(config *Config) (hash.Hash, error) { + if !sig.Hash.Available() { + return nil, errors.UnsupportedError("hash function") + } + hasher := sig.Hash.New() + if sig.Version == 6 { + if sig.salt == nil { + var err error + sig.salt, err = SignatureSaltForHash(sig.Hash, config.Random()) + if err != nil { + return nil, err + } + } + hasher.Write(sig.salt) + } + return hasher, nil +} + +// SetSalt sets the signature salt for v6 signatures. +// Assumes salt is generated correctly and checks if length matches. +// If the signature is not v6, the method ignores the salt. +// Use PrepareSign whenever possible instead of generating and +// hashing the salt externally. +// See RFC the crypto refresh Section 3.2.4. +func (sig *Signature) SetSalt(salt []byte) error { + if sig.Version == 6 { + expectedSaltLength, err := SaltLengthForHash(sig.Hash) + if err != nil { + return err + } + if salt == nil || len(salt) != expectedSaltLength { + return errors.InvalidArgumentError("unexpected salt size for the given hash algorithm") + } + sig.salt = salt + } + return nil +} + +// PrepareVerify must be called to create a hash object before verifying v6 signatures. +// The created hash object initially hashes the internally stored salt. +// If the signature is not v6, the method returns an empty hash object. +// See crypto refresh Section 3.2.4. +func (sig *Signature) PrepareVerify() (hash.Hash, error) { + if !sig.Hash.Available() { + return nil, errors.UnsupportedError("hash function") + } + hasher := sig.Hash.New() + if sig.Version == 6 { + if sig.salt == nil { + return nil, errors.StructuralError("v6 requires a salt for the hash to be signed") + } + hasher.Write(sig.salt) + } + return hasher, nil +} + // Sign signs a message with a private key. 
The hash, h, must contain // the hash of the message to be signed and will be mutated by this function. // On success, the signature is stored in sig. Call Serialize to write it out. @@ -729,6 +915,18 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e sig.EdDSASigR = encoding.NewMPI(r) sig.EdDSASigS = encoding.NewMPI(s) } + case PubKeyAlgoEd25519: + sk := priv.PrivateKey.(*ed25519.PrivateKey) + signature, err := ed25519.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } + case PubKeyAlgoEd448: + sk := priv.PrivateKey.(*ed448.PrivateKey) + signature, err := ed448.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } default: err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) } @@ -744,11 +942,32 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, co if priv.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } - h, err := userIdSignatureHash(id, pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) if err != nil { return err } - return sig.Sign(h, priv, config) + if err := userIdSignatureHash(id, pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// SignDirectKeyBinding computes a signature from priv +// On success, the signature is stored in sig. +// Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := directKeySignatureHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) } // CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, @@ -756,7 +975,11 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, co // If config is nil, sensible defaults will be used. func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, config *Config) error { - h, err := keySignatureHash(hashKey, pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(hashKey, pub, prepareHash) if err != nil { return err } @@ -770,7 +993,11 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) if priv.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash) if err != nil { return err } @@ -781,11 +1008,14 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) // stored in sig. Call Serialize to write it out. // If config is nil, sensible defaults will be used. func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keyRevocationHash(pub, sig.Hash) + prepareHash, err := sig.PrepareSign(config) if err != nil { return err } - return sig.Sign(h, priv, config) + if err := keyRevocationHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) } // RevokeSubkey computes a subkey revocation signature of pub using priv. 
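Note: the salted v6 flow above always runs through the same three calls: PrepareSign generates and hashes the salt, the caller hashes the data being signed, and Sign fills in the algorithm-specific signature fields. A rough sketch of a call site, assuming sig already carries Version, SigType, PubKeyAlgo and Hash, and priv is an unlocked signing key (signAndWrite is illustrative, not part of the package):

package example

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// signAndWrite shows only the ordering of PrepareSign, Sign and Serialize,
// not a complete signature-building routine.
func signAndWrite(w io.Writer, sig *packet.Signature, priv *packet.PrivateKey, data []byte, config *packet.Config) error {
	h, err := sig.PrepareSign(config) // for v6, hashes the random salt first
	if err != nil {
		return err
	}
	h.Write(data) // hash the message body
	if err := sig.Sign(h, priv, config); err != nil {
		return err
	}
	return sig.Serialize(w)
}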
@@ -802,7 +1032,7 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { if len(sig.outSubpackets) == 0 { sig.outSubpackets = sig.rawSubpackets } - if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil { return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") } @@ -819,16 +1049,24 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { case PubKeyAlgoEdDSA: sigLength = int(sig.EdDSASigR.EncodedLength()) sigLength += int(sig.EdDSASigS.EncodedLength()) + case PubKeyAlgoEd25519: + sigLength = ed25519.SignatureSize + case PubKeyAlgoEd448: + sigLength = ed448.SignatureSize default: panic("impossible") } + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + + length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */ + 2 /* length of hashed subpackets */ + hashedSubpacketsLen + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + 2 /* hash tag */ + sigLength - if sig.Version == 5 { - length -= 4 // eight-octet instead of four-octet big endian + if sig.Version == 6 { + length += 4 + /* the two length fields are four-octet instead of two */ + 1 + /* salt length */ + len(sig.salt) /* length salt */ } err = serializeHeader(w, packetTypeSignature, length) if err != nil { @@ -842,18 +1080,41 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { } func (sig *Signature) serializeBody(w io.Writer) (err error) { - hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) - fields := sig.HashSuffix[:6+hashedSubpacketsLen] + var fields []byte + if sig.Version == 6 { + // v6 signatures use 4 octets for length + hashedSubpacketsLen := + uint32(uint32(sig.HashSuffix[4])<<24) | + uint32(uint32(sig.HashSuffix[5])<<16) | + uint32(uint32(sig.HashSuffix[6])<<8) | + uint32(sig.HashSuffix[7]) + fields = sig.HashSuffix[:8+hashedSubpacketsLen] + } else { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | + uint16(sig.HashSuffix[5]) + fields = sig.HashSuffix[:6+hashedSubpacketsLen] + + } _, err = w.Write(fields) if err != nil { return } unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + var unhashedSubpackets []byte + if sig.Version == 6 { + unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 24) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen >> 16) + unhashedSubpackets[2] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[3] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[4:], sig.outSubpackets, false) + } else { + unhashedSubpackets = make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + } _, err = w.Write(unhashedSubpackets) if err != nil { @@ -864,6 +1125,18 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { return } + 
if sig.Version == 6 { + // write salt for v6 signatures + _, err = w.Write([]byte{uint8(len(sig.salt))}) + if err != nil { + return + } + _, err = w.Write(sig.salt) + if err != nil { + return + } + } + switch sig.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: _, err = w.Write(sig.RSASignature.EncodedBytes()) @@ -882,6 +1155,10 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { return } _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + case PubKeyAlgoEd25519: + err = ed25519.WriteSignature(w, sig.EdSig) + case PubKeyAlgoEd448: + err = ed448.WriteSignature(w, sig.EdSig) default: panic("impossible") } @@ -908,7 +1185,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } if sig.IssuerFingerprint != nil { contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) - subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version >= 5, contents}) } if sig.SignerUserId != nil { subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) @@ -958,6 +1235,17 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp }) } + for _, recipient := range sig.IntendedRecipients { + subpackets = append( + subpackets, + outputSubpacket{ + true, + intendedRecipientSubpacket, + false, + recipient.Serialize(), + }) + } + // The following subpackets may only appear in self-signatures. var features = byte(0x00) @@ -1073,8 +1361,6 @@ func (sig *Signature) AddMetadataToHashSuffix() { binary.BigEndian.PutUint32(buf[:], lit.Time) suffix.Write(buf[:]) - // Update the counter and restore trailing bytes - l = uint64(suffix.Len()) suffix.Write([]byte{0x05, 0xff}) suffix.Write([]byte{ uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), @@ -1082,3 +1368,35 @@ func (sig *Signature) AddMetadataToHashSuffix() { }) sig.HashSuffix = suffix.Bytes() } + +// SaltLengthForHash selects the required salt length for the given hash algorithm, +// as per Table 23 (Hash algorithm registry) of the crypto refresh. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5|Crypto Refresh Section 9.5. +func SaltLengthForHash(hash crypto.Hash) (int, error) { + switch hash { + case crypto.SHA256, crypto.SHA224, crypto.SHA3_256: + return 16, nil + case crypto.SHA384: + return 24, nil + case crypto.SHA512, crypto.SHA3_512: + return 32, nil + default: + return 0, errors.UnsupportedError("hash function not supported for V6 signatures") + } +} + +// SignatureSaltForHash generates a random signature salt +// with the length for the given hash algorithm. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5|Crypto Refresh Section 9.5. 
+func SignatureSaltForHash(hash crypto.Hash, randReader io.Reader) ([]byte, error) { + saltLength, err := SaltLengthForHash(hash) + if err != nil { + return nil, err + } + salt := make([]byte, saltLength) + _, err = io.ReadFull(randReader, salt) + if err != nil { + return nil, err + } + return salt, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go index bac2b132ea2..c97b98b9303 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -7,11 +7,13 @@ package packet import ( "bytes" "crypto/cipher" + "crypto/sha256" "io" "strconv" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/hkdf" ) // This is the largest session key that we'll support. Since at most 256-bit cipher @@ -39,10 +41,17 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return err } ske.Version = int(buf[0]) - if ske.Version != 4 && ske.Version != 5 { + if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 { return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") } + if ske.Version > 5 { + // Scalar octet count + if _, err := readFull(r, buf[:]); err != nil { + return err + } + } + // Cipher function if _, err := readFull(r, buf[:]); err != nil { return err @@ -52,7 +61,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0]))) } - if ske.Version == 5 { + if ske.Version >= 5 { // AEAD mode if _, err := readFull(r, buf[:]); err != nil { return errors.StructuralError("cannot read AEAD octet from packet") @@ -60,6 +69,13 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { ske.Mode = AEADMode(buf[0]) } + if ske.Version > 5 { + // Scalar octet count + if _, err := readFull(r, buf[:]); err != nil { + return err + } + } + var err error if ske.s2k, err = s2k.Parse(r); err != nil { if _, ok := err.(errors.ErrDummyPrivateKey); ok { @@ -68,7 +84,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return err } - if ske.Version == 5 { + if ske.Version >= 5 { // AEAD IV iv := make([]byte, ske.Mode.IvLength()) _, err := readFull(r, iv) @@ -109,8 +125,8 @@ func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunc case 4: plaintextKey, cipherFunc, err := ske.decryptV4(key) return plaintextKey, cipherFunc, err - case 5: - plaintextKey, err := ske.decryptV5(key) + case 5, 6: + plaintextKey, err := ske.aeadDecrypt(ske.Version, key) return plaintextKey, CipherFunction(0), err } err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") @@ -136,9 +152,9 @@ func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, return plaintextKey, cipherFunc, nil } -func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { - adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} - aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata) +func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) { + adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)} + aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version) plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata) if err != nil { @@ -178,7 +194,7 @@ 
func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Conf func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { var version int if config.AEAD() != nil { - version = 5 + version = 6 } else { version = 4 } @@ -203,11 +219,15 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass switch version { case 4: packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - case 5: + case 5, 6: ivLen := config.AEAD().Mode().IvLength() tagLen := config.AEAD().Mode().TagLength() packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen } + if version > 5 { + packetLength += 2 // additional octet count fields + } + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) if err != nil { return @@ -216,13 +236,22 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass // Symmetric Key Encrypted Version buf := []byte{byte(version)} + if version > 5 { + // Scalar octet count + buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) + } + // Cipher function buf = append(buf, byte(cipherFunc)) - if version == 5 { + if version >= 5 { // AEAD mode buf = append(buf, byte(config.AEAD().Mode())) } + if version > 5 { + // Scalar octet count + buf = append(buf, byte(len(s2kBytes))) + } _, err = w.Write(buf) if err != nil { return @@ -243,10 +272,10 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass if err != nil { return } - case 5: + case 5, 6: mode := config.AEAD().Mode() - adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} - aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata) + adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)} + aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version) // Sample iv using random reader iv := make([]byte, config.AEAD().Mode().IvLength()) @@ -270,7 +299,17 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass return } -func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { - blockCipher := c.new(inputKey) +func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) { + var blockCipher cipher.Block + if version > 5 { + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + blockCipher = c.new(encryptionKey) + } else { + blockCipher = c.new(inputKey) + } return mode.new(blockCipher) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go index e96252c1968..a8ef0bbbec2 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -115,7 +115,7 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite // Random salt salt := make([]byte, aeadSaltSize) - if _, err := rand.Read(salt); err != nil { + if _, err := io.ReadFull(rand, salt); err != nil { return nil, err } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go index fa26bebe382..645963fa785 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -237,7 +237,10 @@ func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunct block := c.new(key) blockSize := block.BlockSize() iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) + _, err = io.ReadFull(config.Random(), iv) + if err != nil { + return nil, err + } if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go index 88ec72c6c4a..63814ed1324 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -9,7 +9,6 @@ import ( "image" "image/jpeg" "io" - "io/ioutil" ) const UserAttrImageSubpacket = 1 @@ -63,7 +62,7 @@ func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { func (uat *UserAttribute) parse(r io.Reader) (err error) { // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index 614fbafd5e1..3c7451a3c36 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -6,7 +6,6 @@ package packet import ( "io" - "io/ioutil" "strings" ) @@ -66,7 +65,7 @@ func NewUserId(name, comment, email string) *UserId { func (uid *UserId) parse(r io.Reader) (err error) { // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index 8499c73790d..408506592fc 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -46,6 +46,7 @@ type MessageDetails struct { DecryptedWith Key // the private key used to decrypt the message, if any. IsSigned bool // true if the message is signed. SignedByKeyId uint64 // the key id of the signer, if any. + SignedByFingerprint []byte // the key fingerprint of the signer, if any. SignedBy *Key // the key of the signer, if available. LiteralData *packet.LiteralData // the metadata of the contents UnverifiedBody io.Reader // the contents of the message. @@ -117,7 +118,7 @@ ParsePackets: // This packet contains the decryption key encrypted to a public key. 
md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448: break default: continue @@ -270,13 +271,17 @@ FindLiteralData: prevLast = true } - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt) if err != nil { md.SignatureError = err } md.IsSigned = true + if p.Version == 6 { + md.SignedByFingerprint = p.KeyFingerprint + } md.SignedByKeyId = p.KeyId + if keyring != nil { keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) if len(keys) > 0 { @@ -292,7 +297,7 @@ FindLiteralData: if md.IsSigned && md.SignatureError == nil { md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} + md.UnverifiedBody = &checkReader{md, false} } else { md.UnverifiedBody = md.LiteralData.Body } @@ -300,12 +305,22 @@ FindLiteralData: return md, nil } +func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) { + switch sigType { + case packet.SigTypeBinary: + return hashFunc, nil + case packet.SigTypeText: + return NewCanonicalTextHash(hashFunc), nil + } + return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + // hashForSignature returns a pair of hashes that can be used to verify a // signature. The signature may specify that the contents of the signed message // should be preprocessed (i.e. to normalize line endings). Thus this function // returns two hashes. The second should be used to hash the message itself and // performs any needed preprocessing. -func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { +func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) { if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { return nil, nil, errors.UnsupportedError("unsupported hash function") } @@ -313,14 +328,19 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) } h := hashFunc.New() - + if sigSalt != nil { + h.Write(sigSalt) + } + wrappedHash, err := wrapHashForSignature(h, sigType) + if err != nil { + return nil, nil, err + } switch sigType { case packet.SigTypeBinary: - return h, h, nil + return h, wrappedHash, nil case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil + return h, wrappedHash, nil } - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) } @@ -328,16 +348,22 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. // it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger // MDC checks. 
type checkReader struct { - md *MessageDetails + md *MessageDetails + checked bool } -func (cr checkReader) Read(buf []byte) (int, error) { +func (cr *checkReader) Read(buf []byte) (int, error) { n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) if sensitiveParsingError == io.EOF { + if cr.checked { + // Only check once + return n, io.EOF + } mdcErr := cr.md.decrypted.Close() if mdcErr != nil { return n, mdcErr } + cr.checked = true return n, io.EOF } @@ -428,14 +454,13 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // if any, and a possible signature verification error. // If the signer isn't known, ErrUnknownIssuer is returned. func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - var expectedHashes []crypto.Hash - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + return verifyDetachedSignature(keyring, signed, signature, nil, false, config) } // VerifyDetachedSignatureAndHash performs the same actions as // VerifyDetachedSignature and checks that the expected hash functions were used. func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) } // CheckDetachedSignature takes a signed file and a detached signature and @@ -443,25 +468,24 @@ func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader // signature verification error. If the signer isn't known, // ErrUnknownIssuer is returned. func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - var expectedHashes []crypto.Hash - return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) + _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config) + return } // CheckDetachedSignatureAndHash performs the same actions as // CheckDetachedSignature and checks that the expected hash functions were used. 
func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { - _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) return } -func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { var issuerKeyId uint64 var hashFunc crypto.Hash var sigType packet.SignatureType var keys []Key var p packet.Packet - expectedHashesLen := len(expectedHashes) packets := packet.NewReader(signature) for { p, err = packets.Next() @@ -483,16 +507,19 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec issuerKeyId = *sig.IssuerKeyId hashFunc = sig.Hash sigType = sig.SigType - - for i, expectedHash := range expectedHashes { - if hashFunc == expectedHash { - break + if checkHashes { + matchFound := false + // check for hashes + for _, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + matchFound = true + break + } } - if i+1 == expectedHashesLen { - return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + if !matchFound { + return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers") } } - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) if len(keys) > 0 { break @@ -503,7 +530,11 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec panic("unreachable") } - h, wrappedHash, err := hashForSignature(hashFunc, sigType) + h, err := sig.PrepareVerify() + if err != nil { + return nil, nil, err + } + wrappedHash, err := wrapHashForSignature(h, sigType) if err != nil { return nil, nil, err } @@ -551,15 +582,11 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, // NOTE: The order of these checks is important, as the caller may choose to // ignore ErrSignatureExpired or ErrKeyExpired errors, but should never // ignore any other errors. 
-// -// TODO: Also return an error if: -// - The primary key is expired according to a direct-key signature -// - (For V5 keys only:) The direct-key signature (exists and) is expired func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { now := config.Now() - primaryIdentity := key.Entity.PrimaryIdentity() + primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature() signedBySubKey := key.PublicKey != key.Entity.PrimaryKey - sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature} + sigsToCheck := []*packet.Signature{signature, primarySelfSignature} if signedBySubKey { sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) } @@ -572,10 +599,10 @@ func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet } if key.Entity.Revoked(now) || // primary key is revoked (signedBySubKey && key.Revoked(now)) || // subkey is revoked - primaryIdentity.Revoked(now) { // primary identity is revoked + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4 return errors.ErrKeyRevoked } - if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired + if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired return errors.ErrKeyExpired } if signedBySubKey { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go index db6dad5c0b7..670d60226a4 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -26,6 +26,8 @@ const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a43129 const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" +const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d" + const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" @@ -160,18 +162,78 @@ TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== =IiS2 -----END PGP PRIVATE KEY BLOCK-----` -// Generated with the above private key -const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- -Version: OpenPGP.js v4.10.7 -Comment: https://openpgpjs.org +// See OpenPGP crypto refresh Section A.3. 
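The armored constants below mirror sample keys and messages from the crypto-refresh appendices and merge requests. In a test they would be consumed through the existing keyring readers; a hedged in-package sketch follows (the test itself is illustrative and not part of this change, and only ReadArmoredKeyRing plus the v6PrivKey constant defined below are assumed).

package openpgp

import (
	"strings"
	"testing"
)

// TestReadV6TestKey is an illustrative test: it decodes the armored v6
// private key from crypto refresh Section A.3 into a single entity.
func TestReadV6TestKey(t *testing.T) {
	entities, err := ReadArmoredKeyRing(strings.NewReader(v6PrivKey))
	if err != nil {
		t.Fatal(err)
	}
	if len(entities) != 1 {
		t.Fatalf("expected 1 entity, got %d", len(entities))
	}
}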
+const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUsGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laMAGXKB +exK+cH6NX1hs5hNhIB00TrJmosgv3mg1ditlsLfCsQYfGwoAAABCBYJjh3/jAwsJ +BwUVCg4IDAIWAAKbAwIeCSIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJBScJAgcCAAAAAK0oIBA+LX0ifsDm185Ecds2v8lwgyU2kCcUmKfvBXbAf6rh +RYWzuQOwEn7E/aLwIwRaLsdry0+VcallHhSu4RN6HWaEQsiPlR4zxP/TP7mhfVEe +7XWPxtnMUMtf15OyA51YBMdLBmOHf+MZAAAAIIaTJINn+eUBXbki+PSAld2nhJh/ +LVmFsS+60WyvXkQ1AE1gCk95TUR3XFeibg/u/tVY6a//1q0NWC1X+yui3O24wpsG +GBsKAAAALAWCY4d/4wKbDCIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJAAAAAAQBIKbpGG2dWTX8j+VjFM21J0hqWlEg+bdiojWnKfA5AQpWUWtnNwDE +M0g12vYxoWM8Y81W+bHBw805I8kWVkXU6vFOi+HWvv/ira7ofJu16NnoUkhclkUr +k0mXubZvyl4GBg== +-----END PGP PRIVATE KEY BLOCK-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/304 +const v6PrivKeyMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== +-----END PGP MESSAGE-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/305 +const v6PrivKeyInlineSignMsg = `-----BEGIN PGP MESSAGE----- -xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf -bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G -y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv -UQdl5MlBka1QSNbMq2Bz7XwNPg4= -=6lbM +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== -----END PGP MESSAGE-----` +// See https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/274 +// decryption password: "correct horse battery staple" +const v6ArgonSealedPrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xYIGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laP9JgkC +FARdb9ccngltHraRe25uHuyuAQQVtKipJ0+r5jL4dacGWSAheCWPpITYiyfyIOPS +3gIDyg8f7strd1OB4+LZsUhcIjOMpVHgmiY/IutJkulneoBYwrEGHxsKAAAAQgWC +Y4d/4wMLCQcFFQoOCAwCFgACmwMCHgkiIQbLGGxPBgmml+TVLfpscisMHx4nwYpW +cI9lJewnutmsyQUnCQIHAgAAAACtKCAQPi19In7A5tfORHHbNr/JcIMlNpAnFJin +7wV2wH+q4UWFs7kDsBJ+xP2i8CMEWi7Ha8tPlXGpZR4UruETeh1mhELIj5UeM8T/ +0z+5oX1RHu11j8bZzFDLX9eTsgOdWATHggZjh3/jGQAAACCGkySDZ/nlAV25Ivj0 +gJXdp4SYfy1ZhbEvutFsr15ENf0mCQIUBA5hhGgp2oaavg6mFUXcFMwBBBUuE8qf +9Ock+xwusd+GAglBr5LVyr/lup3xxQvHXFSjjA2haXfoN6xUGRdDEHI6+uevKjVR +v5oAxgu7eJpaXNjCmwYYGwoAAAAsBYJjh3/jApsMIiEGyxhsTwYJppfk1S36bHIr +DB8eJ8GKVnCPZSXsJ7rZrMkAAAAABAEgpukYbZ1ZNfyP5WMUzbUnSGpaUSD5t2Ki +Nacp8DkBClZRa2c3AMQzSDXa9jGhYzxjzVb5scHDzTkjyRZWRdTq8U6L4da+/+Kt +ruh8m7Xo2ehSSFyWRSuTSZe5tm/KXgYG +-----END PGP PRIVATE KEY BLOCK-----` + +const v4Key25519 = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUkEZB3qzRto01j2k2pwN5ux9w70stPinAdXULLr20CRW7U7h2GSeACch0M+ +qzQg8yjFQ8VBvu3uwgKH9senoHmj72lLSCLTmhFKzQR0ZXN0wogEEBsIAD4F +gmQd6s0ECwkHCAmQIf45+TuC+xMDFQgKBBYAAgECGQECmwMCHgEWIQSWEzMi +jJUHvyIbVKIh/jn5O4L7EwAAUhaHNlgudvxARdPPETUzVgjuWi+YIz8w1xIb +lHQMvIrbe2sGCQIethpWofd0x7DHuv/ciHg+EoxJ/Td6h4pWtIoKx0kEZB3q +zRm4CyA7quliq7yx08AoOqHTuuCgvpkSdEhpp3pEyejQOgBo0p6ywIiLPllY +0t+jpNspHpAGfXID6oqjpYuJw3AfVRBlwnQEGBsIACoFgmQd6s0JkCH+Ofk7 
+gvsTApsMFiEElhMzIoyVB78iG1SiIf45+TuC+xMAAGgQuN9G73446ykvJ/mL +sCZ7zGFId2gBd1EnG0FTC4npfOKpck0X8dngByrCxU8LDSfvjsEp/xDAiKsQ +aU71tdtNBQ== +=e7jT +-----END PGP PRIVATE KEY BLOCK-----` + const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv @@ -272,3 +334,124 @@ AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY hz3tYjKhoFTKEIq3y3Pp =h/aX -----END PGP PUBLIC KEY BLOCK-----` + +const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: Bob's OpenPGP Transferable Secret Key + +lQVYBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAQAL/RZqbJW2IqQDCnJi4Ozm++gPqBPiX1RhTWSjwxfM +cJKUZfzLj414rMKm6Jh1cwwGY9jekROhB9WmwaaKT8HtcIgrZNAlYzANGRCM4TLK +3VskxfSwKKna8l+s+mZglqbAjUg3wmFuf9Tj2xcUZYmyRm1DEmcN2ZzpvRtHgX7z +Wn1mAKUlSDJZSQks0zjuMNbupcpyJokdlkUg2+wBznBOTKzgMxVNC9b2g5/tMPUs +hGGWmF1UH+7AHMTaS6dlmr2ZBIyogdnfUqdNg5sZwsxSNrbglKP4sqe7X61uEAIQ +bD7rT3LonLbhkrj3I8wilUD8usIwt5IecoHhd9HziqZjRCc1BUBkboUEoyedbDV4 +i4qfsFZ6CEWoLuD5pW7dEp0M+WeuHXO164Rc+LnH6i1VQrpb1Okl4qO6ejIpIjBI +1t3GshtUu/mwGBBxs60KBX5g77mFQ9lLCRj8lSYqOsHRKBhUp4qM869VA+fD0BRP +fqPT0I9IH4Oa/A3jYJcg622GwQYA1LhnP208Waf6PkQSJ6kyr8ymY1yVh9VBE/g6 +fRDYA+pkqKnw9wfH2Qho3ysAA+OmVOX8Hldg+Pc0Zs0e5pCavb0En8iFLvTA0Q2E +LR5rLue9uD7aFuKFU/VdcddY9Ww/vo4k5p/tVGp7F8RYCFn9rSjIWbfvvZi1q5Tx ++akoZbga+4qQ4WYzB/obdX6SCmi6BndcQ1QdjCCQU6gpYx0MddVERbIp9+2SXDyL +hpxjSyz+RGsZi/9UAshT4txP4+MZBgDfK3ZqtW+h2/eMRxkANqOJpxSjMyLO/FXN +WxzTDYeWtHNYiAlOwlQZEPOydZFty9IVzzNFQCIUCGjQ/nNyhw7adSgUk3+BXEx/ +MyJPYY0BYuhLxLYcrfQ9nrhaVKxRJj25SVHj2ASsiwGJRZW4CC3uw40OYxfKEvNC +mer/VxM3kg8qqGf9KUzJ1dVdAvjyx2Hz6jY2qWCyRQ6IMjWHyd43C4r3jxooYKUC +YnstRQyb/gCSKahveSEjo07CiXMr88UGALwzEr3npFAsPW3osGaFLj49y1oRe11E +he9gCHFm+fuzbXrWmdPjYU5/ZdqdojzDqfu4ThfnipknpVUM1o6MQqkjM896FHm8 +zbKVFSMhEP6DPHSCexMFrrSgN03PdwHTO6iBaIBBFqmGY01tmJ03SxvSpiBPON9P +NVvy/6UZFedTq8A07OUAxO62YUSNtT5pmK2vzs3SAZJmbFbMh+NN204TRI72GlqT +t5hcfkuv8hrmwPS/ZR6q312mKQ6w/1pqO9qitCFCb2IgQmFiYmFnZSA8Ym9iQG9w +ZW5wZ3AuZXhhbXBsZT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgEC +F4AWIQTRpm4aI7GCyZgPeIz7/MgqAV5zMAUCXaWe+gAKCRD7/MgqAV5zMG9sC/9U +2T3RrqEbw533FPNfEflhEVRIZ8gDXKM8hU6cqqEzCmzZT6xYTe6sv4y+PJBGXJFX +yhj0g6FDkSyboM5litOcTupURObVqMgA/Y4UKERznm4fzzH9qek85c4ljtLyNufe +doL2pp3vkGtn7eD0QFRaLLmnxPKQ/TlZKdLE1G3u8Uot8QHicaR6GnAdc5UXQJE3 +BiV7jZuDyWmZ1cUNwJkKL6oRtp+ZNDOQCrLNLecKHcgCqrpjSQG5oouba1I1Q6Vl +sP44dhA1nkmLHtxlTOzpeHj4jnk1FaXmyasurrrI5CgU/L2Oi39DGKTH/A/cywDN +4ZplIQ9zR8enkbXquUZvFDe+Xz+6xRXtb5MwQyWODB3nHw85HocLwRoIN9WdQEI+ +L8a/56AuOwhs8llkSuiITjR7r9SgKJC2WlAHl7E8lhJ3VDW3ELC56KH308d6mwOG +ZRAqIAKzM1T5FGjMBhq7ZV0eqdEntBh3EcOIfj2M8rg1MzJv+0mHZOIjByawikad +BVgEXaWc8gEMANYwv1xsYyunXYK0X1vY/rP1NNPvhLyLIE7NpK90YNBj+xS1ldGD +bUdZqZeef2xJe8gMQg05DoD1DF3GipZ0Ies65beh+d5hegb7N4pzh0LzrBrVNHar +29b5ExdI7i4iYD5TO6Vr/qTUOiAN/byqELEzAb+L+b2DVz/RoCm4PIp1DU9ewcc2 +WB38Ofqut3nLYA5tqJ9XvAiEQme+qAVcM3ZFcaMt4I4dXhDZZNg+D9LiTWcxdUPB +leu8iwDRjAgyAhPzpFp+nWoqWA81uIiULWD1Fj+IVoY3ZvgivoYOiEFBJ9lbb4te +g9m5UT/AaVDTWuHzbspVlbiVe+qyB77C2daWzNyx6UYBPLOo4r0t0c91kbNE5lgj 
+Z7xz6los0N1U8vq91EFSeQJoSQ62XWavYmlCLmdNT6BNfgh4icLsT7Vr1QMX9jzn +JtTPxdXytSdHvpSpULsqJ016l0dtmONcK3z9mj5N5z0k1tg1AH970TGYOe2aUcSx +IRDMXDOPyzEfjwARAQABAAv9F2CwsjS+Sjh1M1vegJbZjei4gF1HHpEM0K0PSXsp +SfVvpR4AoSJ4He6CXSMWg0ot8XKtDuZoV9jnJaES5UL9pMAD7JwIOqZm/DYVJM5h +OASCh1c356/wSbFbzRHPtUdZO9Q30WFNJM5pHbCJPjtNoRmRGkf71RxtvHBzy7np +Ga+W6U/NVKHw0i0CYwMI0YlKDakYW3Pm+QL+gHZFvngGweTod0f9l2VLLAmeQR/c ++EZs7lNumhuZ8mXcwhUc9JQIhOkpO+wreDysEFkAcsKbkQP3UDUsA1gFx9pbMzT0 +tr1oZq2a4QBtxShHzP/ph7KLpN+6qtjks3xB/yjTgaGmtrwM8tSe0wD1RwXS+/1o +BHpXTnQ7TfeOGUAu4KCoOQLv6ELpKWbRBLWuiPwMdbGpvVFALO8+kvKAg9/r+/ny +zM2GQHY+J3Jh5JxPiJnHfXNZjIKLbFbIPdSKNyJBuazXW8xIa//mEHMI5OcvsZBK +clAIp7LXzjEjKXIwHwDcTn9pBgDpdOKTHOtJ3JUKx0rWVsDH6wq6iKV/FTVSY5jl +zN+puOEsskF1Lfxn9JsJihAVO3yNsp6RvkKtyNlFazaCVKtDAmkjoh60XNxcNRqr +gCnwdpbgdHP6v/hvZY54ZaJjz6L2e8unNEkYLxDt8cmAyGPgH2XgL7giHIp9jrsQ +aS381gnYwNX6wE1aEikgtY91nqJjwPlibF9avSyYQoMtEqM/1UjTjB2KdD/MitK5 +fP0VpvuXpNYZedmyq4UOMwdkiNMGAOrfmOeT0olgLrTMT5H97Cn3Yxbk13uXHNu/ +ZUZZNe8s+QtuLfUlKAJtLEUutN33TlWQY522FV0m17S+b80xJib3yZVJteVurrh5 +HSWHAM+zghQAvCesg5CLXa2dNMkTCmZKgCBvfDLZuZbjFwnwCI6u/NhOY9egKuUf +SA/je/RXaT8m5VxLYMxwqQXKApzD87fv0tLPlVIEvjEsaf992tFEFSNPcG1l/jpd +5AVXw6kKuf85UkJtYR1x2MkQDrqY1QX/XMw00kt8y9kMZUre19aCArcmor+hDhRJ +E3Gt4QJrD9z/bICESw4b4z2DbgD/Xz9IXsA/r9cKiM1h5QMtXvuhyfVeM01enhxM +GbOH3gjqqGNKysx0UODGEwr6AV9hAd8RWXMchJLaExK9J5SRawSg671ObAU24SdY +vMQ9Z4kAQ2+1ReUZzf3ogSMRZtMT+d18gT6L90/y+APZIaoArLPhebIAGq39HLmJ +26x3z0WAgrpA1kNsjXEXkoiZGPLKIGoe3hqJAbYEGAEKACAWIQTRpm4aI7GCyZgP +eIz7/MgqAV5zMAUCXaWc8gIbDAAKCRD7/MgqAV5zMOn/C/9ugt+HZIwX308zI+QX +c5vDLReuzmJ3ieE0DMO/uNSC+K1XEioSIZP91HeZJ2kbT9nn9fuReuoff0T0Dief +rbwcIQQHFFkrqSp1K3VWmUGp2JrUsXFVdjy/fkBIjTd7c5boWljv/6wAsSfiv2V0 +JSM8EFU6TYXxswGjFVfc6X97tJNeIrXL+mpSmPPqy2bztcCCHkWS5lNLWQw+R7Vg +71Fe6yBSNVrqC2/imYG2J9zlowjx1XU63Wdgqp2Wxt0l8OmsB/W80S1fRF5G4SDH +s9HXglXXqPsBRZJYfP+VStm9L5P/sKjCcX6WtZR7yS6G8zj/X767MLK/djANvpPd +NVniEke6hM3CNBXYPAMhQBMWhCulcoz+0lxi8L34rMN+Dsbma96psdUrn7uLaB91 +6we0CTfF8qqm7BsVAgalon/UUiuMY80U3ueoj3okiSTiHIjD/YtpXSPioC8nMng7 +xqAY9Bwizt4FWgXuLm1a4+So4V9j1TRCXd12Uc2l2RNmgDE= +=miES +-----END PGP PRIVATE KEY BLOCK----- +` + +const certv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK----- +` + +const msgv5Test = `-----BEGIN PGP MESSAGE----- + +wcDMA3wvqk35PDeyAQv+PcQiLsoYTH30nJYQh3j3cJaO2+jErtVCrIQRIU0+ +rmgMddERYST4A9mA0DQIiTI4FQ0Lp440D3BWCgpq3LlNWewGzduaWwym5rN6 +cwHz5ccDqOcqbd9X0GXXGy/ZH/ljSgzuVMIytMAXKdF/vrRrVgH/+I7cxvm9 +HwnhjMN5dF0j4aEt996H2T7cbtzSr2GN9SWGW8Gyu7I8Zx73hgrGUI7gDiJB +Afaff+P6hfkkHSGOItr94dde8J/7AUF4VEwwxdVVPvsNEFyvv6gRIbYtOCa2 +6RE6h1V/QTxW2O7zZgzWALrE2ui0oaYr9QuqQSssd9CdgExLfdPbI+3/ZAnE +v31Idzpk3/6ILiakYHtXkElPXvf46mCNpobty8ysT34irF+fy3C1p3oGwAsx +5VDV9OSFU6z5U+UPbSPYAy9rkc5ZssuIKxCER2oTvZ2L8Q5cfUvEUiJtRGGn 
+CJlHrVDdp3FssKv2tlKgLkvxJLyoOjuEkj44H1qRk+D02FzmmUT/0sAHAYYx +lTir6mjHeLpcGjn4waUuWIAJyph8SxUexP60bic0L0NBa6Qp5SxxijKsPIDb +FPHxWwfJSDZRrgUyYT7089YFB/ZM4FHyH9TZcnxn0f0xIB7NS6YNDsxzN2zT +EVEYf+De4qT/dQTsdww78Chtcv9JY9r2kDm77dk2MUGHL2j7n8jasbLtgA7h +pn2DMIWLrGamMLWRmlwslolKr1sMV5x8w+5Ias6C33iBMl9phkg42an0gYmc +byVJHvLO/XErtC+GNIJeMg== +=liRq +-----END PGP MESSAGE----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go index a43695964b2..f4f5c7832d4 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -87,10 +87,10 @@ func decodeCount(c uint8) int { // encodeMemory converts the Argon2 "memory" in the range parallelism*8 to // 2**31, inclusive, to an encoded memory. The return value is the // octet that is actually stored in the GPG file. encodeMemory panics -// if is not in the above range +// if is not in the above range // See OpenPGP crypto refresh Section 3.7.1.4. func encodeMemory(memory uint32, parallelism uint8) uint8 { - if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) { + if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) { panic("Memory argument memory is outside the required range") } @@ -211,7 +211,7 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { c.S2KMode = IteratedSaltedS2K } params = &Params{ - mode: IteratedSaltedS2K, + mode: IteratedSaltedS2K, hashId: hashId, countByte: c.EncodedCount(), } @@ -306,9 +306,12 @@ func (params *Params) Dummy() bool { func (params *Params) salt() []byte { switch params.mode { - case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] - case Argon2S2K: return params.saltBytes[:Argon2SaltSize] - default: return nil + case SaltedS2K, IteratedSaltedS2K: + return params.saltBytes[:8] + case Argon2S2K: + return params.saltBytes[:Argon2SaltSize] + default: + return nil } } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go index 25a4442dfbd..616e0d12c6c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go @@ -5,7 +5,7 @@ package s2k // the same parameters. type Cache map[Params][]byte -// GetOrComputeDerivedKey tries to retrieve the key +// GetOrComputeDerivedKey tries to retrieve the key // for the given s2k parameters from the cache. // If there is no hit, it derives the key with the s2k function from the passphrase, // updates the cache, and returns the key. diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go index b40be5228fc..b93db1ab853 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go @@ -50,9 +50,9 @@ type Config struct { type Argon2Config struct { NumberOfPasses uint8 DegreeOfParallelism uint8 - // The memory parameter for Argon2 specifies desired memory usage in kibibytes. + // Memory specifies the desired Argon2 memory usage in kibibytes. // For example memory=64*1024 sets the memory cost to ~64 MB. 
- Memory uint32 + Memory uint32 } func (c *Config) Mode() Mode { @@ -115,7 +115,7 @@ func (c *Argon2Config) EncodedMemory() uint8 { } memory := c.Memory - lowerBound := uint32(c.Parallelism())*8 + lowerBound := uint32(c.Parallelism()) * 8 upperBound := uint32(2147483648) switch { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index 7fdd13a3dd3..0db5526ce0e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -76,7 +76,11 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S sig := createSignaturePacket(signingKey.PublicKey, sigType, config) - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + h, err := sig.PrepareSign(config) + if err != nil { + return + } + wrappedHash, err := wrapHashForSignature(h, sig.SigType) if err != nil { return } @@ -275,14 +279,28 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") } + var salt []byte if signer != nil { + var opsVersion = 3 + if signer.Version == 6 { + opsVersion = signer.Version + } ops := &packet.OnePassSignature{ + Version: opsVersion, SigType: sigType, Hash: hash, PubKeyAlgo: signer.PubKeyAlgo, KeyId: signer.KeyId, IsLast: true, } + if opsVersion == 6 { + ops.KeyFingerprint = signer.Fingerprint + salt, err = packet.SignatureSaltForHash(hash, config.Random()) + if err != nil { + return nil, err + } + ops.Salt = salt + } if err := ops.Serialize(payload); err != nil { return nil, err } @@ -310,19 +328,19 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } if signer != nil { - h, wrappedHash, err := hashForSignature(hash, sigType) + h, wrappedHash, err := hashForSignature(hash, sigType, salt) if err != nil { return nil, err } metadata := &packet.LiteralData{ - Format: 't', + Format: 'u', FileName: hints.FileName, Time: epochSeconds, } if hints.IsBinary { metadata.Format = 'b' } - return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil + return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil } return literalData, nil } @@ -380,15 +398,19 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } - sig := to[i].PrimaryIdentity().SelfSignature - if !sig.SEIPDv2 { + primarySelfSignature, _ := to[i].PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.InvalidArgumentError("entity without a self-signature") + } + + if !primarySelfSignature.SEIPDv2 { aeadSupported = false } - candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) - candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) - candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) + candidateCiphers = intersectPreferences(candidateCiphers, primarySelfSignature.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash) + 
candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression) } // In the event that the intersection of supported algorithms is empty we use the ones @@ -428,7 +450,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { + if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil { return nil, err } } @@ -465,13 +487,17 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con hashToHashId(crypto.SHA3_512), } defaultHashes := candidateHashes[0:1] - preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash + primarySelfSignature, _ := signed.PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.StructuralError("signed entity has no self-signature") + } + preferredHashes := primarySelfSignature.PreferredHash if len(preferredHashes) == 0 { preferredHashes = defaultHashes } candidateHashes = intersectPreferences(candidateHashes, preferredHashes) if len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") + return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes") } return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) @@ -486,6 +512,7 @@ type signatureWriter struct { hashType crypto.Hash wrappedHash hash.Hash h hash.Hash + salt []byte // v6 only signer *packet.PrivateKey sigType packet.SignatureType config *packet.Config @@ -509,6 +536,10 @@ func (s signatureWriter) Close() error { sig.Hash = s.hashType sig.Metadata = s.metadata + if err := sig.SetSalt(s.salt); err != nil { + return err + } + if err := sig.Sign(s.h, s.signer, s.config); err != nil { return err } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go new file mode 100644 index 00000000000..38afcc74fa3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go @@ -0,0 +1,221 @@ +package x25519 + +import ( + "crypto/sha256" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X25519" + aes128KeySize = 16 + // The size of a public or private key in bytes. + KeySize = x25519lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches the private key. 
+func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x25519lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x25519lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x25519: invalid key") + } + return nil +} + +// GenerateKey generates a new x25519 key pair. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x25519lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x25519lib.Key, publicKey *x25519lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x25519: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x25519lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x25519 according to +// the OpenPGP crypto refresh specification section 5.1.6. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x25519lib.Key + // Check that the input static public key has 32 bytes + if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair + err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic) + if err != nil { + return + } + // Compute shared key + ok := x25519lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + return +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x25519 +// private key and ephemeral public key. 
+func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x25519lib.Key + // Check that the input ephemeral public key has 32 bytes + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key + ok := x25519lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the ephemeral public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + return +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes128KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x25519 session key encryption fields as +// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. +func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + _, err = writer.Write(encryptedSessionKey) + return err +} + +// DecodeField decodes a x25519 session key encryption as +// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 32 octets representing an ephemeral x25519 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. 
+ if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go new file mode 100644 index 00000000000..65a082dabd7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go @@ -0,0 +1,229 @@ +package x448 + +import ( + "crypto/sha512" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X448" + aes256KeySize = 32 + // The size of a public or private key in bytes. + KeySize = x448lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches +// the private key. +func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x448lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x448lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x448: invalid key") + } + return nil +} + +// GenerateKey generates a new x448 key pair. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x448lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x448lib.Key, publicKey *x448lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x448: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x448lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x448 according to +// the OpenPGP crypto refresh specification section 5.1.7. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x448lib.Key + // Check that the input static public key has 56 bytes. 
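// --- Hedged sketch (editor's addition) of the PKESK wire layout handled by the
// EncodeFields/DecodeFields pair above: ephemeral public key | one-octet length |
// cipher function (v3 only) | encrypted session key. Key material and the
// algorithm identifier below are illustrative placeholders.
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/x25519"
)

func main() {
	ephemeral := &x25519.PublicKey{Point: bytes.Repeat([]byte{1}, x25519.KeySize)}
	wrapped := bytes.Repeat([]byte{2}, 40) // e.g. a 32-byte session key after AES key wrap
	const cipherFunction = 9               // placeholder one-octet algorithm id (v3 packets only)

	var buf bytes.Buffer
	if err := x25519.EncodeFields(&buf, ephemeral, wrapped, cipherFunction, false); err != nil {
		panic(err)
	}
	// 32 (point) + 1 (length octet) + 1 (cipher function) + 40 (wrapped key) = 74 bytes.
	fmt.Println(buf.Len() == x25519.EncodedFieldsLength(wrapped, false)) // true

	pub, esk, cf, err := x25519.DecodeFields(&buf, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pub.Point), len(esk), cf) // 32 40 9
}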
+ if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, nil, err + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair. + if err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic); err != nil { + return nil, nil, err + } + // Compute shared key. + ok := x448lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x448: the public key is a low order point") + return nil, nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping. + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + if err != nil { + return nil, nil, err + } + return ephemeralPublicKey, encryptedSessionKey, nil +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x448 +// private key and ephemeral public key. +func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x448lib.Key + // Check that the input ephemeral public key has 56 bytes. + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, err + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key. + ok := x448lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x448: the ephemeral public key is a low order point") + return nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping. + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + if err != nil { + return nil, err + } + return encodedSessionKey, nil +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret. + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha512.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes256KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x448 session key encryption fields as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. 
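// --- Hedged sketch (editor's addition): the x448 variant above mirrors x25519 but
// with 56-byte keys, HKDF over SHA-512 and a 32-byte AES-256 wrapping key. The
// arithmetic below only restates EncodedFieldsLength for a 40-byte wrapped key.
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/x448"
)

func main() {
	wrapped := make([]byte, 40) // e.g. a 32-byte session key after AES key wrap

	fmt.Println(x448.KeySize)                             // 56
	fmt.Println(x448.EncodedFieldsLength(wrapped, true))  // v6: 56 + 1 + 40     = 97
	fmt.Println(x448.EncodedFieldsLength(wrapped, false)) // v3: 56 + 1 + 1 + 40 = 98
}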
+func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + if _, err = writer.Write(encryptedSessionKey); err != nil { + return err + } + return nil +} + +// DecodeField decodes a x448 session key encryption as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 56 octets representing an ephemeral x448 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s index b7723185b61..ce9f062894a 100644 --- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s index 810aa9e6481..ed33ba3d032 100644 --- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s index 5c4aeddecb4..1fcc2dee17f 100644 --- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" #include "fp_amd64.h" diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s index 435addf5e6c..3f1f07c9862 100644 --- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s @@ -1,4 +1,5 @@ -// +build amd64 +//go:build amd64 && !purego +// +build amd64,!purego #include "textflag.h" #include "fp_amd64.h" diff --git a/vendor/github.com/containerd/continuity/fs/copy.go 
b/vendor/github.com/containerd/continuity/fs/copy.go index a3fef3c0571..af3abdd4c49 100644 --- a/vendor/github.com/containerd/continuity/fs/copy.go +++ b/vendor/github.com/containerd/continuity/fs/copy.go @@ -103,6 +103,11 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er } } + entries, err := os.ReadDir(src) + if err != nil { + return fmt.Errorf("failed to read %s: %w", src, err) + } + if err := copyFileInfo(stat, src, dst); err != nil { return fmt.Errorf("failed to copy file info for %s: %w", dst, err) } @@ -111,15 +116,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er return fmt.Errorf("failed to copy xattrs: %w", err) } - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - dr := &dirReader{f: f} - - handleEntry := func(entry os.DirEntry) error { + for _, entry := range entries { source := filepath.Join(src, entry.Name()) target := filepath.Join(dst, entry.Name()) @@ -133,7 +130,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er if err := copyDirectory(target, source, inodes, o); err != nil { return err } - return nil + continue case (fileInfo.Mode() & os.ModeType) == 0: link, err := getLinkSource(target, fileInfo, inodes) if err != nil { @@ -162,7 +159,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er } default: logrus.Warnf("unsupported mode: %s: %s", source, fileInfo.Mode()) - return nil + continue } if err := copyFileInfo(fileInfo, source, target); err != nil { @@ -172,20 +169,9 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er if err := copyXAttrs(target, source, o.xex, o.xeh); err != nil { return fmt.Errorf("failed to copy xattrs: %w", err) } - return nil } - for { - entry := dr.Next() - if entry == nil { - break - } - - if err := handleEntry(entry); err != nil { - return err - } - } - return dr.Err() + return nil } // CopyFile copies the source file to the target. diff --git a/vendor/github.com/containerd/continuity/fs/dir.go b/vendor/github.com/containerd/continuity/fs/dir.go deleted file mode 100644 index 6c7e32e95d1..00000000000 --- a/vendor/github.com/containerd/continuity/fs/dir.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
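// --- Hedged illustration (editor's addition) of the pattern copyDirectory now
// uses above: list the directory once with os.ReadDir and drive the loop with
// continue, instead of streaming entries through the deleted dirReader helper.
// The walk below only prints paths; it is not the containerd implementation.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func walk(src string) error {
	entries, err := os.ReadDir(src)
	if err != nil {
		return fmt.Errorf("failed to read %s: %w", src, err)
	}
	for _, entry := range entries {
		source := filepath.Join(src, entry.Name())
		if entry.IsDir() {
			if err := walk(source); err != nil {
				return err
			}
			continue // directories are handled recursively, like copyDirectory
		}
		fmt.Println(source)
	}
	return nil
}

func main() {
	if err := walk("."); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}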
-*/ - -package fs - -import ( - "io" - "os" -) - -type dirReader struct { - buf []os.DirEntry - f *os.File - err error -} - -func (r *dirReader) Next() os.DirEntry { - if len(r.buf) == 0 { - infos, err := r.f.ReadDir(32) - if err != nil { - if err != io.EOF { - r.err = err - } - return nil - } - r.buf = infos - } - - if len(r.buf) == 0 { - return nil - } - out := r.buf[0] - r.buf[0] = nil - r.buf = r.buf[1:] - return out -} - -func (r *dirReader) Err() error { - return r.err -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index b4800567345..42bf32aab00 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -9,6 +9,8 @@ func Render(doc []byte) []byte { renderer := NewRoffRenderer() return blackfriday.Run(doc, - []blackfriday.Option{blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions())}...) + []blackfriday.Option{ + blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) } diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index be2b3436062..4b19188d90f 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -1,6 +1,7 @@ package md2man import ( + "bytes" "fmt" "io" "os" @@ -34,10 +35,10 @@ const ( hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" linkTag = "\n\\[la]" linkCloseTag = "\\[ra]" - codespanTag = "\\fB\\fC" + codespanTag = "\\fB" codespanCloseTag = "\\fR" - codeTag = "\n.PP\n.RS\n\n.nf\n" - codeCloseTag = "\n.fi\n.RE\n" + codeTag = "\n.EX\n" + codeCloseTag = "\n.EE\n" quoteTag = "\n.PP\n.RS\n" quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" @@ -86,8 +87,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { // RenderNode is called for each node in a markdown document; based on the node // type the equivalent roff output is sent to the writer func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - - var walkAction = blackfriday.GoToNext + walkAction := blackfriday.GoToNext switch node.Type { case blackfriday.Text: @@ -109,9 +109,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, strongCloseTag) } case blackfriday.Link: - if !entering { - out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. 
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren case blackfriday.Image: // ignore images walkAction = blackfriday.SkipChildren @@ -160,6 +167,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering r.handleTableCell(w, node, entering) case blackfriday.HTMLSpan: // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte(" +// example: " main.go | 10 +++++++--- " func printStat(fileStats []FileStat) string { - padLength := float64(len(" ")) - newlineLength := float64(len("\n")) - separatorLength := float64(len("|")) - // Soft line length limit. The text length calculation below excludes - // length of the change number. Adding that would take it closer to 80, - // but probably not more than 80, until it's a huge number. - lineLength := 72.0 - - // Get the longest filename and longest total change. - var longestLength float64 - var longestTotalChange float64 - for _, fs := range fileStats { - if int(longestLength) < len(fs.Name) { - longestLength = float64(len(fs.Name)) - } - totalChange := fs.Addition + fs.Deletion - if int(longestTotalChange) < totalChange { - longestTotalChange = float64(totalChange) - } - } - - // Parts of the output: - // |<+++/---> - // example: " main.go | 10 +++++++--- " - - // - leftTextLength := padLength + longestLength + padLength - - // <+++++/-----> - // Excluding number length here. - rightTextLength := padLength + padLength + newlineLength + maxGraphWidth := uint(53) + maxNameLen := 0 + maxChangeLen := 0 - totalTextArea := leftTextLength + separatorLength + rightTextLength - heightOfHistogram := lineLength - totalTextArea + scaleLinear := func(it, width, max uint) uint { + if it == 0 || max == 0 { + return 0 + } - // Scale the histogram. - var scaleFactor float64 - if longestTotalChange > heightOfHistogram { - // Scale down to heightOfHistogram. 
- scaleFactor = longestTotalChange / heightOfHistogram - } else { - scaleFactor = 1.0 + return 1 + (it * (width - 1) / max) } - finalOutput := "" for _, fs := range fileStats { - addn := float64(fs.Addition) - deln := float64(fs.Deletion) - addc := int(math.Floor(addn/scaleFactor)) - delc := int(math.Floor(deln/scaleFactor)) - if addc < 0 { - addc = 0 + if len(fs.Name) > maxNameLen { + maxNameLen = len(fs.Name) } - if delc < 0 { - delc = 0 + + changes := strconv.Itoa(fs.Addition + fs.Deletion) + if len(changes) > maxChangeLen { + maxChangeLen = len(changes) } - adds := strings.Repeat("+", addc) - dels := strings.Repeat("-", delc) - finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) } - return finalOutput + result := "" + for _, fs := range fileStats { + add := uint(fs.Addition) + del := uint(fs.Deletion) + np := maxNameLen - len(fs.Name) + cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion)) + + total := add + del + if total > maxGraphWidth { + add = scaleLinear(add, maxGraphWidth, total) + del = scaleLinear(del, maxGraphWidth, total) + } + + adds := strings.Repeat("+", int(add)) + dels := strings.Repeat("-", int(del)) + namePad := strings.Repeat(" ", np) + changePad := strings.Repeat(" ", cp) + + result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels) + } + return result } func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go index e9f7666b838..0fd0e51398f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go @@ -7,6 +7,7 @@ import ( "io" "path" "path/filepath" + "sort" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -27,6 +28,7 @@ var ( ErrFileNotFound = errors.New("file not found") ErrDirectoryNotFound = errors.New("directory not found") ErrEntryNotFound = errors.New("entry not found") + ErrEntriesNotSorted = errors.New("entries in tree are not sorted") ) // Tree is basically like a directory - it references a bunch of other trees @@ -270,6 +272,28 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { return nil } +type TreeEntrySorter []TreeEntry + +func (s TreeEntrySorter) Len() int { + return len(s) +} + +func (s TreeEntrySorter) Less(i, j int) bool { + name1 := s[i].Name + name2 := s[j].Name + if s[i].Mode == filemode.Dir { + name1 += "/" + } + if s[j].Mode == filemode.Dir { + name2 += "/" + } + return name1 < name2 +} + +func (s TreeEntrySorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // Encode transforms a Tree into a plumbing.EncodedObject. 
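// --- Hedged worked example (editor's addition) of the histogram scaling that the
// rewritten printStat above applies: once additions+deletions exceed the 53-column
// budget, each side is mapped with 1 + it*(width-1)/max, so every non-zero count
// keeps at least one symbol.
package main

import "fmt"

func scaleLinear(it, width, max uint) uint {
	if it == 0 || max == 0 {
		return 0
	}
	return 1 + (it * (width - 1) / max)
}

func main() {
	const maxGraphWidth = 53
	add, del := uint(150), uint(50)
	total := add + del // 200 > 53, so both bars are scaled down

	fmt.Println(scaleLinear(add, maxGraphWidth, total)) // 1 + 150*52/200 = 40 "+" symbols
	fmt.Println(scaleLinear(del, maxGraphWidth, total)) // 1 + 50*52/200  = 14 "-" symbols
}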
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { o.SetType(plumbing.TreeObject) @@ -279,7 +303,15 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(w, &err) + + if !sort.IsSorted(TreeEntrySorter(t.Entries)) { + return ErrEntriesNotSorted + } + for _, entry := range t.Entries { + if strings.IndexByte(entry.Name, 0) != -1 { + return fmt.Errorf("malformed filename %q", entry.Name) + } if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go index 6e7b334cbd1..2adb6452880 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go @@ -88,7 +88,9 @@ func (t *treeNoder) Children() ([]noder.Noder, error) { } } - return transformChildren(parent) + var err error + t.children, err = transformChildren(parent) + return t.children, err } // Returns the children of a tree as treenoders. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index 54126febf48..1c4ceee68d0 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -91,9 +91,9 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) ( } type client struct { - c *http.Client + client *http.Client transports *lru.Cache - m sync.RWMutex + mutex sync.RWMutex } // ClientOptions holds user configurable options for the client. @@ -147,7 +147,7 @@ func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transpo } } cl := &client{ - c: c, + client: c, } if opts != nil { @@ -234,10 +234,10 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* // if the client wasn't configured to have a cache for transports then just configure // the transport and use it directly, otherwise try to use the cache. 
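// --- Hedged sketch (editor's addition) of the ordering contract that Tree.Encode
// now enforces above: entries must already be in git tree order, where a directory
// compares as if its name carried a trailing "/". TreeEntrySorter provides that
// order, so callers can sort before encoding.
package main

import (
	"fmt"
	"sort"

	"github.com/go-git/go-git/v5/plumbing/filemode"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	entries := []object.TreeEntry{
		{Name: "foo.txt", Mode: filemode.Regular},
		{Name: "foo", Mode: filemode.Dir}, // compares as "foo/", i.e. after "foo.txt"
		{Name: "bar", Mode: filemode.Regular},
	}

	sort.Sort(object.TreeEntrySorter(entries))
	fmt.Println(sort.IsSorted(object.TreeEntrySorter(entries))) // the check Encode performs

	for _, e := range entries {
		fmt.Println(e.Name) // bar, foo.txt, foo
	}
}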
if c.transports == nil { - tr, ok := c.c.Transport.(*http.Transport) + tr, ok := c.client.Transport.(*http.Transport) if !ok { return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s", - reflect.TypeOf(transport), reflect.TypeOf(c.c.Transport)) + reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport)) } transport = tr.Clone() @@ -258,7 +258,7 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* transport, found = c.fetchTransport(transportOpts) if !found { - transport = c.c.Transport.(*http.Transport).Clone() + transport = c.client.Transport.(*http.Transport).Clone() configureTransport(transport, ep) c.addTransport(transportOpts, transport) } @@ -266,12 +266,12 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* httpClient = &http.Client{ Transport: transport, - CheckRedirect: c.c.CheckRedirect, - Jar: c.c.Jar, - Timeout: c.c.Timeout, + CheckRedirect: c.client.CheckRedirect, + Jar: c.client.Jar, + Timeout: c.client.Timeout, } } else { - httpClient = c.c + httpClient = c.client } s := &session{ diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go index 052f3c8e284..c8db389204a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go @@ -14,21 +14,21 @@ type transportOptions struct { } func (c *client) addTransport(opts transportOptions, transport *http.Transport) { - c.m.Lock() + c.mutex.Lock() c.transports.Add(opts, transport) - c.m.Unlock() + c.mutex.Unlock() } func (c *client) removeTransport(opts transportOptions) { - c.m.Lock() + c.mutex.Lock() c.transports.Remove(opts) - c.m.Unlock() + c.mutex.Unlock() } func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) { - c.m.RLock() + c.mutex.RLock() t, ok := c.transports.Get(opts) - c.m.RUnlock() + c.mutex.RUnlock() if !ok { return nil, false } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go index 46fda73fa41..05dea448f8f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go @@ -49,7 +49,9 @@ type runner struct { func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { c := &command{command: cmd, endpoint: ep, config: r.config} if auth != nil { - c.setAuth(auth) + if err := c.setAuth(auth); err != nil { + return nil, err + } } if err := c.connect(); err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index 0cb70bc0093..7cc0db9b7db 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -470,6 +470,14 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } + var updatedPrune bool + if o.Prune { + updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs) + if err != nil { + return nil, err + } + } + updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force) if err != nil { return nil, err @@ -482,7 +490,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } - if !updated { + if !updated && !updatedPrune { return 
remoteRefs, NoErrAlreadyUpToDate } @@ -574,6 +582,27 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl return err } +func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) { + var updatedPrune bool + for _, spec := range specs { + rev := spec.Reverse() + for _, ref := range localRefs { + if !rev.Match(ref.Name()) { + continue + } + _, err := remoteRefs.Reference(rev.Dst(ref.Name())) + if errors.Is(err, plumbing.ErrReferenceNotFound) { + updatedPrune = true + err := r.s.RemoveReference(ref.Name()) + if err != nil { + return false, err + } + } + } + } + return updatedPrune, nil +} + func (r *Remote) addReferencesToUpdate( refspecs []config.RefSpec, localRefs []*plumbing.Reference, @@ -1099,7 +1128,7 @@ func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earlies } found := false - // stop iterating at the earlist shallow commit, ignoring its parents + // stop iterating at the earliest shallow commit, ignoring its parents // note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents. // as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no // real way of telling whether it will be a fast-forward merge. diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go index 1524a691305..a57c7141f8d 100644 --- a/vendor/github.com/go-git/go-git/v5/repository.go +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -51,19 +51,21 @@ var ( // ErrFetching is returned when the packfile could not be downloaded ErrFetching = errors.New("unable to fetch packfile") - ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") - ErrRepositoryNotExists = errors.New("repository does not exist") - ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") - ErrRepositoryAlreadyExists = errors.New("repository already exists") - ErrRemoteNotFound = errors.New("remote not found") - ErrRemoteExists = errors.New("remote already exists") - ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") - ErrWorktreeNotProvided = errors.New("worktree should be provided") - ErrIsBareRepository = errors.New("worktree not available in a bare repository") - ErrUnableToResolveCommit = errors.New("unable to resolve commit") - ErrPackedObjectsNotSupported = errors.New("packed objects not supported") - ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") - ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") + ErrRepositoryNotExists = errors.New("repository does not exist") + ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") + ErrRepositoryAlreadyExists = errors.New("repository already exists") + ErrRemoteNotFound = errors.New("remote not found") + ErrRemoteExists = errors.New("remote already exists") + ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") + ErrWorktreeNotProvided = errors.New("worktree should be provided") + ErrIsBareRepository = errors.New("worktree not available in a bare repository") + ErrUnableToResolveCommit = errors.New("unable to resolve commit") + ErrPackedObjectsNotSupported = errors.New("packed objects not supported") + ErrSHA256NotSupported 
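// --- Hedged usage sketch (editor's addition) for the prune path added to
// Remote.fetch above: when pruning is requested, local remote-tracking refs whose
// upstream counterpart has disappeared are removed, and a prune-only change no
// longer surfaces NoErrAlreadyUpToDate. The Prune field on FetchOptions is assumed
// to be introduced with this change; the repository path is a placeholder.
package main

import (
	"errors"
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}

	err = repo.Fetch(&git.FetchOptions{
		RemoteName: "origin",
		Prune:      true, // drop refs deleted on the remote
	})
	switch {
	case err == nil:
		fmt.Println("references updated and/or pruned")
	case errors.Is(err, git.NoErrAlreadyUpToDate):
		fmt.Println("nothing to update")
	default:
		panic(err)
	}
}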
= errors.New("go-git was not compiled with SHA256 support") + ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy") + ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes") ) // Repository represents a git repository @@ -1769,8 +1771,43 @@ func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { return nil } +// Merge merges the reference branch into the current branch. +// +// If the merge is not possible (or supported) returns an error without changing +// the HEAD for the current branch. Possible errors include: +// - The merge strategy is not supported. +// - The specific strategy cannot be used (e.g. using FastForwardMerge when one is not possible). +func (r *Repository) Merge(ref plumbing.Reference, opts MergeOptions) error { + if opts.Strategy != FastForwardMerge { + return ErrUnsupportedMergeStrategy + } + + // Ignore error as not having a shallow list is optional here. + shallowList, _ := r.Storer.Shallow() + var earliestShallow *plumbing.Hash + if len(shallowList) > 0 { + earliestShallow = &shallowList[0] + } + + head, err := r.Head() + if err != nil { + return err + } + + ff, err := isFastForward(r.Storer, head.Hash(), ref.Hash(), earliestShallow) + if err != nil { + return err + } + + if !ff { + return ErrFastForwardMergeNotPossible + } + + return r.Storer.SetReference(plumbing.NewHashReference(head.Name(), ref.Hash())) +} + // createNewObjectPack is a helper for RepackObjects taking care -// of creating a new pack. It is used so the the PackfileWriter +// of creating a new pack. It is used so the PackfileWriter // deferred close has the right scope. func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { ow := newObjectWalker(r.Storer) diff --git a/vendor/github.com/go-git/go-git/v5/signer.go b/vendor/github.com/go-git/go-git/v5/signer.go new file mode 100644 index 00000000000..e3ef7ebd31d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/signer.go @@ -0,0 +1,33 @@ +package git + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" +) + +// signableObject is an object which can be signed. +type signableObject interface { + EncodeWithoutSignature(o plumbing.EncodedObject) error +} + +// Signer is an interface for signing git objects. +// message is a reader containing the encoded object to be signed. +// Implementors should return the encoded signature and an error if any. +// See https://git-scm.com/docs/gitformat-signature for more information. +type Signer interface { + Sign(message io.Reader) ([]byte, error) +} + +func signObject(signer Signer, obj signableObject) ([]byte, error) { + encoded := &plumbing.MemoryObject{} + if err := obj.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + r, err := encoded.Reader() + if err != nil { + return nil, err + } + + return signer.Sign(r) +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go index 7bba0d03e31..33800627de7 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go @@ -29,6 +29,8 @@ type node struct { hash []byte children []noder.Noder isDir bool + mode os.FileMode + size int64 } // NewRootNode returns the root node based on a given billy.Filesystem. 
@@ -48,8 +50,15 @@ func NewRootNode( // difftree algorithm will detect changes in the contents of files and also in // their mode. // +// Please note that the hash is calculated on first invocation of Hash(), +// meaning that it will not update when the underlying file changes +// between invocations. +// // The hash of a directory is always a 24-bytes slice of zero values func (n *node) Hash() []byte { + if n.hash == nil { + n.calculateHash() + } return n.hash } @@ -121,81 +130,74 @@ func (n *node) calculateChildren() error { func (n *node) newChildNode(file os.FileInfo) (*node, error) { path := path.Join(n.path, file.Name()) - hash, err := n.calculateHash(path, file) - if err != nil { - return nil, err - } - node := &node{ fs: n.fs, submodules: n.submodules, path: path, - hash: hash, isDir: file.IsDir(), + size: file.Size(), + mode: file.Mode(), } - if hash, isSubmodule := n.submodules[path]; isSubmodule { - node.hash = append(hash[:], filemode.Submodule.Bytes()...) + if _, isSubmodule := n.submodules[path]; isSubmodule { node.isDir = false } return node, nil } -func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { - if file.IsDir() { - return make([]byte, 24), nil - } - - var hash plumbing.Hash - var err error - if file.Mode()&os.ModeSymlink != 0 { - hash, err = n.doCalculateHashForSymlink(path, file) - } else { - hash, err = n.doCalculateHashForRegular(path, file) +func (n *node) calculateHash() { + if n.isDir { + n.hash = make([]byte, 24) + return } - + mode, err := filemode.NewFromOSFileMode(n.mode) if err != nil { - return nil, err + n.hash = plumbing.ZeroHash[:] + return } - - mode, err := filemode.NewFromOSFileMode(file.Mode()) - if err != nil { - return nil, err + if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule { + n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...) + return } - - return append(hash[:], mode.Bytes()...), nil + var hash plumbing.Hash + if n.mode&os.ModeSymlink != 0 { + hash = n.doCalculateHashForSymlink() + } else { + hash = n.doCalculateHashForRegular() + } + n.hash = append(hash[:], mode.Bytes()...) 
} -func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { - f, err := n.fs.Open(path) +func (n *node) doCalculateHashForRegular() plumbing.Hash { + f, err := n.fs.Open(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } defer f.Close() - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := io.Copy(h, f); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } -func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { - target, err := n.fs.Readlink(path) +func (n *node) doCalculateHashForSymlink() plumbing.Hash { + target, err := n.fs.Readlink(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := h.Write([]byte(target)); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } func (n *node) String() string { diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index ad525c1a494..ab11d42db83 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -227,20 +227,17 @@ func (w *Worktree) createBranch(opts *CheckoutOptions) error { } func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { - if !opts.Hash.IsZero() { - return opts.Hash, nil - } - - b, err := w.r.Reference(opts.Branch, true) - if err != nil { - return plumbing.ZeroHash, err - } + hash := opts.Hash + if hash.IsZero() { + b, err := w.r.Reference(opts.Branch, true) + if err != nil { + return plumbing.ZeroHash, err + } - if !b.Name().IsTag() { - return b.Hash(), nil + hash = b.Hash() } - o, err := w.r.Object(plumbing.AnyObject, b.Hash()) + o, err := w.r.Object(plumbing.AnyObject, hash) if err != nil { return plumbing.ZeroHash, err } @@ -248,7 +245,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing switch o := o.(type) { case *object.Tag: if o.TargetType != plumbing.CommitObject { - return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) + return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType) } return o.Target, nil @@ -256,7 +253,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing return o.Hash, nil } - return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type()) + return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type()) } func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { @@ -431,6 +428,10 @@ var worktreeDeny = map[string]struct{}{ func validPath(paths ...string) error { for _, p := range paths { parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') }) + if len(parts) == 0 { + return fmt.Errorf("invalid path: %q", p) + } + if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied { return fmt.Errorf("invalid path prefix: %q", p) } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go index eaa21c3f191..f62054bcb44 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ 
b/vendor/github.com/go-git/go-git/v5/worktree_commit.go @@ -3,6 +3,7 @@ package git import ( "bytes" "errors" + "io" "path" "sort" "strings" @@ -14,6 +15,7 @@ import ( "github.com/go-git/go-git/v5/storage" "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/go-git/go-billy/v5" ) @@ -43,29 +45,30 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error if err != nil { return plumbing.ZeroHash, err } - - t, err := w.r.getTreeFromCommitHash(head.Hash()) + headCommit, err := w.r.CommitObject(head.Hash()) if err != nil { return plumbing.ZeroHash, err } - treeHash = t.Hash - opts.Parents = []plumbing.Hash{head.Hash()} - } else { - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err + opts.Parents = nil + if len(headCommit.ParentHashes) != 0 { + opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]} } + } - h := &buildTreeHelper{ - fs: w.Filesystem, - s: w.r.Storer, - } + idx, err := w.r.Storer.Index() + if err != nil { + return plumbing.ZeroHash, err + } - treeHash, err = h.BuildTree(idx, opts) - if err != nil { - return plumbing.ZeroHash, err - } + h := &buildTreeHelper{ + fs: w.Filesystem, + s: w.r.Storer, + } + + treeHash, err = h.BuildTree(idx, opts) + if err != nil { + return plumbing.ZeroHash, err } commit, err := w.buildCommitObject(msg, opts, treeHash) @@ -125,12 +128,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb ParentHashes: opts.Parents, } - if opts.SignKey != nil { - sig, err := w.buildCommitSignature(commit, opts.SignKey) + // Convert SignKey into a Signer if set. Existing Signer should take priority. + signer := opts.Signer + if signer == nil && opts.SignKey != nil { + signer = &gpgSigner{key: opts.SignKey} + } + if signer != nil { + sig, err := signObject(signer, commit) if err != nil { return plumbing.ZeroHash, err } - commit.PGPSignature = sig + commit.PGPSignature = string(sig) } obj := w.r.Storer.NewEncodedObject() @@ -140,20 +148,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb return w.r.Storer.SetEncodedObject(obj) } -func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := commit.Encode(encoded); err != nil { - return "", err - } - r, err := encoded.Reader() - if err != nil { - return "", err - } +type gpgSigner struct { + key *openpgp.Entity + cfg *packet.Config +} + +func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) { var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { - return "", err + if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil { + return nil, err } - return b.String(), nil + return b.Bytes(), nil } // buildTreeHelper converts a given index.Index file into multiple git objects @@ -263,4 +268,4 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr return hash, nil } return h.s.SetEncodedObject(o) -} \ No newline at end of file +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go index 730108754b9..dd9b2439cfd 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_status.go @@ -271,7 +271,7 @@ func diffTreeIsEquals(a, b noder.Hasher) bool { // no error is returned. When path is a file, the blob.Hash is returned. 
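// --- Hedged sketch (editor's addition) of the Signer hook now used by
// buildCommitObject above: anything implementing Sign(io.Reader) ([]byte, error)
// can produce the commit signature. The CommitOptions.Signer field is assumed to
// be added alongside this change, and the signer below is a stand-in that only
// demonstrates the plumbing, not a real signature.
package main

import (
	"fmt"
	"io"

	git "github.com/go-git/go-git/v5"
)

type loggingSigner struct{}

// Sign receives the commit encoded without its signature and returns the bytes
// that will be stored in the commit's PGPSignature field.
func (loggingSigner) Sign(message io.Reader) ([]byte, error) {
	payload, err := io.ReadAll(message)
	if err != nil {
		return nil, err
	}
	fmt.Printf("would sign %d bytes here\n", len(payload))
	return []byte("placeholder signature"), nil // not a valid signature
}

func main() {
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	if _, err := wt.Commit("example commit", &git.CommitOptions{Signer: loggingSigner{}}); err != nil {
		panic(err)
	}
}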
func (w *Worktree) Add(path string) (plumbing.Hash, error) { // TODO(mcuadros): deprecate in favor of AddWithOption in v6. - return w.doAdd(path, make([]gitignore.Pattern, 0)) + return w.doAdd(path, make([]gitignore.Pattern, 0), false) } func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) { @@ -321,7 +321,7 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { } if opts.All { - _, err := w.doAdd(".", w.Excludes) + _, err := w.doAdd(".", w.Excludes, false) return err } @@ -329,16 +329,11 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { return w.AddGlob(opts.Glob) } - _, err := w.Add(opts.Path) + _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus) return err } -func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) { - s, err := w.Status() - if err != nil { - return plumbing.ZeroHash, err - } - +func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) { idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err @@ -348,6 +343,17 @@ func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbi var added bool fi, err := w.Filesystem.Lstat(path) + + // status is required for doAddDirectory + var s Status + var err2 error + if !skipStatus || fi == nil || fi.IsDir() { + s, err2 = w.Status() + if err2 != nil { + return plumbing.ZeroHash, err2 + } + } + if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path, ignorePattern) } else { @@ -421,8 +427,9 @@ func (w *Worktree) AddGlob(pattern string) error { // doAddFile create a new blob from path and update the index, added is true if // the file added is different from the index. +// if s status is nil will skip the status check and update the index anyway func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) { - if s.File(path).Worktree == Unmodified { + if s != nil && s.File(path).Worktree == Unmodified { return false, h, nil } if len(ignorePattern) > 0 { diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a6e5..7c7f0c69cd9 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4b7..30568e768dc 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. 
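// --- Hedged usage sketch (editor's addition) for the status-skipping add path
// above: with the AddOptions.SkipStatus flag assumed to be introduced with this
// change, a single file is hashed and written to the index without running a full
// worktree Status scan first. Repository path and file name are placeholders.
package main

import (
	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// Stage one file directly; for plain files the status check is bypassed.
	if err := wt.AddWithOptions(&git.AddOptions{Path: "docs/readme.md", SkipStatus: true}); err != nil {
		panic(err)
	}
}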
type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. 
+func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. 
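// --- Hedged, self-contained restatement (editor's addition) of the renderGroup
// contract introduced above, JSON flavour only: a named group becomes a single key
// wrapping its contents, and a group with neither saved values nor args is elided
// entirely. The helper below mirrors the logic for illustration; it is not the
// funcr implementation.
package main

import (
	"bytes"
	"fmt"
)

func renderGroup(name, values, args string) string {
	buf := &bytes.Buffer{}
	needClosingBrace := false
	if name != "" && (values != "" || args != "") {
		fmt.Fprintf(buf, "%q:{", name)
		needClosingBrace = true
	}
	if values != "" {
		buf.WriteString(values)
	}
	if args != "" {
		if values != "" {
			buf.WriteByte(',')
		}
		buf.WriteString(args)
	}
	if needClosingBrace {
		buf.WriteByte('}')
	}
	return buf.String()
}

func main() {
	fmt.Println(renderGroup("req", `"method":"GET"`, `"status":200`)) // "req":{"method":"GET","status":200}
	fmt.Println(renderGroup("req", "", "") == "")                     // true: empty groups are dropped
}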
buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/go-task/slim-sprig/.travis.yml b/vendor/github.com/go-task/slim-sprig/.travis.yml deleted file mode 100644 index 482aa3cd0d9..00000000000 --- a/vendor/github.com/go-task/slim-sprig/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -# Setting sudo access to false will let Travis CI use containers rather than -# VMs to run the tests. For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/go-task/slim-sprig/Makefile b/vendor/github.com/go-task/slim-sprig/Makefile deleted file mode 100644 index 63a93fdf798..00000000000 --- a/vendor/github.com/go-task/slim-sprig/Makefile +++ /dev/null @@ -1,13 +0,0 @@ - -HAS_GLIDE := $(shell command -v glide;) - -.PHONY: test -test: - go test -v . - -.PHONY: setup -setup: -ifndef HAS_GLIDE - go get -u github.com/Masterminds/glide -endif - glide install diff --git a/vendor/github.com/go-task/slim-sprig/appveyor.yml b/vendor/github.com/go-task/slim-sprig/appveyor.yml deleted file mode 100644 index d545a987a3b..00000000000 --- a/vendor/github.com/go-task/slim-sprig/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ - -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\sprig -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go get -u github.com/Masterminds/glide - - set PATH=%GOPATH%\bin;%PATH% - - go version - - go env - -build_script: - - glide install - - go install ./... 
- -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go deleted file mode 100644 index 7427deb8388..00000000000 --- a/vendor/github.com/go-task/slim-sprig/crypto.go +++ /dev/null @@ -1,441 +0,0 @@ -package sprig - -import ( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/hmac" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "hash/adler32" - "math/big" - "net" - "time" - - "github.com/google/uuid" - "golang.org/x/crypto/scrypt" -) - -func sha256sum(input string) string { - hash := sha256.Sum256([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func sha1sum(input string) string { - hash := sha1.Sum([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func adler32sum(input string) string { - hash := adler32.Checksum([]byte(input)) - return fmt.Sprintf("%d", hash) -} - -// uuidv4 provides a safe and secure UUID v4 implementation -func uuidv4() string { - return fmt.Sprintf("%s", uuid.New()) -} - -var master_password_seed = "com.lyndir.masterpassword" - -var password_type_templates = map[string][][]byte{ - "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, - "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), - []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), - []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), - []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), - []byte("CvccCvcvCvccno")}, - "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, - "short": {[]byte("Cvcn")}, - "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, - "pin": {[]byte("nnnn")}, -} - -var template_characters = map[byte]string{ - 'V': "AEIOU", - 'C': "BCDFGHJKLMNPQRSTVWXYZ", - 'v': "aeiou", - 'c': "bcdfghjklmnpqrstvwxyz", - 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", - 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", - 'n': "0123456789", - 'o': "@&%?,=[]_:-+*$#!'^~;()/.", - 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", -} - -func derivePassword(counter uint32, password_type, password, user, site string) string { - var templates = password_type_templates[password_type] - if templates == nil { - return fmt.Sprintf("cannot find password template %s", password_type) - } - - var buffer bytes.Buffer - buffer.WriteString(master_password_seed) - binary.Write(&buffer, binary.BigEndian, uint32(len(user))) - buffer.WriteString(user) - - salt := buffer.Bytes() - key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) - if err != nil { - return fmt.Sprintf("failed to derive password: %s", err) - } - - buffer.Truncate(len(master_password_seed)) - binary.Write(&buffer, binary.BigEndian, uint32(len(site))) - buffer.WriteString(site) - binary.Write(&buffer, binary.BigEndian, counter) - - var hmacv = hmac.New(sha256.New, key) - hmacv.Write(buffer.Bytes()) - var seed = hmacv.Sum(nil) - var temp = templates[int(seed[0])%len(templates)] - - buffer.Truncate(0) - for i, element := range temp { - pass_chars := template_characters[element] 
- pass_char := pass_chars[int(seed[i+1])%len(pass_chars)] - buffer.WriteByte(pass_char) - } - - return buffer.String() -} - -func generatePrivateKey(typ string) string { - var priv interface{} - var err error - switch typ { - case "", "rsa": - // good enough for government work - priv, err = rsa.GenerateKey(rand.Reader, 4096) - case "dsa": - key := new(dsa.PrivateKey) - // again, good enough for government work - if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { - return fmt.Sprintf("failed to generate dsa params: %s", err) - } - err = dsa.GenerateKey(key, rand.Reader) - priv = key - case "ecdsa": - // again, good enough for government work - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - default: - return "Unknown type " + typ - } - if err != nil { - return fmt.Sprintf("failed to generate private key: %s", err) - } - - return string(pem.EncodeToMemory(pemBlockForKey(priv))) -} - -type DSAKeyFormat struct { - Version int - P, Q, G, Y, X *big.Int -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *dsa.PrivateKey: - val := DSAKeyFormat{ - P: k.P, Q: k.Q, G: k.G, - Y: k.Y, X: k.X, - } - bytes, _ := asn1.Marshal(val) - return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} - case *ecdsa.PrivateKey: - b, _ := x509.MarshalECPrivateKey(k) - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - return nil - } -} - -type certificate struct { - Cert string - Key string -} - -func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { - crt := certificate{} - - cert, err := base64.StdEncoding.DecodeString(b64cert) - if err != nil { - return crt, errors.New("unable to decode base64 certificate") - } - - key, err := base64.StdEncoding.DecodeString(b64key) - if err != nil { - return crt, errors.New("unable to decode base64 private key") - } - - decodedCert, _ := pem.Decode(cert) - if decodedCert == nil { - return crt, errors.New("unable to decode certificate") - } - _, err = x509.ParseCertificate(decodedCert.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing certificate: decodedCert.Bytes: %s", - err, - ) - } - - decodedKey, _ := pem.Decode(key) - if decodedKey == nil { - return crt, errors.New("unable to decode key") - } - _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing prive key: decodedKey.Bytes: %s", - err, - ) - } - - crt.Cert = string(cert) - crt.Key = string(key) - - return crt, nil -} - -func generateCertificateAuthority( - cn string, - daysValid int, -) (certificate, error) { - ca := certificate{} - - template, err := getBaseCertTemplate(cn, nil, nil, daysValid) - if err != nil { - return ca, err - } - // Override KeyUsage and IsCA - template.KeyUsage = x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | - x509.KeyUsageCertSign - template.IsCA = true - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return ca, fmt.Errorf("error generating rsa key: %s", err) - } - - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) - if err != nil { - return ca, err - } - - return ca, nil -} - -func generateSelfSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (certificate, error) { - cert := certificate{} - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err 
!= nil { - return cert, err - } - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - - cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) - if err != nil { - return cert, err - } - - return cert, nil -} - -func generateSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, -) (certificate, error) { - cert := certificate{} - - decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) - if decodedSignerCert == nil { - return cert, errors.New("unable to decode certificate") - } - signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) - if err != nil { - return cert, fmt.Errorf( - "error parsing certificate: decodedSignerCert.Bytes: %s", - err, - ) - } - decodedSignerKey, _ := pem.Decode([]byte(ca.Key)) - if decodedSignerKey == nil { - return cert, errors.New("unable to decode key") - } - signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes) - if err != nil { - return cert, fmt.Errorf( - "error parsing prive key: decodedSignerKey.Bytes: %s", - err, - ) - } - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - - cert.Cert, cert.Key, err = getCertAndKey( - template, - priv, - signerCert, - signerKey, - ) - if err != nil { - return cert, err - } - - return cert, nil -} - -func getCertAndKey( - template *x509.Certificate, - signeeKey *rsa.PrivateKey, - parent *x509.Certificate, - signingKey *rsa.PrivateKey, -) (string, string, error) { - derBytes, err := x509.CreateCertificate( - rand.Reader, - template, - parent, - &signeeKey.PublicKey, - signingKey, - ) - if err != nil { - return "", "", fmt.Errorf("error creating certificate: %s", err) - } - - certBuffer := bytes.Buffer{} - if err := pem.Encode( - &certBuffer, - &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) - } - - keyBuffer := bytes.Buffer{} - if err := pem.Encode( - &keyBuffer, - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(signeeKey), - }, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding key: %s", err) - } - - return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil -} - -func getBaseCertTemplate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (*x509.Certificate, error) { - ipAddresses, err := getNetIPs(ips) - if err != nil { - return nil, err - } - dnsNames, err := getAlternateDNSStrs(alternateDNS) - if err != nil { - return nil, err - } - serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) - if err != nil { - return nil, err - } - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: cn, - }, - IPAddresses: ipAddresses, - DNSNames: dnsNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - BasicConstraintsValid: true, - }, nil -} - -func getNetIPs(ips []interface{}) ([]net.IP, error) { - if ips == nil { - return []net.IP{}, nil - } - var 
ipStr string - var ok bool - var netIP net.IP - netIPs := make([]net.IP, len(ips)) - for i, ip := range ips { - ipStr, ok = ip.(string) - if !ok { - return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) - } - netIP = net.ParseIP(ipStr) - if netIP == nil { - return nil, fmt.Errorf("error parsing ip: %s", ipStr) - } - netIPs[i] = netIP - } - return netIPs, nil -} - -func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { - if alternateDNS == nil { - return []string{}, nil - } - var dnsStr string - var ok bool - alternateDNSStrs := make([]string, len(alternateDNS)) - for i, dns := range alternateDNS { - dnsStr, ok = dns.(string) - if !ok { - return nil, fmt.Errorf( - "error processing alternate dns name: %v is not a string", - dns, - ) - } - alternateDNSStrs[i] = dnsStr - } - return alternateDNSStrs, nil -} diff --git a/vendor/github.com/go-task/slim-sprig/glide.lock b/vendor/github.com/go-task/slim-sprig/glide.lock deleted file mode 100644 index 32908e4ed1f..00000000000 --- a/vendor/github.com/go-task/slim-sprig/glide.lock +++ /dev/null @@ -1,31 +0,0 @@ -hash: 6a3f4f83c443958625ff1bafadd95c96d20d729f34e8e8c2fa72782194fc4807 -updated: 2019-01-30T20:16:27.780177826+01:00 -imports: -- name: github.com/google/uuid - version: 064e2069ce9c359c118179501254f67d7d37ba24 -- name: github.com/huandu/xstrings - version: f02667b379e2fb5916c3cda2cf31e0eb885d79f8 -- name: github.com/imdario/mergo - version: 7c29201646fa3de8506f701213473dd407f19646 -- name: github.com/Masterminds/goutils - version: 41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0 -- name: github.com/Masterminds/semver - version: 59c29afe1a994eacb71c833025ca7acf874bb1da -- name: github.com/stretchr/testify - version: c679ae2cc0cb27ec3293fea7e254e47386f05d69 - subpackages: - - assert -- name: golang.org/x/crypto - version: de0752318171da717af4ce24d0a2e8626afaeb11 - subpackages: - - pbkdf2 - - scrypt -testImports: -- name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib diff --git a/vendor/github.com/go-task/slim-sprig/glide.yaml b/vendor/github.com/go-task/slim-sprig/glide.yaml deleted file mode 100644 index 712b2ba88f5..00000000000 --- a/vendor/github.com/go-task/slim-sprig/glide.yaml +++ /dev/null @@ -1,16 +0,0 @@ -package: github.com/Masterminds/sprig -import: -- package: github.com/Masterminds/goutils - version: ^1.0.0 -- package: github.com/google/uuid - version: ^0.2 -- package: golang.org/x/crypto - subpackages: - - scrypt -- package: github.com/Masterminds/semver - version: v1.2.2 -- package: github.com/stretchr/testify -- package: github.com/imdario/mergo - version: ~0.3.7 -- package: github.com/huandu/xstrings - version: ^1.2 diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go deleted file mode 100644 index 2016f66336f..00000000000 --- a/vendor/github.com/go-task/slim-sprig/regex.go +++ /dev/null @@ -1,35 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := 
regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} diff --git a/vendor/github.com/go-task/slim-sprig/semver.go b/vendor/github.com/go-task/slim-sprig/semver.go deleted file mode 100644 index c2bf8a1fdf3..00000000000 --- a/vendor/github.com/go-task/slim-sprig/semver.go +++ /dev/null @@ -1,23 +0,0 @@ -package sprig - -import ( - sv2 "github.com/Masterminds/semver" -) - -func semverCompare(constraint, version string) (bool, error) { - c, err := sv2.NewConstraint(constraint) - if err != nil { - return false, err - } - - v, err := sv2.NewVersion(version) - if err != nil { - return false, err - } - - return c.Check(v), nil -} - -func semver(version string) (*sv2.Version, error) { - return sv2.NewVersion(version) -} diff --git a/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig new file mode 100644 index 00000000000..b0c95367e75 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes new file mode 100644 index 00000000000..176a458f94e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 67% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 0a8069c0a40..2ce45dd4eca 100644 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,137 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + ## Release 2.20.0 (2019-06-18) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 96% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt index 5c95accc2e2..f311b1eaaaa 100644 --- a/vendor/github.com/go-task/slim-sprig/LICENSE.txt +++ b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt @@ -1,5 +1,4 @@ -Sprig -Copyright (C) 2013 Masterminds +Copyright (C) 2013-2020 Masterminds Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 62% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index b70569585f8..b5ab564254f 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,35 +1,32 @@ -# Sprig: Template functions for Go templates -[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) -The Go language comes with a [built-in template -language](http://golang.org/pkg/text/template/), but not -very many template functions. Sprig is a library that provides more than 100 commonly -used template functions. - -It is inspired by the template functions found in -[Twig](http://twig.sensiolabs.org/documentation) and in various -JavaScript libraries, such as [underscore.js](http://underscorejs.org/). +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. +The reason for this is to make this library more lightweight. Most of these +functions (specially crypto ones) are not needed on most apps, but costs a lot +in terms of binary size and compilation time. 
## Usage -**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for detailed instructions and code snippets for the >100 template functions available. -**Go developers**: If you'd like to include Sprig as a library in your program, -our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). For standard usage, read on. -### Load the Sprig library +### Load the Slim-Sprig library -To load the Sprig `FuncMap`: +To load the Slim-Sprig `FuncMap`: ```go import ( - "github.com/Masterminds/sprig" "html/template" + + "github.com/go-task/slim-sprig" ) // This example illustrates that the FuncMap *must* be set before the @@ -37,8 +34,6 @@ import ( tpl := template.Must( template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") ) - - ``` ### Calling the functions inside of templates diff --git a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml new file mode 100644 index 00000000000..8e6346bb19e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '3' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . diff --git a/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go new file mode 100644 index 00000000000..d06e516d49e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 52% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go index d1d6155d72c..ed022ddacac 100644 --- a/vendor/github.com/go-task/slim-sprig/date.go +++ b/vendor/github.com/go-task/slim-sprig/v3/date.go @@ -55,6 +55,14 @@ func dateModify(fmt string, date time.Time) time.Time { return date.Add(d) } +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + func dateAgo(date interface{}) string { var t time.Time @@ -73,11 +81,72 @@ func dateAgo(date interface{}) string { return duration.String() } +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = 
time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + func toDate(fmt, str string) time.Time { t, _ := time.ParseInLocation(fmt, str, time.Local) return t } +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + func unixEpoch(date time.Time) string { return strconv.FormatInt(date.Unix(), 10) } diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 53% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go index ed6a8ab291c..b9f979666dd 100644 --- a/vendor/github.com/go-task/slim-sprig/defaults.go +++ b/vendor/github.com/go-task/slim-sprig/v3/defaults.go @@ -1,10 +1,18 @@ package sprig import ( + "bytes" "encoding/json" + "math/rand" "reflect" + "strings" + "time" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + // dfault checks whether `given` is set, and returns default if not set. // // This returns `d` if `given` appears not to be set, and `given` otherwise. @@ -37,7 +45,7 @@ func empty(given interface{}) bool { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return g.Len() == 0 case reflect.Bool: - return g.Bool() == false + return !g.Bool() case reflect.Complex64, reflect.Complex128: return g.Complex() == 0 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -61,18 +69,90 @@ func coalesce(v ...interface{}) interface{} { return nil } +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. 
+func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + // toJson encodes an item into a JSON string func toJson(v interface{}) string { output, _ := json.Marshal(v) return string(output) } +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + // toPrettyJson encodes an item into a pretty (indented) JSON string func toPrettyJson(v interface{}) string { output, _ := json.MarshalIndent(v, "", " ") return string(output) } +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + // ternary returns the first value if the last value is true, otherwise returns the second value. func ternary(vt interface{}, vf interface{}, v bool) interface{} { if v { diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 69% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go index 026eccb735f..77ebc61b189 100644 --- a/vendor/github.com/go-task/slim-sprig/dict.go +++ b/vendor/github.com/go-task/slim-sprig/v3/dict.go @@ -1,6 +1,11 @@ package sprig -import "github.com/imdario/mergo" +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { d[key] = value @@ -77,26 +82,6 @@ func dict(v ...interface{}) map[string]interface{} { return dict } -func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - // Swallow errors inside of a template. - return "" - } - } - return dst -} - -func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - // Swallow errors inside of a template. 
- return "" - } - } - return dst -} - func values(dict map[string]interface{}) []interface{} { values := []interface{}{} for _, value := range dict { @@ -105,3 +90,29 @@ func values(dict map[string]interface{}) []interface{} { return values } + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 92% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go index 8f8f1d73703..aabb9d4489f 100644 --- a/vendor/github.com/go-task/slim-sprig/doc.go +++ b/vendor/github.com/go-task/slim-sprig/v3/doc.go @@ -1,5 +1,5 @@ /* -Sprig: Template functions for Go. +Package sprig provides template functions for Go. This package contains a number of utility functions for working with data inside of Go `html/template` and `text/template` files. diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 51% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go index 6bf15cf180b..5ea74f89930 100644 --- a/vendor/github.com/go-task/slim-sprig/functions.go +++ b/vendor/github.com/go-task/slim-sprig/v3/functions.go @@ -3,18 +3,18 @@ package sprig import ( "errors" "html/template" + "math/rand" "os" "path" + "path/filepath" + "reflect" "strconv" "strings" ttemplate "text/template" "time" - - util "github.com/Masterminds/goutils" - "github.com/huandu/xstrings" ) -// Produce the function map. +// FuncMap produces the function map. // // Use this to pass the functions into the template engine: // @@ -62,7 +62,7 @@ func GenericFuncMap() map[string]interface{} { } // These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environemnt or global state. +// refer to the environment or global state. var nonhermeticFunctions = []string{ // Date functions "date", @@ -79,81 +79,76 @@ var nonhermeticFunctions = []string{ "randAlpha", "randAscii", "randNumeric", + "randBytes", "uuidv4", // OS "env", "expandenv", + + // Network + "getHostByName", } var genericMap = map[string]interface{}{ "hello": func() string { return "Hello!" 
}, // Date functions - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "now": func() time.Time { return time.Now() }, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "dateInZone": dateInZone, - "dateModify": dateModify, - "ago": dateAgo, - "toDate": toDate, - "unixEpoch": unixEpoch, + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, // Strings - "abbrev": abbrev, - "abbrevboth": abbrevboth, - "trunc": trunc, - "trim": strings.TrimSpace, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "title": strings.Title, - "untitle": untitle, - "substr": substring, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, // Switch order so that "foo" | repeat 5 "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, // Deprecated: Use trimAll. "trimall": func(a, b string) string { return strings.Trim(b, a) }, // Switch order so that "$foo" | trimall "$" - "trimAll": func(a, b string) string { return strings.Trim(b, a) }, - "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, - "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, - "nospace": util.DeleteWhiteSpace, - "initials": initials, - "randAlphaNum": randAlphaNumeric, - "randAlpha": randAlpha, - "randAscii": randAscii, - "randNumeric": randNumeric, - "swapcase": util.SwapCase, - "shuffle": xstrings.Shuffle, - "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, "adler32sum": adler32sum, - "toString": strval, + "toString": strval, // Wrap Atoi to stop errors. 
- "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, //"gt": func(a, b int) bool {return a > b}, //"gte": func(a, b int) bool {return a >= b}, @@ -189,9 +184,12 @@ var genericMap = map[string]interface{}{ } return val }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, "biggest": max, "max": max, "min": min, + "maxf": maxf, + "minf": minf, "ceil": ceil, "floor": floor, "round": round, @@ -202,13 +200,22 @@ var genericMap = map[string]interface{}{ "sortAlpha": sortAlpha, // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "compact": compact, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "ternary": ternary, + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, // Reflection "typeOf": typeOf, @@ -216,18 +223,29 @@ var genericMap = map[string]interface{}{ "typeIsLike": typeIsLike, "kindOf": kindOf, "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, // OS: - "env": func(s string) string { return os.Getenv(s) }, - "expandenv": func(s string) string { return os.ExpandEnv(s) }, + "env": os.Getenv, + "expandenv": os.ExpandEnv, - // File Paths: + // Network: + "getHostByName": getHostByName, + + // Paths: "base": path.Base, "dir": path.Dir, "clean": path.Clean, "ext": path.Ext, "isAbs": path.IsAbs, + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + // Encoding: "b64enc": base64encode, "b64dec": base64decode, @@ -238,6 +256,7 @@ var genericMap = map[string]interface{}{ "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
"list": list, "dict": dict, + "get": get, "set": set, "unset": unset, "hasKey": hasKey, @@ -245,45 +264,54 @@ var genericMap = map[string]interface{}{ "keys": keys, "pick": pick, "omit": omit, - "merge": merge, - "mergeOverwrite": mergeOverwrite, "values": values, "append": push, "push": push, - "prepend": prepend, - "first": first, - "rest": rest, - "last": last, - "initial": initial, - "reverse": reverse, - "uniq": uniq, - "without": without, - "has": has, - "slice": slice, - - // Crypto: - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genSelfSignedCert": generateSelfSignedCertificate, - "genSignedCert": generateSignedCertificate, - - // UUIDs: - "uuidv4": uuidv4, - - // SemVer: - "semver": semver, - "semverCompare": semverCompare, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, // Flow Control: "fail": func(msg string) (string, error) { return "", errors.New(msg) }, // Regex - "regexMatch": regexMatch, - "regexFindAll": regexFindAll, - "regexFind": regexFind, - "regexReplaceAll": regexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "regexSplit": regexSplit, + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, } diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 54% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go index 41e136625fa..ca0fbb78932 100644 --- a/vendor/github.com/go-task/slim-sprig/list.go +++ b/vendor/github.com/go-task/slim-sprig/v3/list.go @@ -2,6 +2,7 @@ package sprig import ( "fmt" + "math" "reflect" "sort" ) @@ -15,6 +16,15 @@ func list(v ...interface{}) []interface{} { } func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -26,14 +36,23 @@ func push(list interface{}, v interface{}) []interface{} { nl[i] = l2.Index(i).Interface() } - return append(nl, v) + return append(nl, v), nil default: - panic(fmt.Sprintf("Cannot push on type %s", tp)) + return nil, fmt.Errorf("Cannot push on type %s", tp) } } func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func 
mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { //return append([]interface{}{v}, list...) tp := reflect.TypeOf(list).Kind() @@ -47,14 +66,67 @@ func prepend(list interface{}, v interface{}) []interface{} { nl[i] = l2.Index(i).Interface() } - return append([]interface{}{v}, nl...) + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil default: - panic(fmt.Sprintf("Cannot prepend on type %s", tp)) + return nil, fmt.Errorf("Cannot chunk type %s", tp) } } func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -62,16 +134,25 @@ func last(list interface{}) interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } - return l2.Index(l - 1).Interface() + return l2.Index(l - 1).Interface(), nil default: - panic(fmt.Sprintf("Cannot find last on type %s", tp)) + return nil, fmt.Errorf("Cannot find last on type %s", tp) } } func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -79,16 +160,25 @@ func first(list interface{}) interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } - return l2.Index(0).Interface() + return l2.Index(0).Interface(), nil default: - panic(fmt.Sprintf("Cannot find first on type %s", tp)) + return nil, fmt.Errorf("Cannot find first on type %s", tp) } } func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -96,7 +186,7 @@ func rest(list interface{}) []interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } nl := make([]interface{}, l-1) @@ -104,13 +194,22 @@ func rest(list interface{}) []interface{} { nl[i-1] = l2.Index(i).Interface() } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot find rest on type %s", tp)) + return nil, fmt.Errorf("Cannot find rest on type %s", tp) } } func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -118,7 +217,7 @@ func initial(list interface{}) []interface{} { l := l2.Len() if l == 0 { - return nil + return nil, 
nil } nl := make([]interface{}, l-1) @@ -126,9 +225,9 @@ func initial(list interface{}) []interface{} { nl[i] = l2.Index(i).Interface() } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot find initial on type %s", tp)) + return nil, fmt.Errorf("Cannot find initial on type %s", tp) } } @@ -145,6 +244,15 @@ func sortAlpha(list interface{}) []string { } func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { tp := reflect.TypeOf(v).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -157,13 +265,22 @@ func reverse(v interface{}) []interface{} { nl[l-i-1] = l2.Index(i).Interface() } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot find reverse on type %s", tp)) + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) } } func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -179,13 +296,22 @@ func compact(list interface{}) []interface{} { } } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot compact on type %s", tp)) + return nil, fmt.Errorf("Cannot compact on type %s", tp) } } func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -201,9 +327,9 @@ func uniq(list interface{}) []interface{} { } } - return dest + return dest, nil default: - panic(fmt.Sprintf("Cannot find uniq on type %s", tp)) + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) } } @@ -217,6 +343,15 @@ func inList(haystack []interface{}, needle interface{}) bool { } func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) + if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -232,15 +367,24 @@ func without(list interface{}, omit ...interface{}) []interface{} { } } - return res + return res, nil default: - panic(fmt.Sprintf("Cannot find without on type %s", tp)) + return nil, fmt.Errorf("Cannot find without on type %s", tp) } } func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { if haystack == nil { - return false + return false, nil } tp := reflect.TypeOf(haystack).Kind() switch tp { @@ -251,13 +395,13 @@ func has(needle interface{}, haystack interface{}) bool { for i := 0; i < l; i++ { item = l2.Index(i).Interface() if reflect.DeepEqual(needle, item) { - return true + return true, nil } } - return false + return false, nil default: - panic(fmt.Sprintf("Cannot find has on type %s", tp)) + return false, fmt.Errorf("Cannot find has on type %s", tp) } } @@ -267,6 +411,15 @@ func has(needle interface{}, haystack interface{}) bool { // slice $list 3 5 -> list[3:5] // slice $list 3 -> list[3:5] = list[3:] func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -274,7 +427,7 @@ func slice(list interface{}, indices ...interface{}) interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } var start, end int @@ -287,8 +440,25 @@ func slice(list interface{}, indices ...interface{}) interface{} { end = toInt(indices[1]) } - return l2.Slice(start, end).Interface() + return l2.Slice(start, end).Interface(), nil default: - panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } } + return res } diff --git a/vendor/github.com/go-task/slim-sprig/v3/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go new file mode 100644 index 00000000000..108d78a9462 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 65% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go index 4bd89bf7f80..98cbb37a193 100644 --- a/vendor/github.com/go-task/slim-sprig/numeric.go +++ b/vendor/github.com/go-task/slim-sprig/v3/numeric.go @@ -1,9 +1,11 @@ package sprig import ( + "fmt" "math" "reflect" "strconv" + "strings" ) // toFloat64 converts 64-bit floats @@ -27,7 +29,7 @@ func toFloat64(v interface{}) float64 { case reflect.Float32, reflect.Float64: return val.Float() case reflect.Bool: - if val.Bool() == true { + if val.Bool() { return 1 } return 0 @@ -67,7 +69,7 @@ func toInt64(v interface{}) int64 { case reflect.Float32, reflect.Float64: return int64(val.Float()) case reflect.Bool: - if val.Bool() == true { + if val.Bool() { return 1 } return 0 @@ -87,6 +89,15 @@ func max(a interface{}, i ...interface{}) int64 { return aa } +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + func min(a interface{}, i ...interface{}) int64 { aa := toInt64(a) for _, b := range i { @@ -98,6 +109,15 @@ func min(a interface{}, i ...interface{}) int64 { return aa } +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + func until(count int) []int { step := 1 if count < 0 { @@ -138,10 +158,10 @@ func ceil(a interface{}) float64 { return math.Ceil(aa) } -func round(a interface{}, p int, r_opt ...float64) float64 { +func round(a interface{}, p int, rOpt ...float64) float64 { roundOn := .5 - if len(r_opt) > 0 { - roundOn = r_opt[0] + if len(rOpt) > 0 { + roundOn = rOpt[0] } val := toFloat64(a) places := toFloat64(p) @@ -157,3 
+177,52 @@ func round(a interface{}, p int, r_opt ...float64) float64 { } return round / pow } + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go new file mode 100644 index 00000000000..fab55101897 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return 
regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 77% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go index 943fa3e8ad5..3c62d6b6f21 100644 --- a/vendor/github.com/go-task/slim-sprig/strings.go +++ b/vendor/github.com/go-task/slim-sprig/v3/strings.go @@ -7,8 +7,6 @@ import ( "reflect" "strconv" "strings" - - util "github.com/Masterminds/goutils" ) func base64encode(v string) string { @@ -35,51 +33,6 @@ func base32decode(v string) string { return string(data) } -func abbrev(width int, s string) string { - if width < 4 { - return s - } - r, _ := util.Abbreviate(s, width) - return r -} - -func abbrevboth(left, right int, s string) string { - if right < 4 || left > 0 && right < 7 { - return s - } - r, _ := util.AbbreviateFull(s, left, right) - return r -} -func initials(s string) string { - // Wrap this just to eliminate the var args, which templates don't do well. - return util.Initials(s) -} - -func randAlphaNumeric(count int) string { - // It is not possible, it appears, to actually generate an error here. - r, _ := util.CryptoRandomAlphaNumeric(count) - return r -} - -func randAlpha(count int) string { - r, _ := util.CryptoRandomAlphabetic(count) - return r -} - -func randAscii(count int) string { - r, _ := util.CryptoRandomAscii(count) - return r -} - -func randNumeric(count int) string { - r, _ := util.CryptoRandomNumeric(count) - return r -} - -func untitle(str string) string { - return util.Uncapitalize(str) -} - func quote(str ...interface{}) string { out := make([]string, 0, len(str)) for _, s := range str { @@ -154,9 +107,9 @@ func strslice(v interface{}) []string { default: if v == nil { return []string{} - } else { - return []string{strval(v)} } + + return []string{strval(v)} } } } @@ -187,10 +140,13 @@ func strval(v interface{}) string { } func trunc(c int, s string) string { - if len(s) <= c { - return s + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] } - return s[0:c] + return s } func join(sep string, v interface{}) string { diff --git a/vendor/github.com/go-task/slim-sprig/v3/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go new file mode 100644 index 00000000000..b8e120e19ba --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string 
{ + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/gofrs/flock/.gitignore b/vendor/github.com/gofrs/flock/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/gofrs/flock/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml new file mode 100644 index 00000000000..b16d040fa89 --- /dev/null +++ b/vendor/github.com/gofrs/flock/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.14.x + - 1.15.x +script: go test -v -check.vv -race ./... +sudo: false +notifications: + email: + on_success: never + on_failure: always diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE new file mode 100644 index 00000000000..8b8ff36fe42 --- /dev/null +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015-2020, Tim Heckman +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of gofrs nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
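The flock README that follows shows the basic `TryLock()` flow; the vendored `flock.go` later in this patch also adds `TryLockContext`, which retries at a fixed interval until the lock is acquired or the context is done. A minimal usage sketch of that variant (the lock path and retry interval below are illustrative assumptions, not values from this patch):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	// Hypothetical lock file path; pick one appropriate for your application.
	fileLock := flock.New("/tmp/example.lock")

	// Retry every 100ms until the lock is acquired or the 5s deadline expires.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	locked, err := fileLock.TryLockContext(ctx, 100*time.Millisecond)
	if err != nil {
		// TryLockContext returns ctx.Err() when the deadline expires.
		fmt.Println("lock error:", err)
		return
	}
	if locked {
		defer fileLock.Unlock()
		fmt.Println("holding lock on", fileLock.Path())
		// ... do work while holding the lock ...
	}
}
```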
diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md new file mode 100644 index 00000000000..71ce63692e8 --- /dev/null +++ b/vendor/github.com/gofrs/flock/README.md @@ -0,0 +1,41 @@ +# flock +[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) +[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) + +`flock` implements a thread-safe sync.Locker interface for file locking. It also +includes a non-blocking TryLock() function to allow locking without blocking execution. + +## License +`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. + +## Go Compatibility +This package makes use of the `context` package that was introduced in Go 1.7. As such, this +package has an implicit dependency on Go 1.7+. + +## Installation +``` +go get -u github.com/gofrs/flock +``` + +## Usage +```Go +import "github.com/gofrs/flock" + +fileLock := flock.New("/var/lock/go-lock.lock") + +locked, err := fileLock.TryLock() + +if err != nil { + // handle locking error +} + +if locked { + // do work + fileLock.Unlock() +} +``` + +For more detailed usage information take a look at the package API docs on +[GoDoc](https://godoc.org/github.com/gofrs/flock). diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml new file mode 100644 index 00000000000..909b4bf7cb4 --- /dev/null +++ b/vendor/github.com/gofrs/flock/appveyor.yml @@ -0,0 +1,25 @@ +version: '{build}' + +build: false +deploy: false + +clone_folder: 'c:\gopath\src\github.com\gofrs\flock' + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.15' + +init: + - git config --global core.autocrlf input + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - set Path=c:\go\bin;c:\gopath\bin;%Path% + - go version + - go env + +test_script: + - go get -t ./... + - go test -race -v ./... diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go new file mode 100644 index 00000000000..95c784ca504 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock.go @@ -0,0 +1,144 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// Package flock implements a thread-safe interface for file locking. +// It also includes a non-blocking TryLock() function to allow locking +// without blocking execution. +// +// Package flock is released under the BSD 3-Clause License. See the LICENSE file +// for more details. +// +// While using this library, remember that the locking behaviors are not +// guaranteed to be the same on each platform. For example, some UNIX-like +// operating systems will transparently convert a shared lock to an exclusive +// lock. If you Unlock() the flock from a location where you believe that you +// have the shared lock, you may accidentally drop the exclusive lock. 
+package flock + +import ( + "context" + "os" + "runtime" + "sync" + "time" +) + +// Flock is the struct type to handle file locking. All fields are unexported, +// with access to some of the fields provided by getter methods (Path() and Locked()). +type Flock struct { + path string + m sync.RWMutex + fh *os.File + l bool + r bool +} + +// New returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +func New(path string) *Flock { + return &Flock{path: path} +} + +// NewFlock returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +// +// Deprecated: Use New instead. +func NewFlock(path string) *Flock { + return New(path) +} + +// Close is equivalent to calling Unlock. +// +// This will release the lock and close the underlying file descriptor. +// It will not remove the file from disk, that's up to your application. +func (f *Flock) Close() error { + return f.Unlock() +} + +// Path returns the path as provided in NewFlock(). +func (f *Flock) Path() string { + return f.path +} + +// Locked returns the lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) Locked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.l +} + +// RLocked returns the read lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) RLocked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.r +} + +func (f *Flock) String() string { + return f.path +} + +// TryLockContext repeatedly tries to take an exclusive lock until one of the +// conditions is met: TryLock succeeds, TryLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryLock, retryDelay) +} + +// TryRLockContext repeatedly tries to take a shared lock until one of the +// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryRLock, retryDelay) +} + +func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + for { + if ok, err := fn(); ok || err != nil { + return ok, err + } + select { + case <-ctx.Done(): + return false, ctx.Err() + case <-time.After(retryDelay): + // try again + } + } +} + +func (f *Flock) setFh() error { + // open a new os.File instance + // create it if it doesn't exist, and open the file read-only. + flags := os.O_CREATE + if runtime.GOOS == "aix" { + // AIX cannot preform write-lock (ie exclusive) on a + // read-only file. + flags |= os.O_RDWR + } else { + flags |= os.O_RDONLY + } + fh, err := os.OpenFile(f.path, flags, os.FileMode(0600)) + if err != nil { + return err + } + + // set the filehandle on the struct + f.fh = fh + return nil +} + +// ensure the file handle is closed if no lock is held +func (f *Flock) ensureFhState() { + if !f.l && !f.r && f.fh != nil { + f.fh.Close() + f.fh = nil + } +} diff --git a/vendor/github.com/gofrs/flock/flock_aix.go b/vendor/github.com/gofrs/flock/flock_aix.go new file mode 100644 index 00000000000..7277c1b6b26 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_aix.go @@ -0,0 +1,281 @@ +// Copyright 2019 Tim Heckman. 
All rights reserved. Use of this source code is +// governed by the BSD 3-Clause license that can be found in the LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// This code is adapted from the Go package: +// cmd/go/internal/lockedfile/internal/filelock + +//+build aix + +package flock + +import ( + "errors" + "io" + "os" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +type lockType int16 + +const ( + readLock lockType = unix.F_RDLCK + writeLock lockType = unix.F_WRLCK +) + +type cmdType int + +const ( + tryLock cmdType = unix.F_SETLK + waitLock cmdType = unix.F_SETLKW +) + +type inode = uint64 + +type inodeLock struct { + owner *Flock + queue []<-chan *Flock +} + +var ( + mu sync.Mutex + inodes = map[*Flock]inode{} + locks = map[inode]inodeLock{} +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, writeLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, readLock) +} + +func (f *Flock) lock(locked *bool, flag lockType) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if _, err := f.doLock(waitLock, flag, true); err != nil { + return err + } + + *locked = true + return nil +} + +func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. 
+ fi, err := f.fh.Stat() + if err != nil { + return false, err + } + ino := inode(fi.Sys().(*syscall.Stat_t).Ino) + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return false, &os.PathError{ + Path: f.Path(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + + inodes[f] = ino + + var wait chan *Flock + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else if !blocking { + // Already owned: cannot take the lock. + mu.Unlock() + return false, nil + } else { + // Already owned: add a channel to wait on. + wait = make(chan *Flock) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + err = setlkw(f.fh.Fd(), cmd, lt) + + if err != nil { + f.doUnlock() + if cmd == tryLock && err == unix.EACCES { + return false, nil + } + return false, err + } + + return true, nil +} + +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + if err := f.doUnlock(); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +func (f *Flock) doUnlock() (err error) { + var owner *Flock + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner == f { + err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK) + } + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, writeLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. 
+func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, readLock) +} + +func (f *Flock) try(locked *bool, flag lockType) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + haslock, err := f.doLock(tryLock, flag, false) + if err != nil { + return false, err + } + + *locked = haslock + return haslock, nil +} + +// setlkw calls FcntlFlock with cmd for the entire file indicated by fd. +func setlkw(fd uintptr, cmd cmdType, lt lockType) error { + for { + err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. + }) + if err != unix.EINTR { + return err + } + } +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go new file mode 100644 index 00000000000..c315a3e2908 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -0,0 +1,197 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build !aix,!windows + +package flock + +import ( + "os" + "syscall" +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, syscall.LOCK_EX) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) lock(locked *bool, flag int) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { + shouldRetry, reopenErr := f.reopenFDOnError(err) + if reopenErr != nil { + return reopenErr + } + + if !shouldRetry { + return err + } + + if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil { + return err + } + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// syscall.LOCK_UN on the file and closes the file descriptor. 
It does not +// remove the file from disk. It's up to your application to do. +// +// Please note, if your shared lock became an exclusive lock this may +// unintentionally drop the exclusive lock if called by the consumer that +// believes they have a shared lock. Please see Lock() for more details. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, syscall.LOCK_EX) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) try(locked *bool, flag int) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + var retried bool +retry: + err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) + + switch err { + case syscall.EWOULDBLOCK: + return false, nil + case nil: + *locked = true + return true, nil + } + if !retried { + if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil { + return false, reopenErr + } else if shouldRetry { + retried = true + goto retry + } + } + + return false, err +} + +// reopenFDOnError determines whether we should reopen the file handle +// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c: +// Since Linux 3.4 (commit 55725513) +// Probably NFSv4 where flock() is emulated by fcntl(). 
+func (f *Flock) reopenFDOnError(err error) (bool, error) { + if err != syscall.EIO && err != syscall.EBADF { + return false, nil + } + if st, err := f.fh.Stat(); err == nil { + // if the file is able to be read and written + if st.Mode()&0600 == 0600 { + f.fh.Close() + f.fh = nil + + // reopen in read-write mode and set the filehandle + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) + if err != nil { + return false, err + } + f.fh = fh + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go new file mode 100644 index 00000000000..fe405a255ae --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_winapi.go @@ -0,0 +1,76 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build windows + +package flock + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32, _ = syscall.LoadLibrary("kernel32.dll") + procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") + procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") +) + +const ( + winLockfileFailImmediately = 0x00000001 + winLockfileExclusiveLock = 0x00000002 + winLockfileSharedLock = 0x00000000 +) + +// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows +// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: +// +// > The function requests an exclusive lock. Otherwise, it requests a shared +// > lock. +// +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + +func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procLockFileEx), + 6, + uintptr(handle), + uintptr(flags), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset))) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} + +func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procUnlockFileEx), + 5, + uintptr(handle), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset)), + 0) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go new file mode 100644 index 00000000000..ddb534ccef0 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -0,0 +1,142 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +package flock + +import ( + "syscall" +) + +// ErrorLockViolation is the error code returned from the Windows syscall when a +// lock would block and you ask to fail immediately. +const ErrorLockViolation syscall.Errno = 0x21 // 33 + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. 
It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, winLockfileExclusiveLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, winLockfileSharedLock) +} + +func (f *Flock) lock(locked *bool, flag uint32) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// UnlockFileEx() on the file and closes the file descriptor. It does not remove +// the file from disk. It's up to your application to do. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, winLockfileExclusiveLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being shared-locked. 
+func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, winLockfileSharedLock) +} + +func (f *Flock) try(locked *bool, flag uint32) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) + + if errNo > 0 { + if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { + return false, nil + } + + return false, errNo + } + + *locked = true + + return true, nil +} diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 182c926b908..860bb304c34 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +539,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53c6c..4580bab1839 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. 
- if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4b66282cb8e..eee0132e740 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 60ef7e92687..62df80a5563 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -145,6 +145,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -436,7 +437,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. -func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -458,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -466,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -627,10 +634,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" 
if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" diff --git a/vendor/github.com/itchyny/gojq/.dockerignore b/vendor/github.com/itchyny/gojq/.dockerignore index c8e02dc8f5b..54095155ccf 100644 --- a/vendor/github.com/itchyny/gojq/.dockerignore +++ b/vendor/github.com/itchyny/gojq/.dockerignore @@ -1,9 +1,16 @@ /gojq /goxz /CREDITS -/._* /y.output *.exe *.test *.out -/.github/ +*.md +*.y +**/*.jq +**/*.json +**/*.yaml +**/*_test.go +.github +_gojq +_tools diff --git a/vendor/github.com/itchyny/gojq/.gitattributes b/vendor/github.com/itchyny/gojq/.gitattributes index 9c2075be6e1..797f3959396 100644 --- a/vendor/github.com/itchyny/gojq/.gitattributes +++ b/vendor/github.com/itchyny/gojq/.gitattributes @@ -1,2 +1,3 @@ **/testdata/** binary -/builtin.go eol=lf +/builtin.go eol=lf linguist-generated=true +/parser.go eol=lf linguist-generated=true diff --git a/vendor/github.com/itchyny/gojq/.gitignore b/vendor/github.com/itchyny/gojq/.gitignore index e350f9308d6..bbeb991f8c3 100644 --- a/vendor/github.com/itchyny/gojq/.gitignore +++ b/vendor/github.com/itchyny/gojq/.gitignore @@ -1,7 +1,6 @@ /gojq /goxz /CREDITS -/._* /y.output *.exe *.test diff --git a/vendor/github.com/itchyny/gojq/CHANGELOG.md b/vendor/github.com/itchyny/gojq/CHANGELOG.md index 65e605fb6e4..9ae257a2e31 100644 --- a/vendor/github.com/itchyny/gojq/CHANGELOG.md +++ b/vendor/github.com/itchyny/gojq/CHANGELOG.md @@ -1,4 +1,46 @@ # Changelog +## [v0.12.16](https://github.com/itchyny/gojq/compare/v0.12.15..v0.12.16) (2024-06-01) +* fix offset of query parsing error on multi-byte characters +* fix tests of `exp10` and `atan2` failing on some platforms +* fix `debug/1` to be available only when `debug/0` is defined +* improve parser to allow binary operators as object values +* improve compiler to emit error if query is missing + +## [v0.12.15](https://github.com/itchyny/gojq/compare/v0.12.14..v0.12.15) (2024-04-01) +* implement `ltrim`, `rtrim`, and `trim` functions +* implement `gojq.ParseError` for getting the offset and token of query parsing error +* implement `gojq.HaltError` for detecting halt errors and stopping outer iteration +* fix object construction with duplicate keys (`{x:0,y:1} | {a:.x,a:.y}`) +* fix `halt` and `halt_error` functions to stop the command execution immediately +* fix variable scope of binding syntax (`"a" as $v | def f: $v; "b" as $v | f`) +* fix pre-defined variables to be available in initial modules (`$ARGS` in `~/.jq`) +* fix `ltrimstr` and `rtrimstr` functions to emit error on non-string input +* fix `nearbyint` and `rint` functions to round ties to even +* improve parser to allow `reduce`, `foreach`, `if`, `try`-`catch` syntax as object values +* remove `pow10` in favor of `exp10`, define `scalbn` and `scalbln` by `ldexp` + +## [v0.12.14](https://github.com/itchyny/gojq/compare/v0.12.13..v0.12.14) (2023-12-01) +* implement `abs`, `pick`, and `debug/1` functions +* implement `--raw-output0` option, and remove `--nul-output` (`-0`) option +* fix string multiplication by zero to emit an empty string +* fix zero divided by zero to emit an error, not `nan` +* fix modulo operator to emit `nan` if either side is `nan` +* fix `implode` function to emit replacement characters on invalid code points +* fix `stderr` function to output strings in raw format +* fix `error` function to throw an 
error even for `null` +* fix `walk` function on multiple outputs arguments +* fix `--from-file` option to work with `--args` and `--jsonargs` options +* fix the default module search path `../lib` relative to the executable +* improve query parser to support comment continuation with backslash +* improve `modulemeta` function to include defined function names in the module +* improve search path of `import` and `include` directives to support `$ORIGIN` expansion +* remove deprecated `leaf_paths` function + +## [v0.12.13](https://github.com/itchyny/gojq/compare/v0.12.12..v0.12.13) (2023-06-01) +* implement `@urid` format string to decode URI values +* fix functions returning arrays not to emit nil slices (`flatten`, `group_by`, + `unique`, `unique_by`, `nth`, `indices`, `path`, and `modulemeta.deps`) + ## [v0.12.12](https://github.com/itchyny/gojq/compare/v0.12.11..v0.12.12) (2023-03-01) * fix assignment operator (`=`) with overlapping paths and multiple values (`[[]] | .. = ..`) * fix crash on multiplying large numbers to an empty string (`9223372036854775807 * ""`) diff --git a/vendor/github.com/itchyny/gojq/Dockerfile b/vendor/github.com/itchyny/gojq/Dockerfile index 284ece77625..d5e0dce63e2 100644 --- a/vendor/github.com/itchyny/gojq/Dockerfile +++ b/vendor/github.com/itchyny/gojq/Dockerfile @@ -1,6 +1,8 @@ -FROM golang:1.19 AS builder +FROM golang:1.22 AS builder WORKDIR /app +COPY go.* ./ +RUN go mod download COPY . . ENV CGO_ENABLED 0 RUN make build diff --git a/vendor/github.com/itchyny/gojq/LICENSE b/vendor/github.com/itchyny/gojq/LICENSE index 3f4fcb26ccb..fe59004071d 100644 --- a/vendor/github.com/itchyny/gojq/LICENSE +++ b/vendor/github.com/itchyny/gojq/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2019-2023 itchyny +Copyright (c) 2019-2024 itchyny Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/itchyny/gojq/README.md b/vendor/github.com/itchyny/gojq/README.md index 6370e4409e5..7b34f93c348 100644 --- a/vendor/github.com/itchyny/gojq/README.md +++ b/vendor/github.com/itchyny/gojq/README.md @@ -1,11 +1,11 @@ # gojq -[![CI Status](https://github.com/itchyny/gojq/workflows/CI/badge.svg)](https://github.com/itchyny/gojq/actions) +[![CI Status](https://github.com/itchyny/gojq/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/itchyny/gojq/actions?query=branch:main) [![Go Report Card](https://goreportcard.com/badge/github.com/itchyny/gojq)](https://goreportcard.com/report/github.com/itchyny/gojq) [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/itchyny/gojq/blob/main/LICENSE) [![release](https://img.shields.io/github/release/itchyny/gojq/all.svg)](https://github.com/itchyny/gojq/releases) [![pkg.go.dev](https://pkg.go.dev/badge/github.com/itchyny/gojq)](https://pkg.go.dev/github.com/itchyny/gojq) -### Pure Go implementation of [jq](https://github.com/stedolan/jq) +### Pure Go implementation of [jq](https://github.com/jqlang/jq) This is an implementation of jq command written in Go language. You can also embed gojq as a library to your Go products. @@ -77,10 +77,9 @@ docker run -i --rm ghcr.io/itchyny/gojq - gojq implements nice error messages for invalid query and JSON input. The error message of jq is sometimes difficult to tell where to fix the query. - gojq does not keep the order of object keys. 
I understand this might cause problems for some scripts but basically, we should not rely on the order of object keys. Due to this limitation, gojq does not have `keys_unsorted` function and `--sort-keys` (`-S`) option. I would implement when ordered map is implemented in the standard library of Go but I'm less motivated. - gojq supports arbitrary-precision integer calculation while jq does not; jq loses the precision of large integers when calculation is involved. Note that even with gojq, all mathematical functions, including `floor` and `round`, convert integers to floating-point numbers; only addition, subtraction, multiplication, modulo, and division operators (when divisible) keep the integer precision. To calculate floor division of integers without losing the precision, use `def idivide($n): (. - . % $n) / $n;`. To round down floating-point numbers to integers, use `def ifloor: floor | tostring | tonumber;`, but note that this function does not work with large floating-point numbers and also loses the precision of large integers. -- gojq fixes various bugs of jq. gojq correctly deletes elements of arrays by `|= empty` ([jq#2051](https://github.com/stedolan/jq/issues/2051)). gojq fixes `try`/`catch` handling ([jq#1859](https://github.com/stedolan/jq/issues/1859), [jq#1885](https://github.com/stedolan/jq/issues/1885), [jq#2140](https://github.com/stedolan/jq/issues/2140)). gojq fixes `nth/2` to output nothing when the count is equal to or larger than the stream size ([jq#1867](https://github.com/stedolan/jq/issues/1867)). gojq consistently counts by characters (not by bytes) in `index`, `rindex`, and `indices` functions; `"12345" | .[index("3"):]` results in `"345"` ([jq#1430](https://github.com/stedolan/jq/issues/1430), [jq#1624](https://github.com/stedolan/jq/issues/1624)). gojq handles overlapping occurrence differently in `rindex` and `indices`; `"ababa" | [rindex("aba"), indices("aba")]` results in `[2,[0,2]]` ([jq#2433](https://github.com/stedolan/jq/issues/2433)). gojq supports string indexing; `"abcde"[2]` ([jq#1520](https://github.com/stedolan/jq/issues/1520)). gojq accepts indexing query `.e0` ([jq#1526](https://github.com/stedolan/jq/issues/1526), [jq#1651](https://github.com/stedolan/jq/issues/1651)), and allows `gsub` to handle patterns including `"^"` ([jq#2148](https://github.com/stedolan/jq/issues/2148)). gojq improves variable lexer to allow using keywords for variable names, especially in binding patterns, also disallows spaces after `$` ([jq#526](https://github.com/stedolan/jq/issues/526)). gojq fixes handling files with no newline characters at the end ([jq#2374](https://github.com/stedolan/jq/issues/2374)). -- gojq truncates down floating-point numbers on indexing (`[0] | .[0.5]` results in `0` not `null`), and slicing (`[0,1,2] | .[0.5:1.5]` results in `[0]` not `[0,1]`). gojq parses unary operators with higher precedence than variable binding (`[-1 as $x | 1,$x]` results in `[1,-1]` not `[-1,-1]`). gojq implements `@uri` to escape all the reserved characters defined in RFC 3986, Sec. 2.2 ([jq#1506](https://github.com/stedolan/jq/issues/1506)), and fixes `@base64d` to allow binary string as the decoded string ([jq#1931](https://github.com/stedolan/jq/issues/1931)). 
gojq improves time formatting and parsing; deals with `%f` in `strftime` and `strptime` ([jq#1409](https://github.com/stedolan/jq/issues/1409)), parses timezone offsets with `fromdate` and `fromdateiso8601` ([jq#1053](https://github.com/stedolan/jq/issues/1053)), supports timezone name/offset with `%Z`/`%z` in `strptime` ([jq#929](https://github.com/stedolan/jq/issues/929), [jq#2195](https://github.com/stedolan/jq/issues/2195)), and looks up correct timezone during daylight saving time on formatting with `%Z` ([jq#1912](https://github.com/stedolan/jq/issues/1912)). gojq supports nanoseconds in date and time functions. -- gojq does not support some functions intentionally; `get_jq_origin`, `get_prog_origin`, `get_search_list` (unstable, not listed in jq document), `input_line_number`, `$__loc__` (performance issue), `recurse_down` (deprecated in jq). gojq does not support some flags; `--ascii-output, -a` (performance issue), `--seq` (not used commonly), `--sort-keys, -S` (sorts by default because `map[string]any` does not keep the order), `--unbuffered` (unbuffered by default). gojq does not parse JSON extensions supported by jq; `NaN`, `Infinity`, and `[000]`. gojq normalizes floating-point numbers to fit to double-precision floating-point numbers. gojq does not support or behaves differently with some regular expression metacharacters and flags (regular expression engine differences). gojq does not support BOM (`encoding/json` does not support this). gojq disallows using keywords for function names (`def true: .; true` is a confusing query), and module name prefixes in function declarations (using module prefixes like `def m::f: .;` is undocumented). -- gojq supports reading from YAML input (`--yaml-input`) while jq does not. gojq also supports YAML output (`--yaml-output`). +- gojq behaves differently than jq in some features, hoping that jq will fix the behaviors in the future. gojq consistently counts by characters (not by bytes) in `index`, `rindex`, and `indices` functions; `"12345" | .[index("3"):]` results in `"345"` ([jq#1430](https://github.com/jqlang/jq/issues/1430), [jq#1624](https://github.com/jqlang/jq/issues/1624)). gojq supports string indexing; `"abcde"[2]` ([jq#1520](https://github.com/jqlang/jq/issues/1520)). gojq fixes handling files with no newline characters at the end ([jq#2374](https://github.com/jqlang/jq/issues/2374)). gojq consistently truncates down floating-point number indices both in indexing (`[0] | .[0.5]` results in `0`), and slicing (`[0,1,2] | .[0.5:1.5]` results in `[0]`). gojq parses unary operators with higher precedence than variable binding (`[-1 as $x | 1,$x]` results in `[1,-1]` not `[-1,-1]`) ([jq#3053](https://github.com/jqlang/jq/pull/3053)). gojq fixes `@base64d` to allow binary string as the decoded string ([jq#1931](https://github.com/jqlang/jq/issues/1931)). gojq improves time formatting and parsing; deals with `%f` in `strftime` and `strptime` ([jq#1409](https://github.com/jqlang/jq/issues/1409)), parses timezone offsets with `fromdate` and `fromdateiso8601` ([jq#1053](https://github.com/jqlang/jq/issues/1053)), supports timezone name/offset with `%Z`/`%z` in `strptime` ([jq#929](https://github.com/jqlang/jq/issues/929), [jq#2195](https://github.com/jqlang/jq/issues/2195)), and looks up correct timezone during daylight saving time on formatting with `%Z` ([jq#1912](https://github.com/jqlang/jq/issues/1912)). gojq supports nanoseconds in date and time functions. 
+- gojq does not support some functions intentionally; `get_jq_origin`, `get_prog_origin`, `get_search_list` (unstable, not listed in jq document), `input_line_number`, `$__loc__` (performance issue). gojq does not support some flags; `--ascii-output, -a` (performance issue), `--seq` (not used commonly), `--sort-keys, -S` (sorts by default because `map[string]any` does not keep the order), `--unbuffered` (unbuffered by default). gojq does not parse JSON extensions supported by jq; `NaN`, `Infinity`, and `[000]`. gojq normalizes floating-point numbers to fit to double-precision floating-point numbers. gojq does not support some regular expression metacharacters, backreferences, look-around assertions, and some flags (regular expression engine differences). gojq does not support BOM (`encoding/json` does not support this). gojq disallows using keywords for function names (`def true: .; true` is a confusing query), and module name prefixes in function declarations (using module prefixes like `def m::f: .;` is undocumented). +- gojq supports reading from YAML input (`--yaml-input`) while jq does not. gojq also supports YAML output (`--yaml-output`). gojq supports `@urid` format string ([jq#798](https://github.com/jqlang/jq/issues/798), [jq#2261](https://github.com/jqlang/jq/issues/2261)). ### Color configuration The gojq command automatically disables coloring output when the output is not a tty. @@ -117,6 +116,9 @@ func main() { break } if err, ok := v.(error); ok { + if err, ok := err.(*gojq.HaltError); ok && err.Value() == nil { + break + } log.Fatalln(err) } fmt.Printf("%#v\n", v) @@ -125,12 +127,18 @@ func main() { ``` - Firstly, use [`gojq.Parse(string) (*Query, error)`](https://pkg.go.dev/github.com/itchyny/gojq#Parse) to get the query from a string. + - Use [`gojq.ParseError`](https://pkg.go.dev/github.com/itchyny/gojq#ParseError) to get the error position and token of the parsing error. - Secondly, get the result iterator - using [`query.Run`](https://pkg.go.dev/github.com/itchyny/gojq#Query.Run) or [`query.RunWithContext`](https://pkg.go.dev/github.com/itchyny/gojq#Query.RunWithContext) - or alternatively, compile the query using [`gojq.Compile`](https://pkg.go.dev/github.com/itchyny/gojq#Compile) and then [`code.Run`](https://pkg.go.dev/github.com/itchyny/gojq#Code.Run) or [`code.RunWithContext`](https://pkg.go.dev/github.com/itchyny/gojq#Code.RunWithContext). You can reuse the `*Code` against multiple inputs to avoid compilation of the same query. But for arguments of `code.Run`, do not give values sharing same data between multiple calls. - In either case, you cannot use custom type values as the query input. The type should be `[]any` for an array and `map[string]any` for a map (just like decoded to an `any` using the [encoding/json](https://golang.org/pkg/encoding/json/) package). You can't use `[]int` or `map[string]string`, for example. If you want to query your custom struct, marshal to JSON, unmarshal to `any` and use it as the query input. - Thirdly, iterate through the results using [`iter.Next() (any, bool)`](https://pkg.go.dev/github.com/itchyny/gojq#Iter). The iterator can emit an error so make sure to handle it. The method returns `true` with results, and `false` when the iterator terminates. - - The return type is not `(any, error)` because iterators can emit multiple errors and you can continue after an error. It is difficult for the iterator to tell the termination in this situation. 
+ - The return type is not `(any, error)` because the iterator may emit multiple errors. The `jq` and `gojq` commands stop the iteration on the first error, but the library user can choose to stop the iteration on errors, or to continue until it terminates. + - In any case, it is recommended to stop the iteration on [`gojq.HaltError`](https://pkg.go.dev/github.com/itchyny/gojq#HaltError), which is emitted by `halt` and `halt_error` functions, although these functions are rarely used. + The error implements [`gojq.ValueError`](https://pkg.go.dev/github.com/itchyny/gojq#ValueError), and if the error value is `nil`, stop the iteration without handling the error. + Technically speaking, we can fix the iterator to terminate on the halting error, but it does not terminate at the moment. + The `halt` function in jq not only stops the iteration, but also terminates the command execution, even if there are still input values. + So, gojq leaves it up to the library user how to handle the halting error. - Note that the result iterator may emit infinite number of values; `repeat(0)` and `range(infinite)`. It may stuck with no output value; `def f: f; f`. Use `RunWithContext` when you want to limit the execution time. [`gojq.Compile`](https://pkg.go.dev/github.com/itchyny/gojq#Compile) allows to configure the following compiler options. @@ -146,7 +154,7 @@ func main() { Report bug at [Issues・itchyny/gojq - GitHub](https://github.com/itchyny/gojq/issues). ## Author -itchyny (https://github.com/itchyny) +itchyny () ## License This software is released under the MIT License, see LICENSE. diff --git a/vendor/github.com/itchyny/gojq/_gojq b/vendor/github.com/itchyny/gojq/_gojq index d403a314607..01e4c4f77f7 100644 --- a/vendor/github.com/itchyny/gojq/_gojq +++ b/vendor/github.com/itchyny/gojq/_gojq @@ -3,9 +3,9 @@ _gojq() { _arguments -s -S \ - '(-r --raw-output -j --join-output -0 --nul-output)'{-r,--raw-output}'[output raw strings]' \ - '(-r --raw-output -j --join-output -0 --nul-output)'{-j,--join-output}'[output without newlines]' \ - '(-r --raw-output -j --join-output -0 --nul-output)'{-0,--nul-output}'[output with NUL character]' \ + '(-r --raw-output --raw-output0 -j --join-output)'{-r,--raw-output}'[output raw strings]' \ + '(-r --raw-output -j --join-output)--raw-output0[implies -r with NUL character delimiter]' \ + '(-r --raw-output --raw-output0 -j --join-output)'{-j,--join-output}'[implies -r with no newline delimiter]' \ '(-c --compact-output --indent --tab --yaml-output)'{-c,--compact-output}'[output without pretty-printing]' \ '(-c --compact-output --tab --yaml-output)--indent=[number of spaces for indentation]:indentation count:(2 4 8)' \ '(-c --compact-output --indent --yaml-output)--tab[use tabs for indentation]' \ @@ -17,7 +17,7 @@ _gojq() '(-R --raw-input --yaml-input)--stream[parse input in stream fashion]' \ '(-R --raw-input --stream )--yaml-input[read input as YAML format]' \ '(-s --slurp)'{-s,--slurp}'[read all inputs into an array]' \ - '(-f --from-file 1)'{-f,--from-file}='[load query from file]:filename of jq query:_files' \ + '(-f --from-file 1)'{-f,--from-file}'[load query from file]:filename of jq query:_files' \ '*-L=[directory to search modules from]:module directory:_directories' \ '*--arg[set a string value to a variable]:variable name: :string value' \ '*--argjson[set a JSON value to a variable]:variable name: :JSON value' \ diff --git a/vendor/github.com/itchyny/gojq/builtin.go b/vendor/github.com/itchyny/gojq/builtin.go index ccf31358773..89b03dc7aa0 100644 --- 
a/vendor/github.com/itchyny/gojq/builtin.go +++ b/vendor/github.com/itchyny/gojq/builtin.go @@ -4,65 +4,65 @@ package gojq func init() { builtinFuncDefs = map[string][]*FuncDef{ - "IN": []*FuncDef{&FuncDef{Name: "IN", Args: []string{"s"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{&Query{Left: &Query{Func: "s"}, Op: OpEq, Right: &Query{Func: "."}}, &Query{Func: "."}}}}}}, &FuncDef{Name: "IN", Args: []string{"src", "s"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{&Query{Left: &Query{Func: "src"}, Op: OpEq, Right: &Query{Func: "s"}}, &Query{Func: "."}}}}}}}, - "INDEX": []*FuncDef{&FuncDef{Name: "INDEX", Args: []string{"stream", "idx_expr"}, Body: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "stream"}}, Pattern: &Pattern{Name: "$row"}, Start: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{}}}, Update: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Left: &Query{Func: "$row"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "idx_expr"}, Op: OpPipe, Right: &Query{Func: "tostring"}}}}}}, Op: OpAssign, Right: &Query{Func: "$row"}}}}}}, &FuncDef{Name: "INDEX", Args: []string{"idx_expr"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "INDEX", Args: []*Query{&Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}}}}, &Query{Func: "idx_expr"}}}}}}}, - "JOIN": []*FuncDef{&FuncDef{Name: "JOIN", Args: []string{"$idx", "idx_expr"}, Body: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$idx"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Func: "idx_expr"}}}}}}}}}}}}}}}, &FuncDef{Name: "JOIN", Args: []string{"$idx", "stream", "idx_expr"}, Body: &Query{Left: &Query{Func: "stream"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$idx"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Func: "idx_expr"}}}}}}}}}}}}, &FuncDef{Name: "JOIN", Args: []string{"$idx", "stream", "idx_expr", "join_expr"}, Body: &Query{Left: &Query{Func: "stream"}, Op: OpPipe, Right: &Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$idx"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Func: "idx_expr"}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "join_expr"}}}}}, - "_assign": []*FuncDef{}, - "_modify": []*FuncDef{}, - "all": []*FuncDef{&FuncDef{Name: "all", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "all", Args: []*Query{&Query{Func: "."}}}}}}, &FuncDef{Name: "all", Args: []string{"y"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "all", Args: []*Query{&Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}}}}, &Query{Func: "y"}}}}}}, &FuncDef{Name: "all", Args: []string{"g", "y"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "isempty", Args: []*Query{&Query{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Term: 
&Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "y"}, Op: OpPipe, Right: &Query{Func: "not"}}}}}}}}}}}}}, - "any": []*FuncDef{&FuncDef{Name: "any", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{&Query{Func: "."}}}}}}, &FuncDef{Name: "any", Args: []string{"y"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{&Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}}}}, &Query{Func: "y"}}}}}}, &FuncDef{Name: "any", Args: []string{"g", "y"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "isempty", Args: []*Query{&Query{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Func: "y"}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "not"}}}}, - "arrays": []*FuncDef{&FuncDef{Name: "arrays", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}}}}}}}, - "booleans": []*FuncDef{&FuncDef{Name: "booleans", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "boolean"}}}}}}}}}}, - "capture": []*FuncDef{&FuncDef{Name: "capture", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "capture", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "null"}}}}}}, &FuncDef{Name: "capture", Args: []string{"$re", "$flags"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "$flags"}}}}}, Op: OpPipe, Right: &Query{Func: "_capture"}}}}, - "combinations": []*FuncDef{&FuncDef{Name: "combinations", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}, Else: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, SuffixList: []*Suffix{&Suffix{Iter: true}, &Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$x"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "$x"}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}, IsSlice: true}}}, Op: OpPipe, Right: &Query{Func: "combinations"}}}}}}}}}}}}}}, &FuncDef{Name: "combinations", Args: []string{"n"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "limit", Args: []*Query{&Query{Func: "n"}, &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "repeat", Args: []*Query{&Query{Func: "."}}}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "combinations"}}}}, - "del": []*FuncDef{&FuncDef{Name: "del", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "delpaths", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{&Query{Func: "f"}}}}}}}}}}}}}}, - "finites": 
[]*FuncDef{&FuncDef{Name: "finites", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Func: "isfinite"}}}}}}}, - "first": []*FuncDef{&FuncDef{Name: "first", Body: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}, &FuncDef{Name: "first", Args: []string{"g"}, Body: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}}}}}}}}, - "fromdate": []*FuncDef{&FuncDef{Name: "fromdate", Body: &Query{Func: "fromdateiso8601"}}}, - "fromdateiso8601": []*FuncDef{&FuncDef{Name: "fromdateiso8601", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "strptime", Args: []*Query{&Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "%Y-%m-%dT%H:%M:%S%z"}}}}}}}, Op: OpPipe, Right: &Query{Func: "mktime"}}}}, - "fromstream": []*FuncDef{&FuncDef{Name: "fromstream", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{KeyVals: []*ObjectKeyVal{&ObjectKeyVal{Key: "x", Val: &ObjectVal{Queries: []*Query{&Query{Func: "null"}}}}, &ObjectKeyVal{Key: "e", Val: &ObjectVal{Queries: []*Query{&Query{Func: "false"}}}}}}, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$init"}}, Body: &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "f"}}, Pattern: &Pattern{Name: "$i"}, Start: &Query{Func: "$init"}, Update: &Query{Left: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "e"}}}, Then: &Query{Func: "$init"}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "$i"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "2"}}}}, Then: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "e"}}}}}}, &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{&Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "x"}}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}}, &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}}}}}}, Else: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "e"}}}}}}, &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{&Suffix{Index: 
&Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}}}}}}, Extract: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "e"}}}, Then: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "x"}}}, Else: &Query{Func: "empty"}}}}}}}}}}}}}}, - "group_by": []*FuncDef{&FuncDef{Name: "group_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_group_by", Args: []*Query{&Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, - "gsub": []*FuncDef{&FuncDef{Name: "gsub", Args: []string{"$re", "str"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "sub", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "str"}, &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "g"}}}}}}}}, &FuncDef{Name: "gsub", Args: []string{"$re", "str", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "sub", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "str"}, &Query{Left: &Query{Func: "$flags"}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "g"}}}}}}}}}}, - "in": []*FuncDef{&FuncDef{Name: "in", Args: []string{"xs"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$x"}}, Body: &Query{Left: &Query{Func: "xs"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "has", Args: []*Query{&Query{Func: "$x"}}}}}}}}}}}}}, - "inputs": []*FuncDef{&FuncDef{Name: "inputs", Body: &Query{Term: &Term{Type: TermTypeTry, Try: &Try{Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "repeat", Args: []*Query{&Query{Func: "input"}}}}}, Catch: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "break"}}}}, Then: &Query{Func: "empty"}, Else: &Query{Func: "error"}}}}}}}}}, - "inside": []*FuncDef{&FuncDef{Name: "inside", Args: []string{"xs"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$x"}}, Body: &Query{Left: &Query{Func: "xs"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "contains", Args: []*Query{&Query{Func: "$x"}}}}}}}}}}}}}, - "isempty": []*FuncDef{&FuncDef{Name: "isempty", Args: []string{"g"}, Body: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "false"}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}}}}}, Op: OpComma, Right: &Query{Func: "true"}}}}}}}, - "iterables": []*FuncDef{&FuncDef{Name: "iterables", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpPipe, Right: &Query{Left: &Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}, Op: OpOr, Right: &Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}}}}}}}}}, - "last": []*FuncDef{&FuncDef{Name: "last", Body: 
&Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeUnary, Unary: &Unary{Op: OpSub, Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}}, &FuncDef{Name: "last", Args: []string{"g"}, Body: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "g"}}, Pattern: &Pattern{Name: "$item"}, Start: &Query{Func: "null"}, Update: &Query{Func: "$item"}}}}}}, - "leaf_paths": []*FuncDef{&FuncDef{Name: "leaf_paths", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "paths", Args: []*Query{&Query{Func: "scalars"}}}}}}}, - "limit": []*FuncDef{&FuncDef{Name: "limit", Args: []string{"$n", "g"}, Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "$n"}, Op: OpGt, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "g"}}, Pattern: &Pattern{Name: "$item"}, Start: &Query{Func: "$n"}, Update: &Query{Left: &Query{Func: "."}, Op: OpSub, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}, Extract: &Query{Left: &Query{Func: "$item"}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "."}, Op: OpLe, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}, Else: &Query{Func: "empty"}}}}}}}}}}}, Elif: []*IfElif{&IfElif{Cond: &Query{Left: &Query{Func: "$n"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Func: "empty"}}}, Else: &Query{Func: "g"}}}}}}, - "map": []*FuncDef{&FuncDef{Name: "map", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}}}}, Op: OpPipe, Right: &Query{Func: "f"}}}}}}}, - "map_values": []*FuncDef{&FuncDef{Name: "map_values", Args: []string{"f"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}}}}, Op: OpModify, Right: &Query{Func: "f"}}}}, - "match": []*FuncDef{&FuncDef{Name: "match", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "null"}}}}}}, &FuncDef{Name: "match", Args: []string{"$re", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_match", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "$flags"}, &Query{Func: "false"}}}, SuffixList: []*Suffix{&Suffix{Iter: true}}}}}}, - "max_by": []*FuncDef{&FuncDef{Name: "max_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_max_by", Args: []*Query{&Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, - "min_by": []*FuncDef{&FuncDef{Name: "min_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_min_by", Args: []*Query{&Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, - "normals": []*FuncDef{&FuncDef{Name: "normals", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Func: "isnormal"}}}}}}}, - 
"not": []*FuncDef{&FuncDef{Name: "not", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "."}, Then: &Query{Func: "false"}, Else: &Query{Func: "true"}}}}}}, - "nth": []*FuncDef{&FuncDef{Name: "nth", Args: []string{"$n"}, Body: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Func: "$n"}}}}}, &FuncDef{Name: "nth", Args: []string{"$n", "g"}, Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "$n"}, Op: OpLt, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "error", Args: []*Query{&Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "nth doesn't support negative indices"}}}}}}}, Else: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "g"}}, Pattern: &Pattern{Name: "$item"}, Start: &Query{Left: &Query{Func: "$n"}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}, Update: &Query{Left: &Query{Func: "."}, Op: OpSub, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}, Extract: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "."}, Op: OpLe, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Left: &Query{Func: "$item"}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}}, Else: &Query{Func: "empty"}}}}}}}}}}}}}}}, - "nulls": []*FuncDef{&FuncDef{Name: "nulls", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Func: "null"}}}}}}}}, - "numbers": []*FuncDef{&FuncDef{Name: "numbers", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "number"}}}}}}}}}}, - "objects": []*FuncDef{&FuncDef{Name: "objects", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}}}}}}}, - "paths": []*FuncDef{&FuncDef{Name: "paths", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{&Query{Func: ".."}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}}}}}}}, &FuncDef{Name: "paths", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "paths"}, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$p"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "getpath", Args: []*Query{&Query{Func: "$p"}}}}}, Op: OpPipe, Right: &Query{Func: "f"}}}}}}, Op: OpPipe, Right: &Query{Func: "$p"}}}}}}}}}, - "range": []*FuncDef{&FuncDef{Name: "range", Args: []string{"$end"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_range", Args: []*Query{&Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}, &Query{Func: "$end"}, &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}, &FuncDef{Name: "range", Args: []string{"$start", "$end"}, Body: &Query{Term: 
&Term{Type: TermTypeFunc, Func: &Func{Name: "_range", Args: []*Query{&Query{Func: "$start"}, &Query{Func: "$end"}, &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}, &FuncDef{Name: "range", Args: []string{"$start", "$end", "$step"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_range", Args: []*Query{&Query{Func: "$start"}, &Query{Func: "$end"}, &Query{Func: "$step"}}}}}}}, - "recurse": []*FuncDef{&FuncDef{Name: "recurse", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "recurse", Args: []*Query{&Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}, &Suffix{Optional: true}}}}}}}}}, &FuncDef{Name: "recurse", Args: []string{"f"}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "r", Body: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "f"}, Op: OpPipe, Right: &Query{Func: "r"}}}}}}}, Func: "r"}}, &FuncDef{Name: "recurse", Args: []string{"f", "cond"}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "r", Body: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "f"}, Op: OpPipe, Right: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Func: "cond"}}}}}, Op: OpPipe, Right: &Query{Func: "r"}}}}}}}}, Func: "r"}}}, - "repeat": []*FuncDef{&FuncDef{Name: "repeat", Args: []string{"f"}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "_repeat", Body: &Query{Left: &Query{Func: "f"}, Op: OpComma, Right: &Query{Func: "_repeat"}}}}, Func: "_repeat"}}}, - "scalars": []*FuncDef{&FuncDef{Name: "scalars", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpPipe, Right: &Query{Left: &Query{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}, Op: OpAnd, Right: &Query{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}}}}}}}}}, - "scan": []*FuncDef{&FuncDef{Name: "scan", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "scan", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "null"}}}}}}, &FuncDef{Name: "scan", Args: []string{"$re", "$flags"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{&Query{Func: "$re"}, &Query{Left: &Query{Func: "$flags"}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "g"}}}}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "captures"}}}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}, Then: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "string"}}}, Else: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "captures"}, SuffixList: []*Suffix{&Suffix{Iter: true}, &Suffix{Index: &Index{Name: "string"}}}}}}}}}}}}}}, - "select": []*FuncDef{&FuncDef{Name: "select", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "f"}, Then: &Query{Func: "."}, Else: &Query{Func: "empty"}}}}}}, - "sort_by": []*FuncDef{&FuncDef{Name: "sort_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_sort_by", Args: []*Query{&Query{Term: 
&Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, - "splits": []*FuncDef{&FuncDef{Name: "splits", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "splits", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "null"}}}}}}, &FuncDef{Name: "splits", Args: []string{"$re", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "split", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "$flags"}}}, SuffixList: []*Suffix{&Suffix{Iter: true}}}}}}, - "strings": []*FuncDef{&FuncDef{Name: "strings", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "string"}}}}}}}}}}, - "sub": []*FuncDef{&FuncDef{Name: "sub", Args: []string{"$re", "str"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "sub", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "str"}, &Query{Func: "null"}}}}}}, &FuncDef{Name: "sub", Args: []string{"$re", "str", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$str"}}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "_sub", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "matches"}}}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}, Then: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$str"}, SuffixList: []*Suffix{&Suffix{Index: &Index{End: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "offset"}}}, IsSlice: true}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "string"}}}}, Else: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "matches"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeUnary, Unary: &Unary{Op: OpSub, Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}, &Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$r"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{KeyVals: []*ObjectKeyVal{&ObjectKeyVal{Key: "string", Val: &ObjectVal{Queries: []*Query{&Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Left: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "$r"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "_capture"}, Op: OpPipe, Right: &Query{Func: "str"}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$str"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Start: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$r"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Name: "offset"}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$r"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Name: "length"}}}}}}, End: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "offset"}}}, IsSlice: true}}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "string"}}}}}}}}}, &ObjectKeyVal{Key: "offset", Val: &ObjectVal{Queries: []*Query{&Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$r"}, SuffixList: []*Suffix{&Suffix{Index: &Index{Name: "offset"}}}}}}}}, &ObjectKeyVal{Key: "matches", Val: &ObjectVal{Queries: []*Query{&Query{Term: &Term{Type: TermTypeIndex, Index: 
&Index{Name: "matches"}, SuffixList: []*Suffix{&Suffix{Index: &Index{End: &Query{Term: &Term{Type: TermTypeUnary, Unary: &Unary{Op: OpSub, Term: &Term{Type: TermTypeNumber, Number: "1"}}}}, IsSlice: true}}}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "_sub"}}}}}}}}}}}}, Left: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{KeyVals: []*ObjectKeyVal{&ObjectKeyVal{Key: "string", Val: &ObjectVal{Queries: []*Query{&Query{Term: &Term{Type: TermTypeString, Str: &String{}}}}}}, &ObjectKeyVal{Key: "matches", Val: &ObjectVal{Queries: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "$flags"}}}}}}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "_sub"}}}}}}}}}, - "test": []*FuncDef{&FuncDef{Name: "test", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "test", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "null"}}}}}}, &FuncDef{Name: "test", Args: []string{"$re", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_match", Args: []*Query{&Query{Func: "$re"}, &Query{Func: "$flags"}, &Query{Func: "true"}}}}}}}, - "todate": []*FuncDef{&FuncDef{Name: "todate", Body: &Query{Func: "todateiso8601"}}}, - "todateiso8601": []*FuncDef{&FuncDef{Name: "todateiso8601", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "strftime", Args: []*Query{&Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "%Y-%m-%dT%H:%M:%SZ"}}}}}}}}}, - "tostream": []*FuncDef{&FuncDef{Name: "tostream", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{&Query{FuncDefs: []*FuncDef{&FuncDef{Name: "r", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}, &Suffix{Optional: true}}}}, Op: OpPipe, Right: &Query{Func: "r"}}}}, Op: OpComma, Right: &Query{Func: "."}}}}, Func: "r"}}}, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$p"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "getpath", Args: []*Query{&Query{Func: "$p"}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{&Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Iter: true}, &Suffix{Optional: true}}}}}}}, Pattern: &Pattern{Name: "$q"}, Start: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "$p"}, Op: OpComma, Right: &Query{Func: "."}}}}}, Update: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "$p"}, Op: OpAdd, Right: &Query{Func: "$q"}}}}}}}}}}}}}}}}, - "truncate_stream": []*FuncDef{&FuncDef{Name: "truncate_stream", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{&Suffix{Bind: &Bind{Patterns: []*Pattern{&Pattern{Name: "$n"}}, Body: &Query{Left: &Query{Func: "null"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "f"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpGt, Right: &Query{Func: "$n"}}}, Then: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: 
TermTypeNumber, Number: "0"}}}}}, Op: OpModify, Right: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Func: "$n"}, IsSlice: true}}}}, Else: &Query{Func: "empty"}}}}}}}}}}}}}, - "unique_by": []*FuncDef{&FuncDef{Name: "unique_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_unique_by", Args: []*Query{&Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, - "until": []*FuncDef{&FuncDef{Name: "until", Args: []string{"cond", "next"}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "_until", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "cond"}, Then: &Query{Func: "."}, Else: &Query{Left: &Query{Func: "next"}, Op: OpPipe, Right: &Query{Func: "_until"}}}}}}}, Func: "_until"}}}, - "values": []*FuncDef{&FuncDef{Name: "values", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{&Query{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Func: "null"}}}}}}}}, - "walk": []*FuncDef{&FuncDef{Name: "walk", Args: []string{"f"}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "_walk", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}, Then: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Func: "_walk"}}}}}, Elif: []*IfElif{&IfElif{Cond: &Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}, Then: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map_values", Args: []*Query{&Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "last", Args: []*Query{&Query{Func: "_walk"}}}}}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "f"}}}}, Func: "_walk"}}}, - "while": []*FuncDef{&FuncDef{Name: "while", Args: []string{"cond", "update"}, Body: &Query{FuncDefs: []*FuncDef{&FuncDef{Name: "_while", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "cond"}, Then: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "update"}, Op: OpPipe, Right: &Query{Func: "_while"}}}}}, Else: &Query{Func: "empty"}}}}}}, Func: "_while"}}}, - "with_entries": []*FuncDef{&FuncDef{Name: "with_entries", Args: []string{"f"}, Body: &Query{Left: &Query{Func: "to_entries"}, Op: OpPipe, Right: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{&Query{Func: "f"}}}}}, Op: OpPipe, Right: &Query{Func: "from_entries"}}}}}, + "IN": {{Name: "IN", Args: []string{"s"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{{Left: &Query{Func: "s"}, Op: OpEq, Right: &Query{Func: "."}}, {Func: "."}}}}}}, {Name: "IN", Args: []string{"src", "s"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{{Left: &Query{Func: "src"}, Op: OpEq, Right: &Query{Func: "s"}}, {Func: "."}}}}}}}, + "INDEX": {{Name: "INDEX", Args: []string{"stream", "idx_expr"}, Body: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Query: &Query{Func: "stream"}, Pattern: &Pattern{Name: "$row"}, Start: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{}}}, Update: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Left: &Query{Func: "$row"}, Op: OpPipe, Right: &Query{Left: 
&Query{Func: "idx_expr"}, Op: OpPipe, Right: &Query{Func: "tostring"}}}}}}, Op: OpAssign, Right: &Query{Func: "$row"}}}}}}, {Name: "INDEX", Args: []string{"idx_expr"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "INDEX", Args: []*Query{{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}}}}, {Func: "idx_expr"}}}}}}}, + "JOIN": {{Name: "JOIN", Args: []string{"$idx", "idx_expr"}, Body: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$idx"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Func: "idx_expr"}}}}}}}}}}}}}}}, {Name: "JOIN", Args: []string{"$idx", "stream", "idx_expr"}, Body: &Query{Left: &Query{Func: "stream"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$idx"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Func: "idx_expr"}}}}}}}}}}}}, {Name: "JOIN", Args: []string{"$idx", "stream", "idx_expr", "join_expr"}, Body: &Query{Left: &Query{Func: "stream"}, Op: OpPipe, Right: &Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$idx"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Func: "idx_expr"}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "join_expr"}}}}}, + "_assign": {}, + "_modify": {}, + "all": {{Name: "all", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "all", Args: []*Query{{Func: "."}}}}}}, {Name: "all", Args: []string{"y"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "all", Args: []*Query{{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}}}}, {Func: "y"}}}}}}, {Name: "all", Args: []string{"g", "y"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "isempty", Args: []*Query{{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "y"}, Op: OpPipe, Right: &Query{Func: "not"}}}}}}}}}}}}}, + "any": {{Name: "any", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{{Func: "."}}}}}}, {Name: "any", Args: []string{"y"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "any", Args: []*Query{{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}}}}, {Func: "y"}}}}}}, {Name: "any", Args: []string{"g", "y"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "isempty", Args: []*Query{{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Func: "y"}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "not"}}}}, + "arrays": {{Name: "arrays", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}}}}}}}, + "booleans": {{Name: "booleans", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: 
&String{Str: "boolean"}}}}}}}}}}, + "capture": {{Name: "capture", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "capture", Args: []*Query{{Func: "$re"}, {Func: "null"}}}}}}, {Name: "capture", Args: []string{"$re", "$flags"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{{Func: "$re"}, {Func: "$flags"}}}}}, Op: OpPipe, Right: &Query{Func: "_capture"}}}}, + "combinations": {{Name: "combinations", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}, Else: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, SuffixList: []*Suffix{{Iter: true}, {Bind: &Bind{Patterns: []*Pattern{{Name: "$x"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "$x"}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}, IsSlice: true}}}, Op: OpPipe, Right: &Query{Func: "combinations"}}}}}}}}}}}}}}, {Name: "combinations", Args: []string{"n"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "limit", Args: []*Query{{Func: "n"}, {Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "repeat", Args: []*Query{{Func: "."}}}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "combinations"}}}}, + "del": {{Name: "del", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "delpaths", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{{Func: "f"}}}}}}}}}}}}}}, + "finites": {{Name: "finites", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Func: "isfinite"}}}}}}}, + "first": {{Name: "first", Body: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}, {Name: "first", Args: []string{"g"}, Body: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}}}}}}}}, + "fromdate": {{Name: "fromdate", Body: &Query{Func: "fromdateiso8601"}}}, + "fromdateiso8601": {{Name: "fromdateiso8601", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "strptime", Args: []*Query{{Term: &Term{Type: TermTypeString, Str: &String{Str: "%Y-%m-%dT%H:%M:%S%z"}}}}}}}, Op: OpPipe, Right: &Query{Func: "mktime"}}}}, + "fromstream": {{Name: "fromstream", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{KeyVals: []*ObjectKeyVal{{Key: "x", Val: &Query{Func: "null"}}, {Key: "e", Val: &Query{Func: "false"}}}}, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$init"}}, Body: &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{Query: &Query{Func: "f"}, Pattern: &Pattern{Name: "$i"}, Start: &Query{Func: "$init"}, Update: &Query{Left: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "e"}}}, Then: &Query{Func: "$init"}}}}, Op: 
OpPipe, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "$i"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "2"}}}}, Then: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "e"}}}}}}, {Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{{Left: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "x"}}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}}, {Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}}}}}}, Else: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "e"}}}}}}, {Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$i"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}}}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}}}}}}, Extract: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "e"}}}, Then: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "x"}}}, Else: &Query{Func: "empty"}}}}}}}}}}}}}}, + "group_by": {{Name: "group_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_group_by", Args: []*Query{{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, + "gsub": {{Name: "gsub", Args: []string{"$re", "str"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "sub", Args: []*Query{{Func: "$re"}, {Func: "str"}, {Term: &Term{Type: TermTypeString, Str: &String{Str: "g"}}}}}}}}, {Name: "gsub", Args: []string{"$re", "str", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "sub", Args: []*Query{{Func: "$re"}, {Func: "str"}, {Left: &Query{Func: "$flags"}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "g"}}}}}}}}}}, + "in": {{Name: "in", Args: []string{"xs"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$x"}}, Body: &Query{Left: &Query{Func: "xs"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "has", Args: []*Query{{Func: "$x"}}}}}}}}}}}}}, + "inputs": {{Name: "inputs", Body: &Query{Term: &Term{Type: TermTypeTry, Try: &Try{Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "repeat", Args: []*Query{{Func: "input"}}}}}, Catch: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: 
&Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "break"}}}}, Then: &Query{Func: "empty"}, Else: &Query{Func: "error"}}}}}}}}}, + "inside": {{Name: "inside", Args: []string{"xs"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$x"}}, Body: &Query{Left: &Query{Func: "xs"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "contains", Args: []*Query{{Func: "$x"}}}}}}}}}}}}}, + "isempty": {{Name: "isempty", Args: []string{"g"}, Body: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "g"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "false"}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}}}}}, Op: OpComma, Right: &Query{Func: "true"}}}}}}}, + "iterables": {{Name: "iterables", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpPipe, Right: &Query{Left: &Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}, Op: OpOr, Right: &Query{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}}}}}}}}}, + "last": {{Name: "last", Body: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeUnary, Unary: &Unary{Op: OpSub, Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}}, {Name: "last", Args: []string{"g"}, Body: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Query: &Query{Func: "g"}, Pattern: &Pattern{Name: "$item"}, Start: &Query{Func: "null"}, Update: &Query{Func: "$item"}}}}}}, + "limit": {{Name: "limit", Args: []string{"$n", "g"}, Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "$n"}, Op: OpGt, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{Query: &Query{Func: "g"}, Pattern: &Pattern{Name: "$item"}, Start: &Query{Func: "$n"}, Update: &Query{Left: &Query{Func: "."}, Op: OpSub, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}, Extract: &Query{Left: &Query{Func: "$item"}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "."}, Op: OpLe, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}, Else: &Query{Func: "empty"}}}}}}}}}}}, Elif: []*IfElif{{Cond: &Query{Left: &Query{Func: "$n"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Func: "empty"}}}, Else: &Query{Func: "g"}}}}}}, + "map": {{Name: "map", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}}}}, Op: OpPipe, Right: &Query{Func: "f"}}}}}}}, + "map_values": {{Name: "map_values", Args: []string{"f"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}}}}, Op: OpModify, Right: &Query{Func: "f"}}}}, + "match": {{Name: "match", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{{Func: "$re"}, {Func: "null"}}}}}}, {Name: "match", Args: 
[]string{"$re", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_match", Args: []*Query{{Func: "$re"}, {Func: "$flags"}, {Func: "false"}}}, SuffixList: []*Suffix{{Iter: true}}}}}}, + "max_by": {{Name: "max_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_max_by", Args: []*Query{{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, + "min_by": {{Name: "min_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_min_by", Args: []*Query{{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, + "normals": {{Name: "normals", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Func: "isnormal"}}}}}}}, + "not": {{Name: "not", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "."}, Then: &Query{Func: "false"}, Else: &Query{Func: "true"}}}}}}, + "nth": {{Name: "nth", Args: []string{"$n"}, Body: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Func: "$n"}}}}}, {Name: "nth", Args: []string{"$n", "g"}, Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "$n"}, Op: OpLt, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "error", Args: []*Query{{Term: &Term{Type: TermTypeString, Str: &String{Str: "nth doesn't support negative indices"}}}}}}}, Else: &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{Ident: "$out", Body: &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{Query: &Query{Func: "g"}, Pattern: &Pattern{Name: "$item"}, Start: &Query{Left: &Query{Func: "$n"}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}, Update: &Query{Left: &Query{Func: "."}, Op: OpSub, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "1"}}}, Extract: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "."}, Op: OpLe, Right: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}, Then: &Query{Left: &Query{Func: "$item"}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeBreak, Break: "$out"}}}, Else: &Query{Func: "empty"}}}}}}}}}}}}}}}, + "nulls": {{Name: "nulls", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "."}, Op: OpEq, Right: &Query{Func: "null"}}}}}}}}, + "numbers": {{Name: "numbers", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "number"}}}}}}}}}}, + "objects": {{Name: "objects", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}}}}}}}, + "paths": {{Name: "paths", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{{Func: ".."}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}}}}}}}, {Name: "paths", Args: []string{"f"}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: 
"path", Args: []*Query{{Left: &Query{Func: ".."}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Func: "f"}}}}}}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}}}}}}}}, + "pick": {{Name: "pick", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$v"}}, Body: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{{Func: "f"}}}}}, Pattern: &Pattern{Name: "$p"}, Start: &Query{Func: "null"}, Update: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "setpath", Args: []*Query{{Func: "$p"}, {Left: &Query{Func: "$v"}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "getpath", Args: []*Query{{Func: "$p"}}}}}}}}}}}}}}}}}}}}, + "range": {{Name: "range", Args: []string{"$end"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_range", Args: []*Query{{Term: &Term{Type: TermTypeNumber, Number: "0"}}, {Func: "$end"}, {Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}, {Name: "range", Args: []string{"$start", "$end"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_range", Args: []*Query{{Func: "$start"}, {Func: "$end"}, {Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}}, {Name: "range", Args: []string{"$start", "$end", "$step"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_range", Args: []*Query{{Func: "$start"}, {Func: "$end"}, {Func: "$step"}}}}}}}, + "recurse": {{Name: "recurse", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "recurse", Args: []*Query{{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}, {Optional: true}}}}}}}}}, {Name: "recurse", Args: []string{"f"}, Body: &Query{FuncDefs: []*FuncDef{{Name: "r", Body: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "f"}, Op: OpPipe, Right: &Query{Func: "r"}}}}}}}, Func: "r"}}, {Name: "recurse", Args: []string{"f", "cond"}, Body: &Query{FuncDefs: []*FuncDef{{Name: "r", Body: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "f"}, Op: OpPipe, Right: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Func: "cond"}}}}}, Op: OpPipe, Right: &Query{Func: "r"}}}}}}}}, Func: "r"}}}, + "repeat": {{Name: "repeat", Args: []string{"f"}, Body: &Query{FuncDefs: []*FuncDef{{Name: "_repeat", Body: &Query{Left: &Query{Func: "f"}, Op: OpComma, Right: &Query{Func: "_repeat"}}}}, Func: "_repeat"}}}, + "scalars": {{Name: "scalars", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpPipe, Right: &Query{Left: &Query{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}, Op: OpAnd, Right: &Query{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}}}}}}}}}, + "scan": {{Name: "scan", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "scan", Args: []*Query{{Func: "$re"}, {Func: "null"}}}}}}, {Name: "scan", Args: []string{"$re", "$flags"}, Body: 
&Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{{Func: "$re"}, {Left: &Query{Func: "$flags"}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "g"}}}}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "captures"}}}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}, Then: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "string"}}}, Else: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "captures"}, SuffixList: []*Suffix{{Iter: true}, {Index: &Index{Name: "string"}}}}}}}}}}}}}}, + "select": {{Name: "select", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "f"}, Then: &Query{Func: "."}, Else: &Query{Func: "empty"}}}}}}, + "sort_by": {{Name: "sort_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_sort_by", Args: []*Query{{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, + "splits": {{Name: "splits", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "splits", Args: []*Query{{Func: "$re"}, {Func: "null"}}}}}}, {Name: "splits", Args: []string{"$re", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "split", Args: []*Query{{Func: "$re"}, {Func: "$flags"}}}, SuffixList: []*Suffix{{Iter: true}}}}}}, + "strings": {{Name: "strings", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "string"}}}}}}}}}}, + "sub": {{Name: "sub", Args: []string{"$re", "str"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "sub", Args: []*Query{{Func: "$re"}, {Func: "str"}, {Func: "null"}}}}}}, {Name: "sub", Args: []string{"$re", "str", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$str"}}, Body: &Query{FuncDefs: []*FuncDef{{Name: "_sub", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "matches"}}}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{}}}}, Then: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$str"}, SuffixList: []*Suffix{{Index: &Index{End: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "offset"}}}, IsSlice: true}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "string"}}}}, Else: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "matches"}, SuffixList: []*Suffix{{Index: &Index{Start: &Query{Term: &Term{Type: TermTypeUnary, Unary: &Unary{Op: OpSub, Term: &Term{Type: TermTypeNumber, Number: "1"}}}}}}, {Bind: &Bind{Patterns: []*Pattern{{Name: "$r"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{KeyVals: []*ObjectKeyVal{{Key: "string", Val: &Query{Left: &Query{Left: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "$r"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "_capture"}, Op: OpPipe, Right: &Query{Func: "str"}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$str"}, SuffixList: 
[]*Suffix{{Index: &Index{Start: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$r"}, SuffixList: []*Suffix{{Index: &Index{Name: "offset"}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$r"}, SuffixList: []*Suffix{{Index: &Index{Name: "length"}}}}}}, End: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "offset"}}}, IsSlice: true}}}}}}, Op: OpAdd, Right: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "string"}}}}}, {Key: "offset", Val: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "$r"}, SuffixList: []*Suffix{{Index: &Index{Name: "offset"}}}}}}, {Key: "matches", Val: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Name: "matches"}, SuffixList: []*Suffix{{Index: &Index{End: &Query{Term: &Term{Type: TermTypeUnary, Unary: &Unary{Op: OpSub, Term: &Term{Type: TermTypeNumber, Number: "1"}}}}, IsSlice: true}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "_sub"}}}}}}}}}}}}, Left: &Query{Term: &Term{Type: TermTypeObject, Object: &Object{KeyVals: []*ObjectKeyVal{{Key: "string", Val: &Query{Term: &Term{Type: TermTypeString, Str: &String{}}}}, {Key: "matches", Val: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "match", Args: []*Query{{Func: "$re"}, {Func: "$flags"}}}}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "_sub"}}}}}}}}}, + "test": {{Name: "test", Args: []string{"$re"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "test", Args: []*Query{{Func: "$re"}, {Func: "null"}}}}}}, {Name: "test", Args: []string{"$re", "$flags"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_match", Args: []*Query{{Func: "$re"}, {Func: "$flags"}, {Func: "true"}}}}}}}, + "todate": {{Name: "todate", Body: &Query{Func: "todateiso8601"}}}, + "todateiso8601": {{Name: "todateiso8601", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "strftime", Args: []*Query{{Term: &Term{Type: TermTypeString, Str: &String{Str: "%Y-%m-%dT%H:%M:%SZ"}}}}}}}}}, + "tostream": {{Name: "tostream", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{{FuncDefs: []*FuncDef{{Name: "r", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}, {Optional: true}}}}, Op: OpPipe, Right: &Query{Func: "r"}}}}, Op: OpComma, Right: &Query{Func: "."}}}}, Func: "r"}}}, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$p"}}, Body: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "getpath", Args: []*Query{{Func: "$p"}}}}}, Op: OpPipe, Right: &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{Query: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "path", Args: []*Query{{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Iter: true}, {Optional: true}}}}}}}}, Pattern: &Pattern{Name: "$q"}, Start: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "$p"}, Op: OpComma, Right: &Query{Func: "."}}}}}, Update: &Query{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Left: &Query{Func: "$p"}, Op: OpAdd, Right: &Query{Func: "$q"}}}}}}}}}}}}}}}}, + "truncate_stream": {{Name: "truncate_stream", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{{Bind: &Bind{Patterns: []*Pattern{{Name: "$n"}}, Body: &Query{Left: &Query{Func: "null"}, Op: OpPipe, Right: &Query{Left: &Query{Func: "f"}, Op: OpPipe, 
Right: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}, Op: OpPipe, Right: &Query{Left: &Query{Func: "length"}, Op: OpGt, Right: &Query{Func: "$n"}}}, Then: &Query{Left: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Term: &Term{Type: TermTypeNumber, Number: "0"}}}}}, Op: OpModify, Right: &Query{Term: &Term{Type: TermTypeIndex, Index: &Index{Start: &Query{Func: "$n"}, IsSlice: true}}}}, Else: &Query{Func: "empty"}}}}}}}}}}}}}, + "unique_by": {{Name: "unique_by", Args: []string{"f"}, Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "_unique_by", Args: []*Query{{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Term: &Term{Type: TermTypeArray, Array: &Array{Query: &Query{Func: "f"}}}}}}}}}}}}}}, + "until": {{Name: "until", Args: []string{"cond", "next"}, Body: &Query{FuncDefs: []*FuncDef{{Name: "_until", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "cond"}, Then: &Query{Func: "."}, Else: &Query{Left: &Query{Func: "next"}, Op: OpPipe, Right: &Query{Func: "_until"}}}}}}}, Func: "_until"}}}, + "values": {{Name: "values", Body: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "select", Args: []*Query{{Left: &Query{Func: "."}, Op: OpNe, Right: &Query{Func: "null"}}}}}}}}, + "walk": {{Name: "walk", Args: []string{"f"}, Body: &Query{FuncDefs: []*FuncDef{{Name: "_walk", Body: &Query{Left: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "array"}}}}, Then: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Func: "_walk"}}}}}, Elif: []*IfElif{{Cond: &Query{Left: &Query{Func: "type"}, Op: OpEq, Right: &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: "object"}}}}, Then: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map_values", Args: []*Query{{Func: "_walk"}}}}}}}}}}, Op: OpPipe, Right: &Query{Func: "f"}}}}, Func: "_walk"}}}, + "while": {{Name: "while", Args: []string{"cond", "update"}, Body: &Query{FuncDefs: []*FuncDef{{Name: "_while", Body: &Query{Term: &Term{Type: TermTypeIf, If: &If{Cond: &Query{Func: "cond"}, Then: &Query{Left: &Query{Func: "."}, Op: OpComma, Right: &Query{Term: &Term{Type: TermTypeQuery, Query: &Query{Left: &Query{Func: "update"}, Op: OpPipe, Right: &Query{Func: "_while"}}}}}, Else: &Query{Func: "empty"}}}}}}, Func: "_while"}}}, + "with_entries": {{Name: "with_entries", Args: []string{"f"}, Body: &Query{Left: &Query{Func: "to_entries"}, Op: OpPipe, Right: &Query{Left: &Query{Term: &Term{Type: TermTypeFunc, Func: &Func{Name: "map", Args: []*Query{{Func: "f"}}}}}, Op: OpPipe, Right: &Query{Func: "from_entries"}}}}}, } } diff --git a/vendor/github.com/itchyny/gojq/builtin.jq b/vendor/github.com/itchyny/gojq/builtin.jq index 66d630731f2..ac6292ba746 100644 --- a/vendor/github.com/itchyny/gojq/builtin.jq +++ b/vendor/github.com/itchyny/gojq/builtin.jq @@ -37,7 +37,6 @@ def strings: select(type == "string"); def nulls: select(. == null); def values: select(. != null); def scalars: select(type | . != "array" and . != "object"); -def leaf_paths: paths(scalars); def inside(xs): . 
as $x | xs | contains($x); def combinations: @@ -52,7 +51,7 @@ def walk(f): if type == "array" then map(_walk) elif type == "object" then - map_values(last(_walk)) + map_values(_walk) end | f; _walk; @@ -117,7 +116,9 @@ def tostream: def map_values(f): .[] |= f; def del(f): delpaths([path(f)]); def paths: path(..) | select(. != []); -def paths(f): paths as $p | select(getpath($p) | f) | $p; +def paths(f): path(.. | select(f)) | select(. != []); +def pick(f): . as $v | + reduce path(f) as $p (null; setpath($p; $v | getpath($p))); def fromdateiso8601: strptime("%Y-%m-%dT%H:%M:%S%z") | mktime; def todateiso8601: strftime("%Y-%m-%dT%H:%M:%SZ"); @@ -149,7 +150,7 @@ def sub($re; str; $flags): else .matches[-1] as $r | { - string: (($r | _capture | str) + $str[$r.offset+$r.length:.offset] + .string), + string: ($r | _capture | str) + $str[$r.offset+$r.length:.offset] + .string, offset: $r.offset, matches: .matches[:-1], } | diff --git a/vendor/github.com/itchyny/gojq/compare.go b/vendor/github.com/itchyny/gojq/compare.go index e70c1fbbb76..6ab22754a9b 100644 --- a/vendor/github.com/itchyny/gojq/compare.go +++ b/vendor/github.com/itchyny/gojq/compare.go @@ -9,10 +9,6 @@ import ( // The result will be 0 if l == r, -1 if l < r, and +1 if l > r. // This comparison is used by built-in operators and functions. func Compare(l, r any) int { - return compare(l, r) -} - -func compare(l, r any) int { return binopTypeSwitch(l, r, compareInt, func(l, r float64) any { @@ -44,7 +40,7 @@ func compare(l, r any) int { n = len(r) } for i := 0; i < n; i++ { - if cmp := compare(l[i], r[i]); cmp != 0 { + if cmp := Compare(l[i], r[i]); cmp != 0 { return cmp } } @@ -52,11 +48,11 @@ func compare(l, r any) int { }, func(l, r map[string]any) any { lk, rk := funcKeys(l), funcKeys(r) - if cmp := compare(lk, rk); cmp != 0 { + if cmp := Compare(lk, rk); cmp != 0 { return cmp } for _, k := range lk.([]any) { - if cmp := compare(l[k.(string)], r[k.(string)]); cmp != 0 { + if cmp := Compare(l[k.(string)], r[k.(string)]); cmp != 0 { return cmp } } diff --git a/vendor/github.com/itchyny/gojq/compiler.go b/vendor/github.com/itchyny/gojq/compiler.go index 135387fa31f..b42517f8048 100644 --- a/vendor/github.com/itchyny/gojq/compiler.go +++ b/vendor/github.com/itchyny/gojq/compiler.go @@ -84,6 +84,13 @@ func Compile(q *Query, options ...CompilerOption) (*Code, error) { setscope := c.lazy(func() *code { return &code{op: opscope, v: [3]int{scope.id, scope.variablecnt, 0}} }) + for _, name := range c.variables { + if !newLexer(name).validVarName() { + return nil, &variableNameError{name} + } + c.appendCodeInfo(name) + c.append(&code{op: opstore, v: c.pushVariable(name)}) + } if c.moduleLoader != nil { if moduleLoader, ok := c.moduleLoader.(interface { LoadInitModules() ([]*Query, error) @@ -113,13 +120,6 @@ func Compile(q *Query, options ...CompilerOption) (*Code, error) { } func (c *compiler) compile(q *Query) error { - for _, name := range c.variables { - if !newLexer(name).validVarName() { - return &variableNameError{name} - } - c.appendCodeInfo(name) - c.append(&code{op: opstore, v: c.pushVariable(name)}) - } for _, i := range q.Imports { if err := c.compileImport(i); err != nil { return err @@ -398,6 +398,8 @@ func (c *compiler) compileQuery(e *Query) error { return c.compileTerm(e.Term) } switch e.Op { + case Operator(0): + return errors.New(`missing query (try ".")`) case OpPipe: if err := c.compileQuery(e.Left); err != nil { return err @@ -538,6 +540,7 @@ func (c *compiler) compileQueryUpdate(l, r *Query, op Operator) error { } 
func (c *compiler) compileBind(e *Term, b *Bind) error { + defer c.newScopeDepth()() c.append(&code{op: opdup}) c.append(&code{op: opexpbegin}) if err := c.compileTerm(e); err != nil { @@ -734,7 +737,7 @@ func (c *compiler) compileReduce(e *Reduce) error { } f() c.append(&code{op: opstore, v: v}) - if err := c.compileTerm(e.Term); err != nil { + if err := c.compileQuery(e.Query); err != nil { return err } if _, err := c.compilePattern(nil, e.Pattern); err != nil { @@ -765,7 +768,7 @@ func (c *compiler) compileForeach(e *Foreach) error { } f() c.append(&code{op: opstore, v: v}) - if err := c.compileTerm(e.Term); err != nil { + if err := c.compileQuery(e.Query); err != nil { return err } if _, err := c.compilePattern(nil, e.Pattern); err != nil { @@ -990,6 +993,22 @@ func (c *compiler) compileFunc(e *Func) error { true, -1, ) + case "debug": + setfork := c.lazy(func() *code { + return &code{op: opfork, v: len(c.codes)} + }) + if err := c.compileQuery(e.Args[0]); err != nil { + return err + } + if err := c.compileFunc(&Func{Name: "debug"}); err != nil { + if _, ok := err.(*funcNotFoundError); ok { + err = &funcNotFoundError{e} + } + return err + } + c.append(&code{op: opbacktrack}) + setfork() + return nil default: return c.compileCall(e.Name, e.Args) } @@ -1172,7 +1191,7 @@ func (c *compiler) funcInput(any, []any) any { func (c *compiler) funcModulemeta(v any, _ []any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"modulemeta", v} + return &func0TypeError{"modulemeta", v} } if c.moduleLoader == nil { return fmt.Errorf("cannot load module: %q", s) @@ -1196,35 +1215,52 @@ func (c *compiler) funcModulemeta(v any, _ []any) any { if meta == nil { meta = make(map[string]any) } - var deps []any - for _, i := range q.Imports { + meta["defs"] = listModuleDefs(q) + meta["deps"] = listModuleDeps(q) + return meta +} + +func listModuleDefs(q *Query) []any { + type funcNameArity struct { + name string + arity int + } + var xs []*funcNameArity + for _, fd := range q.FuncDefs { + if fd.Name[0] != '_' { + xs = append(xs, &funcNameArity{fd.Name, len(fd.Args)}) + } + } + sort.Slice(xs, func(i, j int) bool { + return xs[i].name < xs[j].name || + xs[i].name == xs[j].name && xs[i].arity < xs[j].arity + }) + defs := make([]any, len(xs)) + for i, x := range xs { + defs[i] = x.name + "/" + strconv.Itoa(x.arity) + } + return defs +} + +func listModuleDeps(q *Query) []any { + deps := make([]any, len(q.Imports)) + for j, i := range q.Imports { v := i.Meta.ToValue() if v == nil { v = make(map[string]any) - } else { - for k := range v { - // dirty hack to remove the internal fields - if strings.HasPrefix(k, "$$") { - delete(v, k) - } - } } - if i.ImportPath == "" { - v["relpath"] = i.IncludePath - } else { - v["relpath"] = i.ImportPath - } - if err != nil { - return err + relpath := i.ImportPath + if relpath == "" { + relpath = i.IncludePath } + v["relpath"] = relpath if i.ImportAlias != "" { v["as"] = strings.TrimPrefix(i.ImportAlias, "$") } v["is_data"] = strings.HasPrefix(i.ImportAlias, "$") - deps = append(deps, v) + deps[j] = v } - meta["deps"] = deps - return meta + return deps } func (c *compiler) compileObject(e *Object) error { @@ -1311,10 +1347,8 @@ func (c *compiler) compileObjectKeyVal(v [2]int, kv *ObjectKeyVal) error { } if kv.Val != nil { c.append(&code{op: opload, v: v}) - for _, e := range kv.Val.Queries { - if err := c.compileQuery(e); err != nil { - return err - } + if err := c.compileQuery(kv.Val); err != nil { + return err } } return nil @@ -1409,6 +1443,8 @@ func formatToFunc(format 
string) *Func { return &Func{Name: "_tohtml"} case "@uri": return &Func{Name: "_touri"} + case "@urid": + return &Func{Name: "_tourid"} case "@csv": return &Func{Name: "_tocsv"} case "@tsv": diff --git a/vendor/github.com/itchyny/gojq/debug.go b/vendor/github.com/itchyny/gojq/debug.go index ad3d72160a8..236982809f6 100644 --- a/vendor/github.com/itchyny/gojq/debug.go +++ b/vendor/github.com/itchyny/gojq/debug.go @@ -103,7 +103,7 @@ func (env *env) debugCodes() { s = "\t## " + name } } - fmt.Fprintf(debugOut, "\t%d\t%s%s%s\n", i, formatOp(c.op, false), debugOperand(c), s) + fmt.Fprintf(debugOut, "\t%d\t%-*s%s%s\n", i, 25, c.op, debugOperand(c), s) } fmt.Fprintln(debugOut, "\t"+strings.Repeat("-", 40)+"+") } @@ -114,7 +114,11 @@ func (env *env) debugState(pc int, backtrack bool) { } var sb strings.Builder c := env.codes[pc] - fmt.Fprintf(&sb, "\t%d\t%s%s\t|", pc, formatOp(c.op, backtrack), debugOperand(c)) + op := c.op.String() + if backtrack { + op += " " + } + fmt.Fprintf(&sb, "\t%d\t%-*s%s\t|", pc, 25, op, debugOperand(c)) var xs []int for i := env.stack.index; i >= 0; i = env.stack.data[i].next { xs = append(xs, i) @@ -149,13 +153,6 @@ func (env *env) debugState(pc int, backtrack bool) { fmt.Fprintln(debugOut, sb.String()) } -func formatOp(c opcode, backtrack bool) string { - if backtrack { - return c.String() + " " + strings.Repeat(" ", 13-len(c.String())) - } - return c.String() + strings.Repeat(" ", 25-len(c.String())) -} - func (env *env) debugForks(pc int, op string) { if !debug { return @@ -173,7 +170,7 @@ func (env *env) debugForks(pc int, op string) { sb.WriteByte('>') } } - fmt.Fprintf(debugOut, "\t-\t%s%s%d\t|\t%s\n", op, strings.Repeat(" ", 22), pc, sb.String()) + fmt.Fprintf(debugOut, "\t-\t%-*s%d\t|\t%s\n", 25, op, pc, sb.String()) } func debugOperand(c *code) string { diff --git a/vendor/github.com/itchyny/gojq/error.go b/vendor/github.com/itchyny/gojq/error.go index 695463f3b86..18b06b1ccfd 100644 --- a/vendor/github.com/itchyny/gojq/error.go +++ b/vendor/github.com/itchyny/gojq/error.go @@ -27,20 +27,20 @@ func (err *expectedArrayError) Error() string { return "expected an array but got: " + typeErrorPreview(err.v) } -type expectedStringError struct { +type iteratorError struct { v any } -func (err *expectedStringError) Error() string { - return "expected a string but got: " + typeErrorPreview(err.v) +func (err *iteratorError) Error() string { + return "cannot iterate over: " + typeErrorPreview(err.v) } -type iteratorError struct { - v any +type arrayIndexNegativeError struct { + v int } -func (err *iteratorError) Error() string { - return "cannot iterate over: " + typeErrorPreview(err.v) +func (err *arrayIndexNegativeError) Error() string { + return "array index should not be negative: " + Preview(err.v) } type arrayIndexTooLargeError struct { @@ -83,13 +83,10 @@ func (err *expectedStartEndError) Error() string { return `expected "start" and "end" for slicing but got: ` + typeErrorPreview(err.v) } -type lengthMismatchError struct { - name string - v, x []any -} +type lengthMismatchError struct{} -func (err *lengthMismatchError) Error() string { - return "length mismatch in " + err.name + ": " + typeErrorPreview(err.v) + ", " + typeErrorPreview(err.x) +func (*lengthMismatchError) Error() string { + return "length mismatch" } type inputNotAllowedError struct{} @@ -106,19 +103,66 @@ func (err *funcNotFoundError) Error() string { return "function not defined: " + err.f.Name + "/" + strconv.Itoa(len(err.f.Args)) } -type funcTypeError struct { +type func0TypeError struct { 
name string v any } -func (err *funcTypeError) Error() string { +func (err *func0TypeError) Error() string { return err.name + " cannot be applied to: " + typeErrorPreview(err.v) } +type func1TypeError struct { + name string + v, w any +} + +func (err *func1TypeError) Error() string { + return err.name + "(" + Preview(err.w) + ") cannot be applied to: " + typeErrorPreview(err.v) +} + +type func2TypeError struct { + name string + v, w, x any +} + +func (err *func2TypeError) Error() string { + return err.name + "(" + Preview(err.w) + "; " + Preview(err.x) + ") cannot be applied to: " + typeErrorPreview(err.v) +} + +type func0WrapError struct { + name string + v any + err error +} + +func (err *func0WrapError) Error() string { + return err.name + " cannot be applied to " + Preview(err.v) + ": " + err.err.Error() +} + +type func1WrapError struct { + name string + v, w any + err error +} + +func (err *func1WrapError) Error() string { + return err.name + "(" + Preview(err.w) + ") cannot be applied to " + Preview(err.v) + ": " + err.err.Error() +} + +type func2WrapError struct { + name string + v, w, x any + err error +} + +func (err *func2WrapError) Error() string { + return err.name + "(" + Preview(err.w) + "; " + Preview(err.x) + ") cannot be applied to " + Preview(err.v) + ": " + err.err.Error() +} + type exitCodeError struct { value any code int - halt bool } func (err *exitCodeError) Error() string { @@ -128,10 +172,6 @@ func (err *exitCodeError) Error() string { return "error: " + jsonMarshal(err.value) } -func (err *exitCodeError) IsEmptyError() bool { - return err.value == nil -} - func (err *exitCodeError) Value() any { return err.value } @@ -140,24 +180,28 @@ func (err *exitCodeError) ExitCode() int { return err.code } -func (err *exitCodeError) IsHaltError() bool { - return err.halt -} +// HaltError is an error emitted by halt and halt_error functions. +// It implements [ValueError], and if the value is nil, discard the error +// and stop the iteration. Consider a query like "1, halt, 2"; +// the first value is 1, and the second value is a HaltError with nil value. +// You might think the iterator should not emit an error this case, but it +// should so that we can recognize the halt error to stop the outer loop +// of iterating input values; echo 1 2 3 | gojq "., halt". +type HaltError exitCodeError -type containsTypeError struct { - l, r any -} - -func (err *containsTypeError) Error() string { - return "cannot check contains(" + Preview(err.r) + "): " + typeErrorPreview(err.l) +func (err *HaltError) Error() string { + return "halt " + (*exitCodeError)(err).Error() } -type hasKeyTypeError struct { - l, r any +// Value returns the value of the error. This implements [ValueError], +// but halt error is not catchable by try-catch. +func (err *HaltError) Value() any { + return (*exitCodeError)(err).Value() } -func (err *hasKeyTypeError) Error() string { - return "cannot check whether " + typeErrorPreview(err.l) + " has a key: " + typeErrorPreview(err.r) +// ExitCode returns the exit code of the error. 
+func (err *HaltError) ExitCode() int { + return (*exitCodeError)(err).ExitCode() } type flattenDepthError struct { @@ -165,7 +209,7 @@ type flattenDepthError struct { } func (err *flattenDepthError) Error() string { - return "flatten depth must not be negative: " + typeErrorPreview(err.v) + return "flatten depth should not be negative: " + Preview(err.v) } type joinTypeError struct { @@ -173,7 +217,13 @@ type joinTypeError struct { } func (err *joinTypeError) Error() string { - return "cannot join: " + typeErrorPreview(err.v) + return "join cannot be applied to an array including: " + typeErrorPreview(err.v) +} + +type timeArrayError struct{} + +func (*timeArrayError) Error() string { + return "expected an array of 8 numbers" } type unaryTypeError struct { @@ -229,7 +279,7 @@ func (err *formatRowError) Error() string { type tooManyVariableValuesError struct{} -func (err *tooManyVariableValuesError) Error() string { +func (*tooManyVariableValuesError) Error() string { return "too many variable values provided" } @@ -266,7 +316,7 @@ func (err *breakError) Error() string { return "label not defined: " + err.n } -func (err *breakError) ExitCode() int { +func (*breakError) ExitCode() int { return 3 } @@ -294,14 +344,6 @@ func (err *invalidPathIterError) Error() string { return "invalid path on iterating against: " + typeErrorPreview(err.v) } -type getpathError struct { - v, path any -} - -func (err *getpathError) Error() string { - return "cannot getpath with " + Preview(err.path) + " against: " + typeErrorPreview(err.v) -} - type queryParseError struct { fname, contents string err error diff --git a/vendor/github.com/itchyny/gojq/execute.go b/vendor/github.com/itchyny/gojq/execute.go index d43ef3e97fb..344d8a3c574 100644 --- a/vendor/github.com/itchyny/gojq/execute.go +++ b/vendor/github.com/itchyny/gojq/execute.go @@ -66,7 +66,9 @@ loop: err = &objectKeyNotStringError{k} break loop } - m[s] = v + if _, ok := m[s]; !ok { + m[s] = v + } } env.push(m) case opappend: @@ -86,23 +88,15 @@ loop: if err == nil { break loop } - switch er := err.(type) { + switch e := err.(type) { case *tryEndError: - err = er.err + err = e.err break loop - case *breakError: + case *breakError, *HaltError: break loop case ValueError: - if er, ok := er.(*exitCodeError); ok && er.halt { - break loop - } - if v := er.Value(); v != nil { - env.pop() - env.push(v) - } else { - err = nil - break loop - } + env.pop() + env.push(e.Value()) default: env.pop() env.push(err.Error()) @@ -191,9 +185,7 @@ loop: } w := v[0].(func(any, []any) any)(x, args) if e, ok := w.(error); ok { - if er, ok := e.(*exitCodeError); !ok || er.value != nil || er.halt { - err = e - } + err = e break loop } env.push(w) @@ -436,7 +428,7 @@ func (env *env) pathIntact(v any) bool { } func (env *env) poppaths() []any { - var xs []any + xs := []any{} for { p := env.paths.pop().(pathValue) if p.path == nil { diff --git a/vendor/github.com/itchyny/gojq/func.go b/vendor/github.com/itchyny/gojq/func.go index d94a7a47425..e06b4ff7254 100644 --- a/vendor/github.com/itchyny/gojq/func.go +++ b/vendor/github.com/itchyny/gojq/func.go @@ -3,6 +3,7 @@ package gojq import ( "encoding/base64" "encoding/json" + "errors" "fmt" "io" "math" @@ -14,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "unicode/utf8" "github.com/itchyny/timefmt-go" @@ -49,6 +51,8 @@ func init() { "builtins": argFunc0(nil), "input": argFunc0(nil), "modulemeta": argFunc0(nil), + "debug": argFunc1(nil), + "abs": argFunc0(funcAbs), "length": argFunc0(funcLength), "utf8bytelength": 
argFunc0(funcUtf8ByteLength), "keys": argFunc0(funcKeys), @@ -68,6 +72,9 @@ func init() { "endswith": argFunc1(funcEndsWith), "ltrimstr": argFunc1(funcLtrimstr), "rtrimstr": argFunc1(funcRtrimstr), + "ltrim": argFunc0(funcLtrim), + "rtrim": argFunc0(funcRtrim), + "trim": argFunc0(funcTrim), "explode": argFunc0(funcExplode), "implode": argFunc0(funcImplode), "split": {argcount1 | argcount2, false, funcSplit}, @@ -78,6 +85,7 @@ func init() { "format": argFunc1(funcFormat), "_tohtml": argFunc0(funcToHTML), "_touri": argFunc0(funcToURI), + "_tourid": argFunc0(funcToURId), "_tocsv": argFunc0(funcToCSV), "_totsv": argFunc0(funcToTSV), "_tosh": argFunc0(funcToSh), @@ -125,8 +133,8 @@ func init() { "atanh": mathFunc("atanh", math.Atanh), "floor": mathFunc("floor", math.Floor), "round": mathFunc("round", math.Round), - "nearbyint": mathFunc("nearbyint", math.Round), - "rint": mathFunc("rint", math.Round), + "nearbyint": mathFunc("nearbyint", math.RoundToEven), + "rint": mathFunc("rint", math.RoundToEven), "ceil": mathFunc("ceil", math.Ceil), "trunc": mathFunc("trunc", math.Trunc), "significand": mathFunc("significand", funcSignificand), @@ -162,15 +170,14 @@ func init() { "fmod": mathFunc2("fmod", math.Mod), "hypot": mathFunc2("hypot", math.Hypot), "jn": mathFunc2("jn", funcJn), - "ldexp": mathFunc2("ldexp", funcLdexp), "nextafter": mathFunc2("nextafter", math.Nextafter), "nexttoward": mathFunc2("nexttoward", math.Nextafter), "remainder": mathFunc2("remainder", math.Remainder), - "scalb": mathFunc2("scalb", funcScalb), - "scalbln": mathFunc2("scalbln", funcScalbln), + "ldexp": mathFunc2("ldexp", funcLdexp), + "scalb": mathFunc2("scalb", funcLdexp), + "scalbln": mathFunc2("scalbln", funcLdexp), "yn": mathFunc2("yn", funcYn), "pow": mathFunc2("pow", math.Pow), - "pow10": mathFunc("pow10", funcExp10), "fma": mathFunc3("fma", math.FMA), "infinite": argFunc0(funcInfinite), "isfinite": argFunc0(funcIsfinite), @@ -234,7 +241,7 @@ func mathFunc(name string, f func(float64) float64) function { return argFunc0(func(v any) any { x, ok := toFloat(v) if !ok { - return &funcTypeError{name, v} + return &func0TypeError{name, v} } return f(x) }) @@ -244,11 +251,11 @@ func mathFunc2(name string, f func(_, _ float64) float64) function { return argFunc2(func(_, x, y any) any { l, ok := toFloat(x) if !ok { - return &funcTypeError{name, x} + return &func0TypeError{name, x} } r, ok := toFloat(y) if !ok { - return &funcTypeError{name, y} + return &func0TypeError{name, y} } return f(l, r) }) @@ -258,20 +265,39 @@ func mathFunc3(name string, f func(_, _, _ float64) float64) function { return argFunc3(func(_, a, b, c any) any { x, ok := toFloat(a) if !ok { - return &funcTypeError{name, a} + return &func0TypeError{name, a} } y, ok := toFloat(b) if !ok { - return &funcTypeError{name, b} + return &func0TypeError{name, b} } z, ok := toFloat(c) if !ok { - return &funcTypeError{name, c} + return &func0TypeError{name, c} } return f(x, y, z) }) } +func funcAbs(v any) any { + switch v := v.(type) { + case int: + if v >= 0 { + return v + } + return -v + case float64: + return math.Abs(v) + case *big.Int: + if v.Sign() >= 0 { + return v + } + return new(big.Int).Abs(v) + default: + return &func0TypeError{"abs", v} + } +} + func funcLength(v any) any { switch v := v.(type) { case nil: @@ -295,14 +321,14 @@ func funcLength(v any) any { case map[string]any: return len(v) default: - return &funcTypeError{"length", v} + return &func0TypeError{"length", v} } } func funcUtf8ByteLength(v any) any { s, ok := v.(string) if !ok { - return 
&funcTypeError{"utf8bytelength", v} + return &func0TypeError{"utf8bytelength", v} } return len(s) } @@ -322,7 +348,7 @@ func funcKeys(v any) any { } return w default: - return &funcTypeError{"keys", v} + return &func0TypeError{"keys", v} } } @@ -366,7 +392,7 @@ func funcHas(v, x any) any { case nil: return false } - return &hasKeyTypeError{v, x} + return &func1TypeError{"has", v, x} } func funcToEntries(v any) any { @@ -384,14 +410,14 @@ func funcToEntries(v any) any { } return w default: - return &funcTypeError{"to_entries", v} + return &func0TypeError{"to_entries", v} } } func funcFromEntries(v any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"from_entries", v} + return &func0TypeError{"from_entries", v} } w := make(map[string]any, len(vs)) for _, v := range vs { @@ -405,13 +431,13 @@ func funcFromEntries(v any) any { for _, k := range [4]string{"key", "Key", "name", "Name"} { if k := v[k]; k != nil && k != false { if key, ok = k.(string); !ok { - return &objectKeyNotStringError{k} + return &func0WrapError{"from_entries", vs, &objectKeyNotStringError{k}} } break } } if !ok { - return &objectKeyNotStringError{nil} + return &func0WrapError{"from_entries", vs, &objectKeyNotStringError{nil}} } for _, k := range [2]string{"value", "Value"} { if value, ok = v[k]; ok { @@ -420,7 +446,7 @@ func funcFromEntries(v any) any { } w[key] = value default: - return &funcTypeError{"from_entries", v} + return &func0TypeError{"from_entries", v} } } return w @@ -429,7 +455,7 @@ func funcFromEntries(v any) any { func funcAdd(v any) any { vs, ok := values(v) if !ok { - return &funcTypeError{"add", v} + return &func0TypeError{"add", v} } v = nil for _, x := range vs { @@ -494,11 +520,11 @@ func funcToNumber(v any) any { return v case string: if !newLexer(v).validNumber() { - return fmt.Errorf("invalid number: %q", v) + return &func0WrapError{"tonumber", v, errors.New("invalid number")} } return toNumber(v) default: - return &funcTypeError{"tonumber", v} + return &func0TypeError{"tonumber", v} } } @@ -520,7 +546,7 @@ func funcType(v any) any { func funcReverse(v any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"reverse", v} + return &func0TypeError{"reverse", v} } ws := make([]any, len(vs)) for i, v := range vs { @@ -562,22 +588,22 @@ func funcContains(v, x any) any { if l == r { return true } - return &containsTypeError{l, r} + return &func1TypeError{"contains", l, r} }, ) } func funcIndices(v, x any) any { - return indexFunc(v, x, indices) + return indexFunc("indices", v, x, indices) } func indices(vs, xs []any) any { - var rs []any + rs := []any{} if len(xs) == 0 { return rs } for i := 0; i <= len(vs)-len(xs); i++ { - if compare(vs[i:i+len(xs)], xs) == 0 { + if Compare(vs[i:i+len(xs)], xs) == 0 { rs = append(rs, i) } } @@ -585,12 +611,12 @@ func indices(vs, xs []any) any { } func funcIndex(v, x any) any { - return indexFunc(v, x, func(vs, xs []any) any { + return indexFunc("index", v, x, func(vs, xs []any) any { if len(xs) == 0 { return nil } for i := 0; i <= len(vs)-len(xs); i++ { - if compare(vs[i:i+len(xs)], xs) == 0 { + if Compare(vs[i:i+len(xs)], xs) == 0 { return i } } @@ -599,12 +625,12 @@ func funcIndex(v, x any) any { } func funcRindex(v, x any) any { - return indexFunc(v, x, func(vs, xs []any) any { + return indexFunc("rindex", v, x, func(vs, xs []any) any { if len(xs) == 0 { return nil } for i := len(vs) - len(xs); i >= 0; i-- { - if compare(vs[i:i+len(xs)], xs) == 0 { + if Compare(vs[i:i+len(xs)], xs) == 0 { return i } } @@ -612,7 +638,7 @@ func funcRindex(v, x any) 
any { }) } -func indexFunc(v, x any, f func(_, _ []any) any) any { +func indexFunc(name string, v, x any, f func(_, _ []any) any) any { switch v := v.(type) { case nil: return nil @@ -627,20 +653,20 @@ func indexFunc(v, x any, f func(_, _ []any) any) any { if x, ok := x.(string); ok { return f(explode(v), explode(x)) } - return &expectedStringError{x} + return &func1TypeError{name, v, x} default: - return &expectedArrayError{v} + return &func1TypeError{name, v, x} } } func funcStartsWith(v, x any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"startswith", v} + return &func1TypeError{"startswith", v, x} } t, ok := x.(string) if !ok { - return &funcTypeError{"startswith", x} + return &func1TypeError{"startswith", v, x} } return strings.HasPrefix(s, t) } @@ -648,11 +674,11 @@ func funcStartsWith(v, x any) any { func funcEndsWith(v, x any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"endswith", v} + return &func1TypeError{"endswith", v, x} } t, ok := x.(string) if !ok { - return &funcTypeError{"endswith", x} + return &func1TypeError{"endswith", v, x} } return strings.HasSuffix(s, t) } @@ -660,11 +686,11 @@ func funcEndsWith(v, x any) any { func funcLtrimstr(v, x any) any { s, ok := v.(string) if !ok { - return v + return &func1TypeError{"ltrimstr", v, x} } t, ok := x.(string) if !ok { - return v + return &func1TypeError{"ltrimstr", v, x} } return strings.TrimPrefix(s, t) } @@ -672,19 +698,43 @@ func funcLtrimstr(v, x any) any { func funcRtrimstr(v, x any) any { s, ok := v.(string) if !ok { - return v + return &func1TypeError{"rtrimstr", v, x} } t, ok := x.(string) if !ok { - return v + return &func1TypeError{"rtrimstr", v, x} } return strings.TrimSuffix(s, t) } +func funcLtrim(v any) any { + s, ok := v.(string) + if !ok { + return &func0TypeError{"ltrim", v} + } + return strings.TrimLeftFunc(s, unicode.IsSpace) +} + +func funcRtrim(v any) any { + s, ok := v.(string) + if !ok { + return &func0TypeError{"rtrim", v} + } + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +func funcTrim(v any) any { + s, ok := v.(string) + if !ok { + return &func0TypeError{"trim", v} + } + return strings.TrimSpace(s) +} + func funcExplode(v any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"explode", v} + return &func0TypeError{"explode", v} } return explode(s) } @@ -702,15 +752,19 @@ func explode(s string) []any { func funcImplode(v any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"implode", v} + return &func0TypeError{"implode", v} } var sb strings.Builder sb.Grow(len(vs)) for _, v := range vs { - if r, ok := toInt(v); ok && 0 <= r && r <= utf8.MaxRune { - sb.WriteRune(rune(r)) + if r, ok := toInt(v); ok { + if 0 <= r && r <= utf8.MaxRune { + sb.WriteRune(rune(r)) + } else { + sb.WriteRune(utf8.RuneError) + } } else { - return &funcTypeError{"implode", vs} + return &func0TypeError{"implode", vs} } } return sb.String() @@ -719,11 +773,11 @@ func funcImplode(v any) any { func funcSplit(v any, args []any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"split", v} + return &func0TypeError{"split", v} } x, ok := args[0].(string) if !ok { - return &funcTypeError{"split", x} + return &func0TypeError{"split", x} } var ss []string if len(args) == 1 { @@ -733,7 +787,7 @@ func funcSplit(v any, args []any) any { if args[1] != nil { v, ok := args[1].(string) if !ok { - return &funcTypeError{"split", args[1]} + return &func0TypeError{"split", args[1]} } flags = v } @@ -753,7 +807,7 @@ func funcSplit(v any, args []any) any { func funcASCIIDowncase(v any) 
any { s, ok := v.(string) if !ok { - return &funcTypeError{"ascii_downcase", v} + return &func0TypeError{"ascii_downcase", v} } return strings.Map(func(r rune) rune { if 'A' <= r && r <= 'Z' { @@ -766,7 +820,7 @@ func funcASCIIDowncase(v any) any { func funcASCIIUpcase(v any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"ascii_upcase", v} + return &func0TypeError{"ascii_upcase", v} } return strings.Map(func(r rune) rune { if 'a' <= r && r <= 'z' { @@ -783,16 +837,16 @@ func funcToJSON(v any) any { func funcFromJSON(v any) any { s, ok := v.(string) if !ok { - return &funcTypeError{"fromjson", v} + return &func0TypeError{"fromjson", v} } var w any dec := json.NewDecoder(strings.NewReader(s)) dec.UseNumber() if err := dec.Decode(&w); err != nil { - return err + return &func0WrapError{"fromjson", v, err} } if _, err := dec.Token(); err != io.EOF { - return &funcTypeError{"fromjson", v} + return &func0TypeError{"fromjson", v} } return normalizeNumbers(w) } @@ -800,7 +854,7 @@ func funcFromJSON(v any) any { func funcFormat(v, x any) any { s, ok := x.(string) if !ok { - return &funcTypeError{"format", x} + return &func0TypeError{"format", x} } format := "@" + s f := formatToFunc(format) @@ -836,6 +890,19 @@ func funcToURI(v any) any { } } +func funcToURId(v any) any { + switch x := funcToString(v).(type) { + case string: + x, err := url.QueryUnescape(x) + if err != nil { + return &func0WrapError{"@urid", v, err} + } + return x + default: + return x + } +} + var csvEscaper = strings.NewReplacer( `"`, `""`, "\x00", `\0`, @@ -876,7 +943,7 @@ func funcToSh(v any) any { func formatJoin(typ string, v any, sep string, escape func(string) string) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"@" + typ, v} + return &func0TypeError{"@" + typ, v} } ss := make([]string, len(vs)) for i, v := range vs { @@ -911,7 +978,7 @@ func funcToBase64d(v any) any { } y, err := base64.RawStdEncoding.DecodeString(x) if err != nil { - return err + return &func0WrapError{"@base64d", v, err} } return string(y) default: @@ -1089,7 +1156,7 @@ func clampIndex(i, min, max int) int { func funcFlatten(v any, args []any) any { vs, ok := values(v) if !ok { - return &funcTypeError{"flatten", v} + return &func0TypeError{"flatten", v} } var depth float64 if len(args) == 0 { @@ -1097,13 +1164,13 @@ func funcFlatten(v any, args []any) any { } else { depth, ok = toFloat(args[0]) if !ok { - return &funcTypeError{"flatten", args[0]} + return &func0TypeError{"flatten", args[0]} } if depth < 0 { return &flattenDepthError{depth} } } - return flatten(nil, vs, depth) + return flatten([]any{}, vs, depth) } func flatten(xs, vs []any, depth float64) []any { @@ -1122,7 +1189,7 @@ type rangeIter struct { } func (iter *rangeIter) Next() (any, bool) { - if compare(iter.step, 0)*compare(iter.value, iter.end) >= 0 { + if Compare(iter.step, 0)*Compare(iter.value, iter.end) >= 0 { return nil, false } v := iter.value @@ -1135,7 +1202,7 @@ func funcRange(_ any, xs []any) any { switch x.(type) { case int, float64, *big.Int: default: - return &funcTypeError{"range", x} + return &func0TypeError{"range", x} } } return &rangeIter{xs[0], xs[1], xs[2]} @@ -1144,7 +1211,7 @@ func funcRange(_ any, xs []any) any { func funcMin(v any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"min", v} + return &func0TypeError{"min", v} } return minMaxBy(vs, vs, true) } @@ -1152,14 +1219,14 @@ func funcMin(v any) any { func funcMinBy(v, x any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"min_by", v} + return 
&func1TypeError{"min_by", v, x} } xs, ok := x.([]any) if !ok { - return &funcTypeError{"min_by", x} + return &func1TypeError{"min_by", v, x} } if len(vs) != len(xs) { - return &lengthMismatchError{"min_by", vs, xs} + return &func1WrapError{"min_by", v, x, &lengthMismatchError{}} } return minMaxBy(vs, xs, true) } @@ -1167,7 +1234,7 @@ func funcMinBy(v, x any) any { func funcMax(v any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"max", v} + return &func0TypeError{"max", v} } return minMaxBy(vs, vs, false) } @@ -1175,14 +1242,14 @@ func funcMax(v any) any { func funcMaxBy(v, x any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"max_by", v} + return &func1TypeError{"max_by", v, x} } xs, ok := x.([]any) if !ok { - return &funcTypeError{"max_by", x} + return &func1TypeError{"max_by", v, x} } if len(vs) != len(xs) { - return &lengthMismatchError{"max_by", vs, xs} + return &func1WrapError{"max_by", v, x, &lengthMismatchError{}} } return minMaxBy(vs, xs, false) } @@ -1193,7 +1260,7 @@ func minMaxBy(vs, xs []any, isMin bool) any { } i, j, x := 0, 0, xs[0] for i++; i < len(xs); i++ { - if compare(x, xs[i]) > 0 == isMin { + if Compare(x, xs[i]) > 0 == isMin { j, x = i, xs[i] } } @@ -1207,21 +1274,24 @@ type sortItem struct { func sortItems(name string, v, x any) ([]*sortItem, error) { vs, ok := v.([]any) if !ok { - return nil, &funcTypeError{name, v} + if strings.HasSuffix(name, "_by") { + return nil, &func1TypeError{name, v, x} + } + return nil, &func0TypeError{name, v} } xs, ok := x.([]any) if !ok { - return nil, &funcTypeError{name, x} + return nil, &func1TypeError{name, v, x} } if len(vs) != len(xs) { - return nil, &lengthMismatchError{name, vs, xs} + return nil, &func1WrapError{name, v, x, &lengthMismatchError{}} } items := make([]*sortItem, len(vs)) for i, v := range vs { items[i] = &sortItem{v, xs[i]} } sort.SliceStable(items, func(i, j int) bool { - return compare(items[i].key, items[j].key) < 0 + return Compare(items[i].key, items[j].key) < 0 }) return items, nil } @@ -1251,10 +1321,10 @@ func funcGroupBy(v, x any) any { if err != nil { return err } - var rs []any + rs := []any{} var last any for i, r := range items { - if i == 0 || compare(last, r.key) != 0 { + if i == 0 || Compare(last, r.key) != 0 { rs, last = append(rs, []any{r.value}), r.key } else { rs[len(rs)-1] = append(rs[len(rs)-1].([]any), r.value) @@ -1276,10 +1346,10 @@ func uniqueBy(name string, v, x any) any { if err != nil { return err } - var rs []any + rs := []any{} var last any for i, r := range items { - if i == 0 || compare(last, r.key) != 0 { + if i == 0 || Compare(last, r.key) != 0 { rs, last = append(rs, r.value), r.key } } @@ -1289,14 +1359,14 @@ func uniqueBy(name string, v, x any) any { func funcJoin(v, x any) any { vs, ok := values(v) if !ok { - return &funcTypeError{"join", v} + return &func1TypeError{"join", v, x} } if len(vs) == 0 { return "" } sep, ok := x.(string) if len(vs) > 1 && !ok { - return &funcTypeError{"join", x} + return &func1TypeError{"join", v, x} } ss := make([]string, len(vs)) for i, v := range vs { @@ -1333,7 +1403,7 @@ func funcExp10(v float64) float64 { func funcFrexp(v any) any { x, ok := toFloat(v) if !ok { - return &funcTypeError{"frexp", v} + return &func0TypeError{"frexp", v} } f, e := math.Frexp(x) return []any{f, e} @@ -1342,7 +1412,7 @@ func funcFrexp(v any) any { func funcModf(v any) any { x, ok := toFloat(v) if !ok { - return &funcTypeError{"modf", v} + return &func0TypeError{"modf", v} } i, f := math.Modf(x) return []any{f, i} @@ -1369,14 +1439,6 @@ func 
funcLdexp(l, r float64) float64 { return math.Ldexp(l, int(r)) } -func funcScalb(l, r float64) float64 { - return l * math.Pow(2, r) -} - -func funcScalbln(l, r float64) float64 { - return l * math.Pow(2, r) -} - func funcYn(l, r float64) float64 { return math.Yn(int(l), r) } @@ -1405,7 +1467,7 @@ func funcIsnan(v any) any { if v == nil { return false } - return &funcTypeError{"isnan", v} + return &func0TypeError{"isnan", v} } return math.IsNaN(x) } @@ -1465,16 +1527,13 @@ func funcSetpathWithAllocator(v any, args []any) any { func setpath(v, p, n any, a allocator) any { path, ok := p.([]any) if !ok { - return &funcTypeError{"setpath", p} + return &func1TypeError{"setpath", v, p} } - var err error - if v, err = update(v, path, n, a); err != nil { - if err, ok := err.(*funcTypeError); ok { - err.name = "setpath" - } - return err + u, err := update(v, path, n, a) + if err != nil { + return &func2WrapError{"setpath", v, p, n, err} } - return v + return u } func funcDelpaths(v, p any) any { @@ -1489,7 +1548,7 @@ func funcDelpathsWithAllocator(v any, args []any) any { func delpaths(v, p any, a allocator) any { paths, ok := p.([]any) if !ok { - return &funcTypeError{"delpaths", p} + return &func1TypeError{"delpaths", v, p} } if len(paths) == 0 { return v @@ -1499,16 +1558,18 @@ func delpaths(v, p any, a allocator) any { // jq -n "[0, 1, 2, 3] | delpaths([[1], [2]])" #=> [0, 3]. var empty struct{} var err error - for _, p := range paths { - path, ok := p.([]any) + u := v + for _, q := range paths { + path, ok := q.([]any) if !ok { - return &funcTypeError{"delpaths", p} + return &func1WrapError{"delpaths", v, p, &expectedArrayError{q}} } - if v, err = update(v, path, empty, a); err != nil { - return err + u, err = update(u, path, empty, a) + if err != nil { + return &func1WrapError{"delpaths", v, p, err} } } - return deleteEmpty(v) + return deleteEmpty(u) } func update(v any, path []any, n any, a allocator) (any, error) { @@ -1587,7 +1648,7 @@ func updateArrayIndex(v []any, i int, path []any, n any, a allocator) (any, erro if n == struct{}{} { return v, nil } - return nil, &funcTypeError{v: i} + return nil, &arrayIndexNegativeError{i} } else if j < len(v) { i = j x = v[i] @@ -1708,20 +1769,20 @@ func deleteEmpty(v any) any { } func funcGetpath(v, p any) any { - keys, ok := p.([]any) + path, ok := p.([]any) if !ok { - return &funcTypeError{"getpath", p} + return &func1TypeError{"getpath", v, p} } u := v - for _, x := range keys { + for _, x := range path { switch v.(type) { case nil, []any, map[string]any: v = funcIndex2(nil, v, x) - if _, ok := v.(error); ok { - return &getpathError{u, p} + if err, ok := v.(error); ok { + return &func1WrapError{"getpath", u, p, err} } default: - return &getpathError{u, p} + return &func1TypeError{"getpath", u, p} } } return v @@ -1730,7 +1791,7 @@ func funcGetpath(v, p any) any { func funcTranspose(v any) any { vss, ok := v.([]any) if !ok { - return &funcTypeError{"transpose", v} + return &func0TypeError{"transpose", v} } if len(vss) == 0 { return []any{} @@ -1739,7 +1800,7 @@ func funcTranspose(v any) any { for _, vs := range vss { vs, ok := vs.([]any) if !ok { - return &funcTypeError{"transpose", v} + return &func0TypeError{"transpose", v} } if k := len(vs); l < k { l = k @@ -1763,12 +1824,12 @@ func funcTranspose(v any) any { func funcBsearch(v, t any) any { vs, ok := v.([]any) if !ok { - return &funcTypeError{"bsearch", v} + return &func1TypeError{"bsearch", v, t} } i := sort.Search(len(vs), func(i int) bool { - return compare(vs[i], t) >= 0 + return 
Compare(vs[i], t) >= 0 }) - if i < len(vs) && compare(vs[i], t) == 0 { + if i < len(vs) && Compare(vs[i], t) == 0 { return i } return -i - 1 @@ -1778,14 +1839,14 @@ func funcGmtime(v any) any { if v, ok := toFloat(v); ok { return epochToArray(v, time.UTC) } - return &funcTypeError{"gmtime", v} + return &func0TypeError{"gmtime", v} } func funcLocaltime(v any) any { if v, ok := toFloat(v); ok { return epochToArray(v, time.Local) } - return &funcTypeError{"localtime", v} + return &func0TypeError{"localtime", v} } func epochToArray(v float64, loc *time.Location) []any { @@ -1803,14 +1864,15 @@ func epochToArray(v float64, loc *time.Location) []any { } func funcMktime(v any) any { - if a, ok := v.([]any); ok { - t, err := arrayToTime("mktime", a, time.UTC) - if err != nil { - return err - } - return timeToEpoch(t) + a, ok := v.([]any) + if !ok { + return &func0TypeError{"mktime", v} } - return &funcTypeError{"mktime", v} + t, err := arrayToTime(a, time.UTC) + if err != nil { + return &func0WrapError{"mktime", v, err} + } + return timeToEpoch(t) } func timeToEpoch(t time.Time) float64 { @@ -1821,90 +1883,95 @@ func funcStrftime(v, x any) any { if w, ok := toFloat(v); ok { v = epochToArray(w, time.UTC) } - if a, ok := v.([]any); ok { - if format, ok := x.(string); ok { - t, err := arrayToTime("strftime", a, time.UTC) - if err != nil { - return err - } - return timefmt.Format(t, format) - } - return &funcTypeError{"strftime", x} + a, ok := v.([]any) + if !ok { + return &func1TypeError{"strftime", v, x} + } + format, ok := x.(string) + if !ok { + return &func1TypeError{"strftime", v, x} + } + t, err := arrayToTime(a, time.UTC) + if err != nil { + return &func1WrapError{"strftime", v, x, err} } - return &funcTypeError{"strftime", v} + return timefmt.Format(t, format) } func funcStrflocaltime(v, x any) any { if w, ok := toFloat(v); ok { v = epochToArray(w, time.Local) } - if a, ok := v.([]any); ok { - if format, ok := x.(string); ok { - t, err := arrayToTime("strflocaltime", a, time.Local) - if err != nil { - return err - } - return timefmt.Format(t, format) - } - return &funcTypeError{"strflocaltime", x} + a, ok := v.([]any) + if !ok { + return &func1TypeError{"strflocaltime", v, x} + } + format, ok := x.(string) + if !ok { + return &func1TypeError{"strflocaltime", v, x} + } + t, err := arrayToTime(a, time.Local) + if err != nil { + return &func1WrapError{"strflocaltime", v, x, err} } - return &funcTypeError{"strflocaltime", v} + return timefmt.Format(t, format) } func funcStrptime(v, x any) any { - if v, ok := v.(string); ok { - if format, ok := x.(string); ok { - t, err := timefmt.Parse(v, format) - if err != nil { - return err - } - var s time.Time - if t == s { - return &funcTypeError{"strptime", v} - } - return epochToArray(timeToEpoch(t), time.UTC) - } - return &funcTypeError{"strptime", x} + s, ok := v.(string) + if !ok { + return &func1TypeError{"strptime", v, x} + } + format, ok := x.(string) + if !ok { + return &func1TypeError{"strptime", v, x} + } + t, err := timefmt.Parse(s, format) + if err != nil { + return &func1WrapError{"strptime", v, x, err} + } + var u time.Time + if t == u { + return &func1TypeError{"strptime", v, x} } - return &funcTypeError{"strptime", v} + return epochToArray(timeToEpoch(t), time.UTC) } -func arrayToTime(name string, a []any, loc *time.Location) (time.Time, error) { +func arrayToTime(a []any, loc *time.Location) (time.Time, error) { var t time.Time if len(a) != 8 { - return t, &funcTypeError{name, a} + return t, &timeArrayError{} } var y, m, d, h, min, sec, 
nsec int - if x, ok := toInt(a[0]); ok { - y = x - } else { - return t, &funcTypeError{name, a} + var ok bool + if y, ok = toInt(a[0]); !ok { + return t, &timeArrayError{} } - if x, ok := toInt(a[1]); ok { - m = x + 1 + if m, ok = toInt(a[1]); ok { + m++ } else { - return t, &funcTypeError{name, a} + return t, &timeArrayError{} } - if x, ok := toInt(a[2]); ok { - d = x - } else { - return t, &funcTypeError{name, a} + if d, ok = toInt(a[2]); !ok { + return t, &timeArrayError{} } - if x, ok := toInt(a[3]); ok { - h = x - } else { - return t, &funcTypeError{name, a} + if h, ok = toInt(a[3]); !ok { + return t, &timeArrayError{} } - if x, ok := toInt(a[4]); ok { - min = x - } else { - return t, &funcTypeError{name, a} + if min, ok = toInt(a[4]); !ok { + return t, &timeArrayError{} } if x, ok := toFloat(a[5]); ok { sec = int(x) nsec = int((x - math.Floor(x)) * 1e9) } else { - return t, &funcTypeError{name, a} + return t, &timeArrayError{} + } + if _, ok = toFloat(a[6]); !ok { + return t, &timeArrayError{} + } + if _, ok = toFloat(a[7]); !ok { + return t, &timeArrayError{} } return time.Date(y, time.Month(m), d, h, min, sec, nsec, loc), nil } @@ -1914,21 +1981,25 @@ func funcNow(any) any { } func funcMatch(v, re, fs, testing any) any { + name := "match" + if testing == true { + name = "test" + } var flags string if fs != nil { v, ok := fs.(string) if !ok { - return &funcTypeError{"match", fs} + return &func2TypeError{name, v, re, fs} } flags = v } s, ok := v.(string) if !ok { - return &funcTypeError{"match", v} + return &func2TypeError{name, v, re, fs} } restr, ok := re.(string) if !ok { - return &funcTypeError{"match", v} + return &func2TypeError{name, v, re, fs} } r, err := compileRegexp(restr, flags) if err != nil { @@ -2025,15 +2096,11 @@ func funcError(v any, args []any) any { if len(args) > 0 { v = args[0] } - code := 5 - if v == nil { - code = 0 - } - return &exitCodeError{v, code, false} + return &exitCodeError{v, 5} } func funcHalt(any) any { - return &exitCodeError{nil, 0, true} + return &HaltError{nil, 0} } func funcHaltError(v any, args []any) any { @@ -2041,10 +2108,10 @@ func funcHaltError(v any, args []any) any { if len(args) > 0 { var ok bool if code, ok = toInt(args[0]); !ok { - return &funcTypeError{"halt_error", args[0]} + return &func0TypeError{"halt_error", args[0]} } } - return &exitCodeError{v, code, true} + return &HaltError{v, code} } func toInt(x any) (int, bool) { diff --git a/vendor/github.com/itchyny/gojq/go.dev.mod b/vendor/github.com/itchyny/gojq/go.dev.mod index 9a0579ca69c..1e831626881 100644 --- a/vendor/github.com/itchyny/gojq/go.dev.mod +++ b/vendor/github.com/itchyny/gojq/go.dev.mod @@ -1,8 +1,8 @@ module github.com/itchyny/gojq -go 1.18 +go 1.20 require ( - github.com/itchyny/astgen-go v0.0.0-20210914105503-cc8fccf6f972 // indirect - github.com/itchyny/timefmt-go v0.1.5 // indirect + github.com/itchyny/astgen-go v0.0.0-20231113225122-e1c22b9aaf7b // indirect + github.com/itchyny/timefmt-go v0.1.6 // indirect ) diff --git a/vendor/github.com/itchyny/gojq/go.dev.sum b/vendor/github.com/itchyny/gojq/go.dev.sum index 66aee6c59e3..e8691b822fa 100644 --- a/vendor/github.com/itchyny/gojq/go.dev.sum +++ b/vendor/github.com/itchyny/gojq/go.dev.sum @@ -1,4 +1,4 @@ -github.com/itchyny/astgen-go v0.0.0-20210914105503-cc8fccf6f972 h1:XYWolmPDLTY9B1O5o/Ad811/mtVkaHWMiZdbPLm/nDA= -github.com/itchyny/astgen-go v0.0.0-20210914105503-cc8fccf6f972/go.mod h1:jTXcxGeQMJfFN3wWjtzb4aAaWDDN+QbezE0HjH1XfNk= -github.com/itchyny/timefmt-go v0.1.5 
h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= -github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= +github.com/itchyny/astgen-go v0.0.0-20231113225122-e1c22b9aaf7b h1:72fDU7wad+r3iQObaxhlXVIpAIMRUIUMrNa3go1vb8s= +github.com/itchyny/astgen-go v0.0.0-20231113225122-e1c22b9aaf7b/go.mod h1:Zp6xzEWVc2pQ/ObfLD6t/M6gDegsJWKdGKJSiT7qlu0= +github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= +github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= diff --git a/vendor/github.com/itchyny/gojq/lexer.go b/vendor/github.com/itchyny/gojq/lexer.go index 82bb2b6b9cb..0c2efd122c8 100644 --- a/vendor/github.com/itchyny/gojq/lexer.go +++ b/vendor/github.com/itchyny/gojq/lexer.go @@ -235,7 +235,8 @@ func (l *lexer) Lex(lval *yySymType) (tokenType int) { default: if ch >= utf8.RuneSelf { r, size := utf8.DecodeRuneInString(l.source[l.offset-1:]) - l.offset += size + // -1 to adjust for first byte consumed by next() + l.offset += size - 1 l.token = string(r) } } @@ -247,15 +248,9 @@ func (l *lexer) next() (byte, bool) { ch := l.source[l.offset] l.offset++ if ch == '#' { - if len(l.source) == l.offset { + if l.skipComment() { return 0, true } - for !isNewLine(l.source[l.offset]) { - l.offset++ - if len(l.source) == l.offset { - return 0, true - } - } } else if !isWhite(ch) { return ch, false } else if len(l.source) == l.offset { @@ -264,6 +259,28 @@ func (l *lexer) next() (byte, bool) { } } +func (l *lexer) skipComment() bool { + for { + switch l.peek() { + case 0: + return true + case '\\': + switch l.offset++; l.peek() { + case '\\', '\n': + l.offset++ + case '\r': + if l.offset++; l.peek() == '\n' { + l.offset++ + } + } + case '\n', '\r': + return false + default: + l.offset++ + } + } +} + func (l *lexer) peek() byte { if len(l.source) == l.offset { return 0 @@ -505,37 +522,34 @@ func quoteAndEscape(src string, quote bool, controls int) []byte { return buf } -type parseError struct { - offset int - token string +// ParseError represents a description of a query parsing error. 
+type ParseError struct { + Offset int // the error occurred after reading Offset bytes + Token string // the Token that caused the error (may be empty) tokenType int } -func (err *parseError) Error() string { +func (err *ParseError) Error() string { switch err.tokenType { case eof: return "unexpected EOF" case tokInvalid: - return "invalid token " + jsonMarshal(err.token) + return "invalid token " + jsonMarshal(err.Token) case tokInvalidEscapeSequence: - return `invalid escape sequence "` + err.token + `" in string literal` + return `invalid escape sequence "` + err.Token + `" in string literal` case tokUnterminatedString: return "unterminated string literal" default: - return "unexpected token " + jsonMarshal(err.token) + return "unexpected token " + jsonMarshal(err.Token) } } -func (err *parseError) Token() (string, int) { - return err.token, err.offset -} - func (l *lexer) Error(string) { offset, token := l.offset, l.token if l.tokenType != eof && l.tokenType < utf8.RuneSelf { token = string(rune(l.tokenType)) } - l.err = &parseError{offset, token, l.tokenType} + l.err = &ParseError{offset, token, l.tokenType} } func isWhite(ch byte) bool { @@ -562,12 +576,3 @@ func isHex(ch byte) bool { func isNumber(ch byte) bool { return '0' <= ch && ch <= '9' } - -func isNewLine(ch byte) bool { - switch ch { - case '\n', '\r': - return true - default: - return false - } -} diff --git a/vendor/github.com/itchyny/gojq/module_loader.go b/vendor/github.com/itchyny/gojq/module_loader.go index 6e9ba48cc06..0a73ba05b84 100644 --- a/vendor/github.com/itchyny/gojq/module_loader.go +++ b/vendor/github.com/itchyny/gojq/module_loader.go @@ -13,16 +13,24 @@ import ( // // Implement following optional methods. Use [NewModuleLoader] to load local modules. // +// LoadInitModules() ([]*Query, error) // LoadModule(string) (*Query, error) // LoadModuleWithMeta(string, map[string]any) (*Query, error) -// LoadInitModules() ([]*Query, error) // LoadJSON(string) (any, error) // LoadJSONWithMeta(string, map[string]any) (any, error) type ModuleLoader any -// NewModuleLoader creates a new [ModuleLoader] reading local modules in the paths. +// NewModuleLoader creates a new [ModuleLoader] loading local modules in the paths. +// Note that user can load modules outside the paths using "search" path of metadata. +// Empty paths are ignored, so specify "." for the current working directory. 
func NewModuleLoader(paths []string) ModuleLoader { - return &moduleLoader{expandHomeDir(paths)} + ps := make([]string, 0, len(paths)) + for _, path := range paths { + if path = resolvePath(path, ""); path != "" { + ps = append(ps, path) + } + } + return &moduleLoader{ps} } type moduleLoader struct { @@ -49,7 +57,7 @@ func (l *moduleLoader) LoadInitModules() ([]*Query, error) { if err != nil { return nil, err } - q, err := parseModule(path, string(cnt)) + q, err := parseModule(string(cnt), filepath.Dir(path)) if err != nil { return nil, &queryParseError{path, string(cnt), err} } @@ -67,7 +75,7 @@ func (l *moduleLoader) LoadModuleWithMeta(name string, meta map[string]any) (*Qu if err != nil { return nil, err } - q, err := parseModule(path, string(cnt)) + q, err := parseModule(string(cnt), filepath.Dir(path)) if err != nil { return nil, &queryParseError{path, string(cnt), err} } @@ -84,7 +92,7 @@ func (l *moduleLoader) LoadJSONWithMeta(name string, meta map[string]any) (any, return nil, err } defer f.Close() - var vals []any + vals := []any{} dec := json.NewDecoder(f) dec.UseNumber() for { @@ -109,15 +117,15 @@ func (l *moduleLoader) LoadJSONWithMeta(name string, meta map[string]any) (any, func (l *moduleLoader) lookupModule(name, extension string, meta map[string]any) (string, error) { paths := l.paths - if path := searchPath(meta); path != "" { + if path, ok := meta["search"].(string); ok { paths = append([]string{path}, paths...) } for _, base := range paths { - path := filepath.Clean(filepath.Join(base, name+extension)) + path := filepath.Join(base, name+extension) if _, err := os.Stat(path); err == nil { return path, err } - path = filepath.Clean(filepath.Join(base, name, filepath.Base(name)+extension)) + path = filepath.Join(base, name, filepath.Base(name)+extension) if _, err := os.Stat(path); err == nil { return path, err } @@ -125,66 +133,50 @@ func (l *moduleLoader) lookupModule(name, extension string, meta map[string]any) return "", fmt.Errorf("module not found: %q", name) } -// This is a dirty hack to implement the "search" field. 
-func parseModule(path, cnt string) (*Query, error) { +func parseModule(cnt, dir string) (*Query, error) { q, err := Parse(cnt) if err != nil { return nil, err } for _, i := range q.Imports { - if i.Meta == nil { - continue + if i.Meta != nil { + for _, e := range i.Meta.KeyVals { + if e.Key == "search" || e.KeyString == "search" { + if path, ok := e.Val.toString(); ok { + if path = resolvePath(path, dir); path != "" { + e.Val.Str = path + } else { + e.Val.Null = true + } + } + } + } } - i.Meta.KeyVals = append( - i.Meta.KeyVals, - &ConstObjectKeyVal{ - Key: "$$path", - Val: &ConstTerm{Str: path}, - }, - ) } return q, nil } -func searchPath(meta map[string]any) string { - x, ok := meta["search"] - if !ok { - return "" - } - s, ok := x.(string) - if !ok { - return "" - } - if filepath.IsAbs(s) { - return s - } - if strings.HasPrefix(s, "~") { - if homeDir, err := os.UserHomeDir(); err == nil { - return filepath.Join(homeDir, s[1:]) +func resolvePath(path, dir string) string { + switch { + case filepath.IsAbs(path): + return path + case strings.HasPrefix(path, "~/"): + dir, err := os.UserHomeDir() + if err != nil { + return "" } - } - var path string - if x, ok := meta["$$path"]; ok { - path, _ = x.(string) - } - if path == "" { - return s - } - return filepath.Join(filepath.Dir(path), s) -} - -func expandHomeDir(paths []string) []string { - var homeDir string - var err error - for i, path := range paths { - if strings.HasPrefix(path, "~") { - if homeDir == "" && err == nil { - homeDir, err = os.UserHomeDir() - } - if homeDir != "" { - paths[i] = filepath.Join(homeDir, path[1:]) - } + return filepath.Join(dir, path[2:]) + case strings.HasPrefix(path, "$ORIGIN/"): + exe, err := os.Executable() + if err != nil { + return "" + } + exe, err = filepath.EvalSymlinks(exe) + if err != nil { + return "" } + return filepath.Join(filepath.Dir(exe), path[8:]) + default: + return filepath.Join(dir, path) } - return paths } diff --git a/vendor/github.com/itchyny/gojq/operator.go b/vendor/github.com/itchyny/gojq/operator.go index 73a548e01fa..64b74b78054 100644 --- a/vendor/github.com/itchyny/gojq/operator.go +++ b/vendor/github.com/itchyny/gojq/operator.go @@ -371,7 +371,7 @@ func funcOpSub(_, l, r any) any { L: for _, l := range l { for _, r := range r { - if compare(l, r) == 0 { + if Compare(l, r) == 0 { continue L } } @@ -433,11 +433,11 @@ func deepMergeObjects(l, r map[string]any) any { } func repeatString(s string, n float64) any { - if n <= 0.0 || len(s) > 0 && n > float64(0x10000000/len(s)) || math.IsNaN(n) { + if n < 0.0 || len(s) > 0 && n > float64(0x10000000/len(s)) || math.IsNaN(n) { return nil } - if int(n) < 1 { - return s + if s == "" { + return "" } return strings.Repeat(s, int(n)) } @@ -446,9 +446,6 @@ func funcOpDiv(_, l, r any) any { return binopTypeSwitch(l, r, func(l, r int) any { if r == 0 { - if l == 0 { - return math.NaN() - } return &zeroDivisionError{l, r} } if l%r == 0 { @@ -458,18 +455,12 @@ func funcOpDiv(_, l, r any) any { }, func(l, r float64) any { if r == 0.0 { - if l == 0.0 { - return math.NaN() - } return &zeroDivisionError{l, r} } return l / r }, func(l, r *big.Int) any { if r.Sign() == 0 { - if l.Sign() == 0 { - return math.NaN() - } return &zeroDivisionError{l, r} } d, m := new(big.Int).DivMod(l, r, new(big.Int)) @@ -508,6 +499,9 @@ func funcOpMod(_, l, r any) any { if ri == 0 { return &zeroModuloError{l, r} } + if math.IsNaN(l) || math.IsNaN(r) { + return math.NaN() + } return floatToInt(l) % ri }, func(l, r *big.Int) any { @@ -531,25 +525,25 @@ func funcOpAlt(_, l, r 
any) any { } func funcOpEq(_, l, r any) any { - return compare(l, r) == 0 + return Compare(l, r) == 0 } func funcOpNe(_, l, r any) any { - return compare(l, r) != 0 + return Compare(l, r) != 0 } func funcOpGt(_, l, r any) any { - return compare(l, r) > 0 + return Compare(l, r) > 0 } func funcOpLt(_, l, r any) any { - return compare(l, r) < 0 + return Compare(l, r) < 0 } func funcOpGe(_, l, r any) any { - return compare(l, r) >= 0 + return Compare(l, r) >= 0 } func funcOpLe(_, l, r any) any { - return compare(l, r) <= 0 + return Compare(l, r) <= 0 } diff --git a/vendor/github.com/itchyny/gojq/parser.go b/vendor/github.com/itchyny/gojq/parser.go index 1e5e50af229..5a0dfdc776f 100644 --- a/vendor/github.com/itchyny/gojq/parser.go +++ b/vendor/github.com/itchyny/gojq/parser.go @@ -5,21 +5,7 @@ package gojq import __yyfmt__ "fmt" -// Parse a query string, and returns the query struct. -// -// If parsing failed, the returned error has the method Token() (string, int), -// which reports the invalid token and the byte offset in the query string. The -// token is empty if the error occurred after scanning the entire query string. -// The byte offset is the scanned bytes when the error occurred. -// //line parser.go.y:2 -func Parse(src string) (*Query, error) { - l := newLexer(src) - if yyParse(l) > 0 { - return nil, l.err - } - return l.result, nil -} func reverseFuncDef(xs []*FuncDef) []*FuncDef { for i, j := 0, len(xs)-1; i < j; i, j = i+1, j-1 { @@ -35,7 +21,7 @@ func prependFuncDef(xs []*FuncDef, x *FuncDef) []*FuncDef { return xs } -//line parser.go.y:33 +//line parser.go.y:19 type yySymType struct { yys int value any @@ -46,9 +32,9 @@ type yySymType struct { const tokAltOp = 57346 const tokUpdateOp = 57347 const tokDestAltOp = 57348 -const tokOrOp = 57349 -const tokAndOp = 57350 -const tokCompareOp = 57351 +const tokCompareOp = 57349 +const tokOrOp = 57350 +const tokAndOp = 57351 const tokModule = 57352 const tokImport = 57353 const tokInclude = 57354 @@ -59,33 +45,34 @@ const tokBreak = 57358 const tokNull = 57359 const tokTrue = 57360 const tokFalse = 57361 -const tokIdent = 57362 -const tokVariable = 57363 -const tokModuleIdent = 57364 -const tokModuleVariable = 57365 -const tokIndex = 57366 -const tokNumber = 57367 -const tokFormat = 57368 -const tokString = 57369 -const tokStringStart = 57370 -const tokStringQuery = 57371 -const tokStringEnd = 57372 -const tokIf = 57373 -const tokThen = 57374 -const tokElif = 57375 -const tokElse = 57376 -const tokEnd = 57377 -const tokTry = 57378 -const tokCatch = 57379 -const tokReduce = 57380 -const tokForeach = 57381 -const tokRecurse = 57382 -const tokFuncDefPost = 57383 -const tokTermPost = 57384 -const tokEmptyCatch = 57385 -const tokInvalid = 57386 -const tokInvalidEscapeSequence = 57387 -const tokUnterminatedString = 57388 +const tokIf = 57362 +const tokThen = 57363 +const tokElif = 57364 +const tokElse = 57365 +const tokEnd = 57366 +const tokTry = 57367 +const tokCatch = 57368 +const tokReduce = 57369 +const tokForeach = 57370 +const tokIdent = 57371 +const tokVariable = 57372 +const tokModuleIdent = 57373 +const tokModuleVariable = 57374 +const tokRecurse = 57375 +const tokIndex = 57376 +const tokNumber = 57377 +const tokFormat = 57378 +const tokString = 57379 +const tokStringStart = 57380 +const tokStringQuery = 57381 +const tokStringEnd = 57382 +const tokInvalid = 57383 +const tokInvalidEscapeSequence = 57384 +const tokUnterminatedString = 57385 +const tokFuncDefQuery = 57386 +const tokExpr = 57387 +const tokTerm = 57388 +const tokEmptyCatch 
= 57389 var yyToknames = [...]string{ "$end", @@ -94,9 +81,9 @@ var yyToknames = [...]string{ "tokAltOp", "tokUpdateOp", "tokDestAltOp", + "tokCompareOp", "tokOrOp", "tokAndOp", - "tokCompareOp", "tokModule", "tokImport", "tokInclude", @@ -107,10 +94,20 @@ var yyToknames = [...]string{ "tokNull", "tokTrue", "tokFalse", + "tokIf", + "tokThen", + "tokElif", + "tokElse", + "tokEnd", + "tokTry", + "tokCatch", + "tokReduce", + "tokForeach", "tokIdent", "tokVariable", "tokModuleIdent", "tokModuleVariable", + "tokRecurse", "tokIndex", "tokNumber", "tokFormat", @@ -118,22 +115,12 @@ var yyToknames = [...]string{ "tokStringStart", "tokStringQuery", "tokStringEnd", - "tokIf", - "tokThen", - "tokElif", - "tokElse", - "tokEnd", - "tokTry", - "tokCatch", - "tokReduce", - "tokForeach", - "tokRecurse", - "tokFuncDefPost", - "tokTermPost", - "tokEmptyCatch", "tokInvalid", "tokInvalidEscapeSequence", "tokUnterminatedString", + "tokFuncDefQuery", + "tokExpr", + "tokTerm", "'|'", "','", "'+'", @@ -143,6 +130,7 @@ var yyToknames = [...]string{ "'%'", "'.'", "'?'", + "tokEmptyCatch", "'['", "';'", "':'", @@ -159,218 +147,181 @@ const yyEofCode = 1 const yyErrCode = 2 const yyInitialStackSize = 16 -//line parser.go.y:693 +//line parser.go.y:671 //line yacctab:1 var yyExca = [...]int16{ -1, 1, 1, -1, -2, 0, - -1, 97, - 55, 0, - -2, 104, - -1, 130, + -1, 145, 5, 0, - -2, 32, - -1, 133, - 9, 0, - -2, 35, - -1, 194, - 58, 114, - -2, 54, + -2, 27, + -1, 148, + 7, 0, + -2, 30, + -1, 199, + 59, 114, + -2, 49, } const yyPrivate = 57344 -const yyLast = 1127 +const yyLast = 782 var yyAct = [...]int16{ - 86, 214, 174, 112, 12, 203, 9, 175, 111, 31, - 190, 6, 156, 140, 117, 47, 95, 97, 93, 94, - 89, 141, 49, 7, 179, 180, 181, 240, 246, 264, - 239, 103, 177, 106, 178, 227, 164, 119, 107, 108, - 105, 245, 102, 75, 76, 113, 77, 78, 79, 123, - 226, 163, 211, 225, 259, 210, 142, 179, 180, 181, - 158, 159, 143, 182, 122, 177, 224, 178, 219, 7, - 235, 234, 104, 127, 243, 128, 129, 130, 131, 132, - 133, 134, 135, 136, 137, 138, 72, 74, 80, 81, - 82, 83, 84, 147, 73, 88, 182, 196, 73, 229, - 195, 145, 7, 150, 228, 161, 166, 165, 157, 126, - 125, 124, 144, 88, 258, 167, 80, 81, 82, 83, - 84, 206, 73, 44, 242, 91, 90, 92, 183, 184, - 82, 83, 84, 154, 73, 153, 267, 186, 49, 173, - 42, 43, 100, 91, 90, 92, 99, 191, 120, 197, - 256, 257, 200, 192, 201, 202, 188, 75, 76, 207, - 77, 78, 79, 198, 199, 209, 42, 43, 216, 92, - 215, 215, 218, 213, 113, 98, 75, 76, 185, 77, - 78, 79, 204, 205, 101, 221, 222, 170, 155, 171, - 169, 3, 28, 27, 230, 96, 220, 232, 176, 46, - 223, 11, 80, 81, 82, 83, 84, 11, 73, 78, - 79, 157, 241, 110, 8, 152, 237, 255, 236, 72, - 74, 80, 81, 82, 83, 84, 85, 73, 79, 278, - 160, 191, 277, 121, 189, 253, 254, 192, 248, 247, - 187, 139, 249, 250, 208, 262, 260, 261, 215, 263, - 80, 81, 82, 83, 84, 149, 73, 268, 269, 10, - 270, 5, 4, 2, 1, 88, 272, 273, 80, 81, - 82, 83, 84, 0, 73, 279, 0, 0, 271, 280, - 51, 52, 0, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 115, 116, 91, 90, 92, 0, 0, - 42, 43, 0, 87, 63, 64, 65, 66, 67, 68, - 69, 70, 71, 88, 0, 20, 0, 17, 37, 24, - 25, 26, 38, 40, 39, 41, 23, 29, 30, 42, - 43, 0, 114, 15, 0, 0, 212, 0, 16, 0, - 13, 14, 22, 91, 90, 92, 0, 0, 0, 0, - 0, 33, 34, 0, 0, 0, 21, 0, 36, 0, - 148, 32, 0, 146, 35, 51, 52, 0, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 115, 116, - 0, 0, 0, 0, 0, 42, 43, 0, 0, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 18, 19, - 20, 0, 17, 37, 24, 25, 26, 38, 40, 39, - 41, 23, 29, 30, 42, 43, 0, 114, 15, 0, - 0, 109, 0, 16, 0, 13, 14, 22, 0, 0, - 0, 0, 0, 0, 0, 0, 33, 
34, 0, 0, - 0, 21, 0, 36, 0, 0, 32, 0, 20, 35, - 17, 37, 24, 25, 26, 38, 40, 39, 41, 23, - 29, 30, 42, 43, 0, 0, 15, 0, 0, 0, - 0, 16, 0, 13, 14, 22, 0, 0, 0, 0, - 0, 0, 0, 0, 33, 34, 0, 0, 0, 21, - 0, 36, 0, 0, 32, 0, 231, 35, 20, 0, - 17, 37, 24, 25, 26, 38, 40, 39, 41, 23, - 29, 30, 42, 43, 0, 0, 15, 0, 0, 0, - 0, 16, 0, 13, 14, 22, 0, 0, 0, 0, - 0, 0, 0, 0, 33, 34, 0, 0, 0, 21, - 0, 36, 0, 0, 32, 0, 118, 35, 20, 0, - 17, 37, 24, 25, 26, 38, 40, 39, 41, 23, - 29, 30, 42, 43, 0, 0, 15, 0, 77, 78, - 79, 16, 0, 13, 14, 22, 0, 0, 0, 0, - 0, 0, 0, 0, 33, 34, 0, 0, 0, 21, - 0, 36, 0, 0, 32, 51, 52, 35, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 48, 0, - 80, 81, 82, 83, 84, 50, 73, 0, 0, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 51, 52, - 0, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 48, 0, 0, 0, 0, 0, 0, 50, 0, - 0, 172, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 51, 52, 0, 53, 54, 55, 56, 57, 58, - 59, 60, 61, 62, 115, 194, 0, 0, 0, 0, - 0, 42, 43, 0, 45, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 37, 24, 25, 26, 38, 40, - 39, 41, 23, 29, 30, 42, 43, 75, 76, 0, - 77, 78, 79, 193, 0, 0, 0, 0, 22, 0, - 0, 0, 0, 0, 0, 0, 0, 33, 34, 0, - 0, 0, 21, 0, 36, 0, 0, 32, 75, 76, - 35, 77, 78, 79, 0, 0, 0, 0, 0, 0, - 72, 74, 80, 81, 82, 83, 84, 0, 73, 0, - 0, 0, 75, 76, 252, 77, 78, 79, 0, 0, + 78, 134, 186, 102, 103, 10, 175, 195, 32, 211, + 48, 108, 81, 176, 131, 6, 229, 5, 50, 73, + 74, 159, 14, 180, 181, 182, 124, 98, 110, 135, + 280, 97, 228, 279, 115, 104, 16, 158, 265, 121, + 114, 178, 123, 179, 244, 73, 74, 180, 181, 182, + 73, 74, 112, 113, 154, 155, 136, 117, 117, 117, + 254, 243, 137, 183, 282, 178, 255, 179, 220, 6, + 247, 116, 118, 119, 128, 129, 73, 74, 99, 73, + 74, 227, 73, 74, 246, 141, 238, 183, 201, 237, + 132, 200, 139, 6, 235, 226, 138, 163, 208, 80, + 157, 207, 241, 231, 230, 161, 162, 73, 74, 117, + 117, 117, 117, 117, 117, 117, 117, 117, 117, 83, + 82, 278, 84, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 184, 185, 174, 50, 160, 193, 73, + 74, 127, 196, 202, 203, 126, 197, 73, 74, 125, + 73, 74, 248, 253, 189, 204, 45, 240, 206, 73, + 74, 245, 143, 210, 214, 215, 73, 74, 104, 217, + 218, 213, 79, 219, 86, 87, 76, 90, 88, 89, + 169, 43, 44, 117, 117, 73, 74, 75, 166, 117, + 222, 224, 80, 225, 73, 74, 273, 212, 212, 232, + 132, 223, 234, 216, 120, 271, 73, 74, 191, 239, + 43, 44, 83, 82, 85, 84, 274, 270, 96, 91, + 92, 93, 94, 95, 73, 74, 93, 94, 95, 249, + 84, 164, 251, 252, 196, 236, 267, 250, 197, 130, + 25, 256, 73, 74, 262, 263, 187, 188, 3, 190, + 257, 258, 260, 261, 264, 24, 266, 73, 74, 9, + 221, 268, 269, 117, 117, 111, 171, 272, 172, 170, + 13, 275, 276, 77, 90, 277, 89, 212, 212, 13, + 177, 281, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 106, 107, 91, 92, 93, 94, 95, + 47, 43, 44, 101, 165, 259, 91, 92, 93, 94, + 95, 242, 156, 122, 194, 17, 192, 15, 37, 21, + 22, 23, 33, 133, 105, 205, 7, 34, 209, 35, + 36, 39, 41, 40, 42, 19, 20, 28, 31, 43, + 44, 8, 4, 2, 86, 87, 1, 90, 88, 89, + 0, 29, 30, 0, 168, 90, 18, 0, 0, 27, + 0, 142, 38, 0, 140, 26, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 106, 107, 91, + 92, 93, 94, 95, 0, 43, 44, 91, 92, 93, + 94, 95, 0, 0, 0, 0, 0, 11, 12, 17, + 0, 15, 37, 21, 22, 23, 33, 0, 105, 0, + 0, 34, 100, 35, 36, 39, 41, 40, 42, 19, + 20, 28, 31, 43, 44, 0, 0, 0, 0, 86, + 87, 0, 90, 88, 89, 29, 30, 0, 0, 167, + 18, 0, 0, 27, 0, 0, 38, 0, 17, 26, + 15, 37, 21, 22, 23, 33, 0, 0, 0, 0, + 34, 0, 35, 36, 39, 41, 40, 42, 19, 20, + 28, 31, 43, 44, 91, 92, 93, 94, 95, 0, + 0, 
0, 0, 0, 29, 30, 90, 88, 89, 18, + 0, 0, 27, 0, 0, 38, 0, 233, 26, 17, + 0, 15, 37, 21, 22, 23, 33, 0, 0, 0, + 0, 34, 0, 35, 36, 39, 41, 40, 42, 19, + 20, 28, 31, 43, 44, 0, 0, 0, 91, 92, + 93, 94, 95, 0, 0, 29, 30, 0, 0, 0, + 18, 0, 0, 27, 0, 0, 38, 0, 109, 26, + 17, 0, 15, 37, 21, 22, 23, 33, 0, 0, + 0, 0, 34, 0, 35, 36, 39, 41, 40, 42, + 19, 20, 28, 31, 43, 44, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 29, 30, 0, 0, + 0, 18, 0, 0, 27, 0, 0, 38, 0, 0, + 26, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 49, 0, 0, 0, 0, 0, 0, 0, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 49, 0, 0, 0, 0, 173, 0, 0, + 51, 37, 21, 22, 23, 33, 0, 0, 0, 0, + 34, 0, 35, 36, 39, 41, 40, 42, 19, 20, + 28, 31, 43, 44, 0, 0, 0, 46, 0, 0, + 0, 0, 0, 0, 29, 30, 0, 0, 0, 18, + 0, 0, 27, 0, 0, 38, 0, 0, 26, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 106, 199, 0, 0, 0, 0, 0, 0, 43, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 72, 74, 80, 81, 82, 83, 84, 0, 73, - 0, 0, 0, 75, 76, 233, 77, 78, 79, 0, - 0, 0, 0, 0, 0, 72, 74, 80, 81, 82, - 83, 84, 0, 73, 0, 0, 0, 75, 76, 168, - 77, 78, 79, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 72, 74, 80, 81, - 82, 83, 84, 0, 73, 0, 0, 75, 76, 281, - 77, 78, 79, 0, 0, 0, 0, 0, 0, 0, - 72, 74, 80, 81, 82, 83, 84, 0, 73, 0, - 0, 75, 76, 276, 77, 78, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 72, 74, 80, 81, 82, 83, 84, 0, 73, 0, - 0, 75, 76, 251, 77, 78, 79, 0, 0, 0, - 0, 0, 0, 0, 72, 74, 80, 81, 82, 83, - 84, 0, 73, 0, 0, 75, 76, 244, 77, 78, - 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 72, 74, 80, 81, 82, 83, - 84, 0, 73, 0, 0, 75, 76, 217, 77, 78, - 79, 0, 0, 0, 0, 0, 0, 0, 72, 74, - 80, 81, 82, 83, 84, 0, 73, 0, 0, 75, - 76, 162, 77, 78, 79, 0, 0, 0, 0, 0, - 75, 76, 0, 77, 78, 79, 0, 0, 72, 74, - 80, 81, 82, 83, 84, 0, 73, 0, 275, 75, - 76, 0, 77, 78, 79, 0, 0, 0, 0, 0, - 0, 0, 72, 74, 80, 81, 82, 83, 84, 0, - 73, 0, 266, 72, 74, 80, 81, 82, 83, 84, - 0, 73, 0, 265, 75, 76, 0, 77, 78, 79, - 0, 0, 72, 74, 80, 81, 82, 83, 84, 0, - 73, 0, 238, 0, 0, 0, 75, 76, 0, 77, - 78, 79, 274, 0, 0, 75, 76, 0, 77, 78, - 79, 0, 0, 0, 0, 0, 0, 72, 74, 80, - 81, 82, 83, 84, 151, 73, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 72, - 74, 80, 81, 82, 83, 84, 0, 73, 72, 74, - 80, 81, 82, 83, 84, 0, 73, + 0, 198, } var yyPact = [...]int16{ - 181, -1000, -1000, -39, -1000, 387, 66, 621, -1000, 1071, - -1000, 535, 289, 678, 678, 535, 535, 154, 119, 115, - 164, 113, -1000, -1000, -1000, -1000, -1000, 13, -1000, -1000, - 139, -1000, 535, 678, 678, 358, 485, 127, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 1, -1000, 53, 52, - 51, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 238, -1000, -1000, -48, 406, 98, 643, -1000, -1000, -1000, + 112, 150, 139, 557, 158, 184, 170, 189, 173, -1000, + -1000, -1000, -1000, -1000, 18, -1000, 368, 506, -1000, 665, + 665, 144, -1000, 557, 665, 665, 665, 174, 557, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -22, -1000, 90, + 86, 82, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 535, -1000, 535, 535, 535, 535, 535, 535, - 535, 535, 535, 535, 535, -1000, 1071, 0, -1000, -1000, - -1000, 113, 302, 241, 89, 1062, 535, 98, 86, 174, - -39, 2, -1000, -1000, 535, -1000, 921, 71, 71, -1000, - -12, -1000, 49, 48, 535, -1000, -1000, -1000, -1000, 758, - -1000, 160, -1000, 588, 40, 40, 40, 1071, 153, 153, - 561, 201, 219, 67, 79, 
79, 43, 43, 43, 131, - -1000, -1000, 0, 654, -1000, -1000, -1000, 39, 535, 0, - 0, 535, -1000, 535, 535, 162, 64, -1000, 535, 162, - -5, 1071, -1000, -1000, 273, 678, 678, 897, -1000, -1000, - -1000, 535, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 7, -1000, -1000, 535, 0, 5, -1000, -13, - -1000, 46, 41, 535, -1000, -1000, 435, 734, 12, 11, - 1071, -1000, 1071, -39, -1000, -1000, -1000, 1005, -30, -1000, - -1000, 535, -1000, -1000, 77, 71, 77, 16, 867, -1000, - -20, -1000, 1071, -1000, -1000, 0, -1000, 654, 0, 0, - 843, -1000, 703, -1000, 535, 535, 117, 57, -1000, -4, - 162, 1071, 678, 678, -1000, -1000, 40, -1000, -1000, -1000, - -1000, -29, -1000, 986, 975, 101, 535, 535, -1000, 535, - -1000, 71, 77, -1000, 0, 535, 535, -1000, 1040, 1071, - 951, -1000, 813, 172, 535, -1000, -1000, -1000, 535, 1071, - 789, -1000, + -1000, -1000, -1000, 557, 557, 225, -48, -1000, 112, -1, + -1000, -1000, -1000, 173, 312, 115, 665, 665, 665, 665, + 665, 665, 665, 665, 665, 665, -5, -1000, -1000, 557, + -1000, -27, -1000, 78, 46, 557, -1000, -1000, -1000, -1000, + 35, 557, 65, 65, -1000, 210, 162, 65, 445, 350, + -1000, 119, 229, -1000, 613, 30, 30, 30, 112, -1000, + 217, 96, -1000, 202, -1000, -1000, -1, 721, -1000, -1000, + -1000, 29, 557, 557, 170, 499, 267, 358, 256, 175, + 175, -1000, -1000, -1000, 557, 217, 40, 112, -1000, 274, + 665, 665, 103, -1000, 557, -1000, 665, -1, -1, -1000, + -1000, -1000, 557, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 6, -1000, -1000, -48, -1000, -1000, -1000, + 557, -1, 33, -1000, -32, -1000, 45, 44, 557, -1000, + -1000, 455, 32, 112, 177, 28, -1000, -1000, 557, -1000, + -1000, 110, 170, 110, 43, 112, -1000, 1, -16, 100, + -1000, 22, -1000, 94, 112, -1000, -1000, -1, -1000, 721, + -1, -1, 92, -1000, -2, -1000, -1000, 7, 217, 112, + 665, 665, 230, 557, 557, -1000, -1000, 30, -1000, -1000, + -1000, -1000, -1000, -21, -1000, 557, -1000, 110, 110, 212, + 557, 557, 159, 147, -1000, -1, 138, -1000, 195, 112, + 557, 557, -1000, -1000, 557, 60, -28, 112, -1000, -1000, + 557, 3, -1000, } var yyPgo = [...]int16{ - 0, 264, 263, 262, 261, 259, 12, 214, 195, 244, - 0, 241, 13, 240, 234, 10, 4, 9, 233, 20, - 230, 218, 217, 215, 213, 8, 1, 2, 7, 199, - 15, 198, 196, 5, 193, 192, 14, 3, + 0, 356, 353, 352, 351, 14, 336, 259, 265, 335, + 0, 333, 1, 326, 324, 7, 36, 22, 8, 323, + 12, 322, 321, 315, 314, 313, 3, 9, 6, 13, + 310, 10, 280, 260, 2, 255, 240, 11, 4, } var yyR1 = [...]int8{ 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, - 6, 6, 7, 7, 8, 8, 9, 9, 33, 33, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 11, 11, 12, 12, 12, 13, 13, 14, - 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 17, 17, 18, 18, 18, 34, - 34, 35, 35, 19, 19, 19, 19, 19, 20, 20, - 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, - 25, 25, 25, 37, 37, 37, 26, 26, 27, 27, - 27, 27, 27, 27, 27, 28, 28, 28, 29, 29, - 30, 30, 30, 31, 31, 32, 32, 36, 36, 36, - 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 36, 36, 36, 36, 36, 36, + 6, 6, 7, 7, 8, 8, 9, 9, 34, 34, + 10, 10, 10, 10, 10, 10, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 11, 11, 12, + 12, 12, 13, 13, 14, 14, 15, 15, 15, 15, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 18, 18, 19, 19, 19, 35, + 35, 36, 36, 20, 20, 20, 20, 20, 21, 21, + 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, + 26, 26, 26, 
38, 38, 38, 27, 27, 28, 28, + 28, 28, 28, 28, 28, 29, 29, 29, 30, 30, + 31, 31, 31, 32, 32, 33, 33, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, } var yyR2 = [...]int8{ - 0, 2, 0, 3, 2, 2, 0, 2, 6, 4, - 0, 1, 0, 2, 5, 8, 1, 3, 1, 1, - 2, 3, 5, 9, 9, 11, 7, 3, 4, 2, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 1, 1, 3, 1, 3, 3, 1, 3, 1, - 3, 3, 3, 5, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, - 3, 2, 2, 2, 3, 4, 2, 3, 2, 2, + 0, 3, 0, 3, 0, 2, 6, 4, 0, 1, + 1, 1, 0, 2, 5, 8, 1, 3, 1, 1, + 2, 3, 5, 4, 3, 1, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 1, 1, 3, 1, + 3, 3, 1, 3, 1, 3, 3, 3, 5, 1, + 1, 1, 1, 2, 2, 1, 1, 1, 1, 4, + 1, 2, 3, 4, 2, 3, 1, 2, 2, 1, + 2, 1, 7, 3, 9, 9, 11, 2, 3, 2, 2, 2, 3, 3, 1, 3, 0, 2, 4, 1, 1, 1, 1, 2, 3, 4, 4, 5, 1, 3, 0, 5, 0, 2, 0, 2, 1, 3, 3, 3, - 5, 1, 1, 1, 1, 1, 1, 3, 1, 1, + 5, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 3, 3, 3, 3, 2, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -378,67 +329,67 @@ var yyR2 = [...]int8{ } var yyChk = [...]int16{ - -1000, -1, -2, 10, -3, -4, -28, 62, -7, -10, - -5, -8, -16, 38, 39, 31, 36, 15, 11, 12, - 13, 54, 40, 24, 17, 18, 19, -34, -35, 25, - 26, -17, 59, 49, 50, 62, 56, 16, 20, 22, - 21, 23, 27, 28, 57, 63, -29, -30, 20, -36, - 27, 7, 8, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 47, 55, 48, 4, 5, 7, 8, 9, - 49, 50, 51, 52, 53, -7, -10, 14, 24, -19, - 55, 54, 56, -16, -16, -10, -8, -10, 21, 27, - 27, 20, -19, -17, 59, -17, -10, -16, -16, 63, - -24, -25, -37, -17, 59, 20, 21, -36, 61, -10, - 21, -18, 63, 48, 58, 58, 58, -10, -10, -10, - -10, -10, -10, -10, -10, -10, -10, -10, -10, -11, - -12, 21, 56, 62, -19, -17, 61, -10, 58, 14, - 14, 32, -23, 37, 47, 14, -6, -28, 58, 59, - -20, -10, 60, 63, 48, 58, 58, -10, 61, 30, - 27, 29, 63, -30, -27, -28, -31, 25, 27, 17, - 18, 19, 56, -27, -27, 47, 6, -13, -12, -14, - -15, -37, -17, 59, 21, 61, 58, -10, -12, -12, - -10, -10, -10, -33, 20, 21, 57, -10, -9, -33, - 60, 57, 63, -25, -26, -16, -26, 60, -10, 61, - -32, -27, -10, -12, 61, 48, 63, 48, 58, 58, - -10, 61, -10, 61, 59, 59, -21, -6, 57, 60, - 57, -10, 47, 58, 60, 61, 48, -12, -15, -12, - -12, 60, 61, -10, -10, -22, 33, 34, 57, 58, - -33, -16, -26, -27, 58, 57, 57, 35, -10, -10, - -10, -12, -10, -10, 32, 57, 60, 60, 57, -10, - -10, 60, + -1000, -1, -2, 10, -3, -29, 63, -6, -4, -7, + -10, 11, 12, -8, -17, 15, -16, 13, 54, 33, + 34, 17, 18, 19, -35, -36, 63, 57, 35, 49, + 50, 36, -18, 20, 25, 27, 28, 16, 60, 29, + 31, 30, 32, 37, 38, 58, 64, -30, -31, 29, + -37, 37, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 47, 48, 37, 37, -7, -10, 14, + 34, -20, 55, 54, 57, 30, 4, 5, 8, 9, + 7, 49, 50, 51, 52, 53, 29, -20, -18, 60, + 64, -25, -26, -38, -18, 60, 29, 30, -37, 62, + -10, -8, -17, -17, -18, -10, -16, -17, -16, -16, + 30, -10, -19, 64, 48, 59, 59, 59, -10, -10, + 14, -5, -29, -11, -12, 30, 57, 63, -20, -18, + 62, -10, 59, 47, -16, -16, -16, -16, -16, -16, + -16, -16, -16, -16, 59, 60, -21, -10, 64, 48, + 59, 59, -10, 62, 21, -24, 26, 14, 14, 61, + 40, 37, 39, 64, -31, -28, -29, -32, 35, 37, + 17, 18, 19, 57, -28, -28, -34, 29, 30, 58, + 47, 6, -13, -12, -14, -15, -38, -18, 60, 30, + 62, 59, -10, -10, -10, -9, -34, 61, 58, 64, + -26, -27, -16, -27, 61, -10, -16, -12, -12, -10, + 62, -33, -28, -5, -10, -12, 62, 48, 64, 48, + 59, 59, -10, 62, -10, 62, 58, 61, 58, -10, + 47, 59, -22, 60, 60, 61, 62, 48, 58, -12, + -15, -12, -12, 61, 62, 59, -34, -27, -27, -23, + 22, 23, -10, -10, -28, 59, -10, 24, -10, -10, + 
58, 58, -12, 58, 21, -10, -10, -10, 61, 61, + 58, -10, 61, } var yyDef = [...]int16{ - 2, -2, 6, 0, 1, 12, 0, 0, 4, 5, - 7, 12, 41, 0, 0, 0, 0, 0, 0, 0, - 0, 55, 56, 57, 60, 61, 62, 63, 65, 66, - 67, 69, 0, 0, 0, 0, 0, 0, 89, 90, - 91, 92, 84, 86, 3, 125, 0, 128, 0, 0, - 0, 137, 138, 139, 140, 141, 142, 143, 144, 145, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, - 156, 157, 0, 29, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 13, 20, 0, 79, 80, - 81, 0, 0, 0, 0, 0, 0, -2, 0, 0, - 10, 0, 58, 59, 0, 68, 0, 71, 72, 73, - 0, 106, 111, 112, 0, 113, 114, 115, 76, 0, - 78, 0, 126, 0, 0, 0, 0, 21, 30, 31, - -2, 33, 34, -2, 36, 37, 38, 39, 40, 0, - 42, 44, 0, 0, 82, 83, 93, 0, 0, 0, - 0, 0, 27, 0, 0, 0, 0, 11, 0, 0, - 0, 98, 70, 74, 0, 0, 0, 0, 77, 85, - 87, 0, 127, 129, 130, 118, 119, 120, 121, 122, - 123, 124, 0, 131, 132, 0, 0, 0, 47, 0, - 49, 0, 0, 0, -2, 94, 0, 0, 0, 0, - 100, 105, 28, 10, 18, 19, 9, 0, 0, 16, - 64, 0, 75, 107, 108, 116, 109, 0, 0, 133, - 0, 135, 22, 43, 45, 0, 46, 0, 0, 0, - 0, 95, 0, 96, 0, 0, 102, 0, 14, 0, - 0, 99, 0, 0, 88, 134, 0, 48, 50, 51, - 52, 0, 97, 0, 0, 0, 0, 0, 8, 0, - 17, 117, 110, 136, 0, 0, 0, 26, 0, 103, - 0, 53, 0, 0, 0, 15, 23, 24, 0, 101, - 0, 25, + 2, -2, 4, 0, 12, 0, 0, 1, 5, 10, + 11, 0, 0, 12, 36, 0, 25, 0, 50, 51, + 52, 55, 56, 57, 58, 60, 0, 0, 66, 0, + 0, 69, 71, 0, 0, 0, 0, 0, 0, 89, + 90, 91, 92, 84, 86, 3, 125, 0, 128, 0, + 0, 0, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 0, 0, 0, 8, 13, 20, 0, + 79, 80, 81, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 53, 54, 0, + 61, 0, 106, 111, 112, 0, 113, 114, 115, 64, + 0, 0, 67, 68, 70, 0, 104, 36, 0, 0, + 77, 0, 0, 126, 0, 0, 0, 0, 21, 24, + 0, 0, 9, 0, 37, 39, 0, 0, 82, 83, + 93, 0, 0, 0, 26, -2, 28, 29, -2, 31, + 32, 33, 34, 35, 0, 0, 0, 98, 62, 0, + 0, 0, 0, 65, 0, 73, 0, 0, 0, 78, + 85, 87, 0, 127, 129, 130, 118, 119, 120, 121, + 122, 123, 124, 0, 131, 132, 8, 18, 19, 7, + 0, 0, 0, 42, 0, 44, 0, 0, 0, -2, + 94, 0, 0, 23, 0, 0, 16, 59, 0, 63, + 107, 108, 117, 109, 0, 100, 105, 0, 0, 0, + 133, 0, 135, 0, 22, 38, 40, 0, 41, 0, + 0, 0, 0, 95, 0, 96, 14, 0, 0, 99, + 0, 0, 102, 0, 0, 88, 134, 0, 6, 43, + 45, 46, 47, 0, 97, 0, 17, 116, 110, 0, + 0, 0, 0, 0, 136, 0, 0, 72, 0, 103, + 0, 0, 48, 15, 0, 0, 0, 101, 74, 75, + 0, 0, 76, } var yyTok1 = [...]int8{ @@ -446,15 +397,15 @@ var yyTok1 = [...]int8{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 53, 3, 3, - 59, 60, 51, 49, 48, 50, 54, 52, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 58, 57, + 60, 61, 51, 49, 48, 50, 54, 52, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 59, 58, 3, 3, 3, 55, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 56, 3, 61, 3, 3, 3, 3, 3, 3, + 3, 57, 3, 62, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 62, 47, 63, + 3, 3, 3, 63, 47, 64, } var yyTok2 = [...]int8{ @@ -462,7 +413,7 @@ var yyTok2 = [...]int8{ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, + 42, 43, 44, 45, 46, 56, } var yyTok3 = [...]int8{ @@ -807,933 +758,756 @@ yydefault: switch yynt { case 1: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:73 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:58 { - if yyDollar[1].value != nil { - yyDollar[2].value.(*Query).Meta = yyDollar[1].value.(*ConstObject) - } - yylex.(*lexer).result = yyDollar[2].value.(*Query) + query := yyDollar[3].value.(*Query) + query.Meta = 
yyDollar[1].value.(*ConstObject) + query.Imports = yyDollar[2].value.([]*Import) + yylex.(*lexer).result = query } case 2: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:80 +//line parser.go.y:67 { - yyVAL.value = nil + yyVAL.value = (*ConstObject)(nil) } case 3: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:84 +//line parser.go.y:71 { yyVAL.value = yyDollar[2].value } case 4: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:90 - { - yyVAL.value = &Query{Imports: yyDollar[1].value.([]*Import), FuncDefs: reverseFuncDef(yyDollar[2].value.([]*FuncDef)), Term: &Term{Type: TermTypeIdentity}} - } - case 5: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:94 - { - if yyDollar[1].value != nil { - yyDollar[2].value.(*Query).Imports = yyDollar[1].value.([]*Import) - } - yyVAL.value = yyDollar[2].value - } - case 6: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:101 +//line parser.go.y:77 { yyVAL.value = []*Import(nil) } - case 7: + case 5: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:105 +//line parser.go.y:81 { yyVAL.value = append(yyDollar[1].value.([]*Import), yyDollar[2].value.(*Import)) } - case 8: + case 6: yyDollar = yyS[yypt-6 : yypt+1] -//line parser.go.y:111 +//line parser.go.y:87 { yyVAL.value = &Import{ImportPath: yyDollar[2].token, ImportAlias: yyDollar[4].token, Meta: yyDollar[5].value.(*ConstObject)} } - case 9: + case 7: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:115 +//line parser.go.y:91 { yyVAL.value = &Import{IncludePath: yyDollar[2].token, Meta: yyDollar[3].value.(*ConstObject)} } - case 10: + case 8: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:121 +//line parser.go.y:97 { yyVAL.value = (*ConstObject)(nil) } - case 11: + case 10: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:124 +//line parser.go.y:104 { + yyVAL.value = &Query{FuncDefs: reverseFuncDef(yyDollar[1].value.([]*FuncDef))} } case 12: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:128 +//line parser.go.y:111 { yyVAL.value = []*FuncDef(nil) } case 13: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:132 +//line parser.go.y:115 { yyVAL.value = append(yyDollar[2].value.([]*FuncDef), yyDollar[1].value.(*FuncDef)) } case 14: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:138 +//line parser.go.y:121 { yyVAL.value = &FuncDef{Name: yyDollar[2].token, Body: yyDollar[4].value.(*Query)} } case 15: yyDollar = yyS[yypt-8 : yypt+1] -//line parser.go.y:142 +//line parser.go.y:125 { yyVAL.value = &FuncDef{yyDollar[2].token, yyDollar[4].value.([]string), yyDollar[7].value.(*Query)} } case 16: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:148 +//line parser.go.y:131 { yyVAL.value = []string{yyDollar[1].token} } case 17: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:152 +//line parser.go.y:135 { yyVAL.value = append(yyDollar[1].value.([]string), yyDollar[3].token) } - case 18: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:157 - { - } - case 19: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:158 - { - } case 20: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:162 +//line parser.go.y:145 { - yyDollar[2].value.(*Query).FuncDefs = prependFuncDef(yyDollar[2].value.(*Query).FuncDefs, yyDollar[1].value.(*FuncDef)) - yyVAL.value = yyDollar[2].value + query := yyDollar[2].value.(*Query) + query.FuncDefs = prependFuncDef(query.FuncDefs, yyDollar[1].value.(*FuncDef)) + yyVAL.value = query } case 21: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:167 +//line parser.go.y:151 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpPipe, 
Right: yyDollar[3].value.(*Query)} } case 22: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:171 +//line parser.go.y:155 { - yyDollar[1].value.(*Term).SuffixList = append(yyDollar[1].value.(*Term).SuffixList, &Suffix{Bind: &Bind{yyDollar[3].value.([]*Pattern), yyDollar[5].value.(*Query)}}) - yyVAL.value = &Query{Term: yyDollar[1].value.(*Term)} + term := yyDollar[1].value.(*Term) + term.SuffixList = append(term.SuffixList, &Suffix{Bind: &Bind{yyDollar[3].value.([]*Pattern), yyDollar[5].value.(*Query)}}) + yyVAL.value = &Query{Term: term} } case 23: - yyDollar = yyS[yypt-9 : yypt+1] -//line parser.go.y:176 - { - yyVAL.value = &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{yyDollar[2].value.(*Term), yyDollar[4].value.(*Pattern), yyDollar[6].value.(*Query), yyDollar[8].value.(*Query)}}} - } - case 24: - yyDollar = yyS[yypt-9 : yypt+1] -//line parser.go.y:180 - { - yyVAL.value = &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{yyDollar[2].value.(*Term), yyDollar[4].value.(*Pattern), yyDollar[6].value.(*Query), yyDollar[8].value.(*Query), nil}}} - } - case 25: - yyDollar = yyS[yypt-11 : yypt+1] -//line parser.go.y:184 - { - yyVAL.value = &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{yyDollar[2].value.(*Term), yyDollar[4].value.(*Pattern), yyDollar[6].value.(*Query), yyDollar[8].value.(*Query), yyDollar[10].value.(*Query)}}} - } - case 26: - yyDollar = yyS[yypt-7 : yypt+1] -//line parser.go.y:188 - { - yyVAL.value = &Query{Term: &Term{Type: TermTypeIf, If: &If{yyDollar[2].value.(*Query), yyDollar[4].value.(*Query), yyDollar[5].value.([]*IfElif), yyDollar[6].value.(*Query)}}} - } - case 27: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:192 - { - yyVAL.value = &Query{Term: &Term{Type: TermTypeTry, Try: &Try{yyDollar[2].value.(*Query), yyDollar[3].value.(*Query)}}} - } - case 28: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:196 +//line parser.go.y:161 { yyVAL.value = &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{yyDollar[2].token, yyDollar[4].value.(*Query)}}} } - case 29: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:200 - { - if t := yyDollar[1].value.(*Query).Term; t != nil { - t.SuffixList = append(t.SuffixList, &Suffix{Optional: true}) - } else { - yyVAL.value = &Query{Term: &Term{Type: TermTypeQuery, Query: yyDollar[1].value.(*Query), SuffixList: []*Suffix{{Optional: true}}}} - } - } - case 30: + case 24: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:208 +//line parser.go.y:165 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpComma, Right: yyDollar[3].value.(*Query)} } - case 31: + case 26: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:212 +//line parser.go.y:172 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: yyDollar[2].operator, Right: yyDollar[3].value.(*Query)} } - case 32: + case 27: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:216 +//line parser.go.y:176 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: yyDollar[2].operator, Right: yyDollar[3].value.(*Query)} } - case 33: + case 28: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:220 +//line parser.go.y:180 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpOr, Right: yyDollar[3].value.(*Query)} } - case 34: + case 29: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:224 +//line parser.go.y:184 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpAnd, Right: yyDollar[3].value.(*Query)} } - case 35: + case 30: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:228 +//line 
parser.go.y:188 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: yyDollar[2].operator, Right: yyDollar[3].value.(*Query)} } - case 36: + case 31: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:232 +//line parser.go.y:192 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpAdd, Right: yyDollar[3].value.(*Query)} } - case 37: + case 32: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:236 +//line parser.go.y:196 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpSub, Right: yyDollar[3].value.(*Query)} } - case 38: + case 33: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:240 +//line parser.go.y:200 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpMul, Right: yyDollar[3].value.(*Query)} } - case 39: + case 34: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:244 +//line parser.go.y:204 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpDiv, Right: yyDollar[3].value.(*Query)} } - case 40: + case 35: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:248 +//line parser.go.y:208 { yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpMod, Right: yyDollar[3].value.(*Query)} } - case 41: + case 36: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:252 +//line parser.go.y:212 { yyVAL.value = &Query{Term: yyDollar[1].value.(*Term)} } - case 42: + case 37: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:258 +//line parser.go.y:218 { yyVAL.value = []*Pattern{yyDollar[1].value.(*Pattern)} } - case 43: + case 38: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:262 +//line parser.go.y:222 { yyVAL.value = append(yyDollar[1].value.([]*Pattern), yyDollar[3].value.(*Pattern)) } - case 44: + case 39: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:268 +//line parser.go.y:228 { yyVAL.value = &Pattern{Name: yyDollar[1].token} } - case 45: + case 40: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:272 +//line parser.go.y:232 { yyVAL.value = &Pattern{Array: yyDollar[2].value.([]*Pattern)} } - case 46: + case 41: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:276 +//line parser.go.y:236 { yyVAL.value = &Pattern{Object: yyDollar[2].value.([]*PatternObject)} } - case 47: + case 42: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:282 +//line parser.go.y:242 { yyVAL.value = []*Pattern{yyDollar[1].value.(*Pattern)} } - case 48: + case 43: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:286 +//line parser.go.y:246 { yyVAL.value = append(yyDollar[1].value.([]*Pattern), yyDollar[3].value.(*Pattern)) } - case 49: + case 44: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:292 +//line parser.go.y:252 { yyVAL.value = []*PatternObject{yyDollar[1].value.(*PatternObject)} } - case 50: + case 45: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:296 +//line parser.go.y:256 { yyVAL.value = append(yyDollar[1].value.([]*PatternObject), yyDollar[3].value.(*PatternObject)) } - case 51: + case 46: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:302 +//line parser.go.y:262 { yyVAL.value = &PatternObject{Key: yyDollar[1].token, Val: yyDollar[3].value.(*Pattern)} } - case 52: + case 47: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:306 +//line parser.go.y:266 { yyVAL.value = &PatternObject{KeyString: yyDollar[1].value.(*String), Val: yyDollar[3].value.(*Pattern)} } - case 53: + case 48: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:310 +//line parser.go.y:270 { yyVAL.value = &PatternObject{KeyQuery: yyDollar[2].value.(*Query), Val: yyDollar[5].value.(*Pattern)} } - case 54: + case 49: yyDollar = 
yyS[yypt-1 : yypt+1] -//line parser.go.y:314 +//line parser.go.y:274 { yyVAL.value = &PatternObject{Key: yyDollar[1].token} } - case 55: + case 50: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:320 +//line parser.go.y:280 { yyVAL.value = &Term{Type: TermTypeIdentity} } - case 56: + case 51: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:324 +//line parser.go.y:284 { yyVAL.value = &Term{Type: TermTypeRecurse} } - case 57: + case 52: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:328 +//line parser.go.y:288 { yyVAL.value = &Term{Type: TermTypeIndex, Index: &Index{Name: yyDollar[1].token}} } - case 58: + case 53: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:332 +//line parser.go.y:292 { - if yyDollar[2].value.(*Suffix).Iter { - yyVAL.value = &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{yyDollar[2].value.(*Suffix)}} + suffix := yyDollar[2].value.(*Suffix) + if suffix.Iter { + yyVAL.value = &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{suffix}} } else { - yyVAL.value = &Term{Type: TermTypeIndex, Index: yyDollar[2].value.(*Suffix).Index} + yyVAL.value = &Term{Type: TermTypeIndex, Index: suffix.Index} } } - case 59: + case 54: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:340 +//line parser.go.y:301 { yyVAL.value = &Term{Type: TermTypeIndex, Index: &Index{Str: yyDollar[2].value.(*String)}} } - case 60: + case 55: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:344 +//line parser.go.y:305 { yyVAL.value = &Term{Type: TermTypeNull} } - case 61: + case 56: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:348 +//line parser.go.y:309 { yyVAL.value = &Term{Type: TermTypeTrue} } - case 62: + case 57: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:352 +//line parser.go.y:313 { yyVAL.value = &Term{Type: TermTypeFalse} } - case 63: + case 58: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:356 +//line parser.go.y:317 { yyVAL.value = &Term{Type: TermTypeFunc, Func: &Func{Name: yyDollar[1].token}} } - case 64: + case 59: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:360 +//line parser.go.y:321 { yyVAL.value = &Term{Type: TermTypeFunc, Func: &Func{Name: yyDollar[1].token, Args: yyDollar[3].value.([]*Query)}} } - case 65: + case 60: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:364 +//line parser.go.y:325 { yyVAL.value = &Term{Type: TermTypeFunc, Func: &Func{Name: yyDollar[1].token}} } + case 61: + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:329 + { + yyVAL.value = &Term{Type: TermTypeObject, Object: &Object{}} + } + case 62: + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:333 + { + yyVAL.value = &Term{Type: TermTypeObject, Object: &Object{yyDollar[2].value.([]*ObjectKeyVal)}} + } + case 63: + yyDollar = yyS[yypt-4 : yypt+1] +//line parser.go.y:337 + { + yyVAL.value = &Term{Type: TermTypeObject, Object: &Object{yyDollar[2].value.([]*ObjectKeyVal)}} + } + case 64: + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:341 + { + yyVAL.value = &Term{Type: TermTypeArray, Array: &Array{}} + } + case 65: + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:345 + { + yyVAL.value = &Term{Type: TermTypeArray, Array: &Array{yyDollar[2].value.(*Query)}} + } case 66: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:368 +//line parser.go.y:349 { yyVAL.value = &Term{Type: TermTypeNumber, Number: yyDollar[1].token} } case 67: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:372 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:353 { - yyVAL.value = &Term{Type: TermTypeFormat, Format: yyDollar[1].token} + yyVAL.value = &Term{Type: 
TermTypeUnary, Unary: &Unary{OpAdd, yyDollar[2].value.(*Term)}} } case 68: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:376 +//line parser.go.y:357 { - yyVAL.value = &Term{Type: TermTypeFormat, Format: yyDollar[1].token, Str: yyDollar[2].value.(*String)} + yyVAL.value = &Term{Type: TermTypeUnary, Unary: &Unary{OpSub, yyDollar[2].value.(*Term)}} } case 69: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:380 +//line parser.go.y:361 { - yyVAL.value = &Term{Type: TermTypeString, Str: yyDollar[1].value.(*String)} + yyVAL.value = &Term{Type: TermTypeFormat, Format: yyDollar[1].token} } case 70: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:384 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:365 { - yyVAL.value = &Term{Type: TermTypeQuery, Query: yyDollar[2].value.(*Query)} + yyVAL.value = &Term{Type: TermTypeFormat, Format: yyDollar[1].token, Str: yyDollar[2].value.(*String)} } case 71: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:388 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:369 { - yyVAL.value = &Term{Type: TermTypeUnary, Unary: &Unary{OpAdd, yyDollar[2].value.(*Term)}} + yyVAL.value = &Term{Type: TermTypeString, Str: yyDollar[1].value.(*String)} } case 72: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:392 + yyDollar = yyS[yypt-7 : yypt+1] +//line parser.go.y:373 { - yyVAL.value = &Term{Type: TermTypeUnary, Unary: &Unary{OpSub, yyDollar[2].value.(*Term)}} + yyVAL.value = &Term{Type: TermTypeIf, If: &If{yyDollar[2].value.(*Query), yyDollar[4].value.(*Query), yyDollar[5].value.([]*IfElif), yyDollar[6].value.(*Query)}} } case 73: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:396 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:377 { - yyVAL.value = &Term{Type: TermTypeObject, Object: &Object{}} + yyVAL.value = &Term{Type: TermTypeTry, Try: &Try{yyDollar[2].value.(*Query), yyDollar[3].value.(*Query)}} } case 74: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:400 + yyDollar = yyS[yypt-9 : yypt+1] +//line parser.go.y:381 { - yyVAL.value = &Term{Type: TermTypeObject, Object: &Object{yyDollar[2].value.([]*ObjectKeyVal)}} + yyVAL.value = &Term{Type: TermTypeReduce, Reduce: &Reduce{yyDollar[2].value.(*Query), yyDollar[4].value.(*Pattern), yyDollar[6].value.(*Query), yyDollar[8].value.(*Query)}} } case 75: - yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:404 + yyDollar = yyS[yypt-9 : yypt+1] +//line parser.go.y:385 { - yyVAL.value = &Term{Type: TermTypeObject, Object: &Object{yyDollar[2].value.([]*ObjectKeyVal)}} + yyVAL.value = &Term{Type: TermTypeForeach, Foreach: &Foreach{yyDollar[2].value.(*Query), yyDollar[4].value.(*Pattern), yyDollar[6].value.(*Query), yyDollar[8].value.(*Query), nil}} } case 76: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:408 + yyDollar = yyS[yypt-11 : yypt+1] +//line parser.go.y:389 { - yyVAL.value = &Term{Type: TermTypeArray, Array: &Array{}} + yyVAL.value = &Term{Type: TermTypeForeach, Foreach: &Foreach{yyDollar[2].value.(*Query), yyDollar[4].value.(*Pattern), yyDollar[6].value.(*Query), yyDollar[8].value.(*Query), yyDollar[10].value.(*Query)}} } case 77: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:412 + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:393 { - yyVAL.value = &Term{Type: TermTypeArray, Array: &Array{yyDollar[2].value.(*Query)}} + yyVAL.value = &Term{Type: TermTypeBreak, Break: yyDollar[2].token} } case 78: - yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:416 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:397 { - yyVAL.value = &Term{Type: 
TermTypeBreak, Break: yyDollar[2].token} + yyVAL.value = &Term{Type: TermTypeQuery, Query: yyDollar[2].value.(*Query)} } case 79: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:420 +//line parser.go.y:401 { yyDollar[1].value.(*Term).SuffixList = append(yyDollar[1].value.(*Term).SuffixList, &Suffix{Index: &Index{Name: yyDollar[2].token}}) } case 80: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:424 +//line parser.go.y:405 { yyDollar[1].value.(*Term).SuffixList = append(yyDollar[1].value.(*Term).SuffixList, yyDollar[2].value.(*Suffix)) } case 81: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:428 +//line parser.go.y:409 { yyDollar[1].value.(*Term).SuffixList = append(yyDollar[1].value.(*Term).SuffixList, &Suffix{Optional: true}) } case 82: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:432 +//line parser.go.y:413 { yyDollar[1].value.(*Term).SuffixList = append(yyDollar[1].value.(*Term).SuffixList, yyDollar[3].value.(*Suffix)) } case 83: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:436 +//line parser.go.y:417 { yyDollar[1].value.(*Term).SuffixList = append(yyDollar[1].value.(*Term).SuffixList, &Suffix{Index: &Index{Str: yyDollar[3].value.(*String)}}) } case 84: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:442 +//line parser.go.y:423 { yyVAL.value = &String{Str: yyDollar[1].token} } case 85: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:446 +//line parser.go.y:427 { yyVAL.value = &String{Queries: yyDollar[2].value.([]*Query)} } case 86: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:452 +//line parser.go.y:433 { yyVAL.value = []*Query{} } case 87: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:456 +//line parser.go.y:437 { yyVAL.value = append(yyDollar[1].value.([]*Query), &Query{Term: &Term{Type: TermTypeString, Str: &String{Str: yyDollar[2].token}}}) } case 88: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:460 +//line parser.go.y:441 { yylex.(*lexer).inString = true yyVAL.value = append(yyDollar[1].value.([]*Query), &Query{Term: &Term{Type: TermTypeQuery, Query: yyDollar[3].value.(*Query)}}) } - case 89: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:466 - { - } - case 90: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:467 - { - } - case 91: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:470 - { - } - case 92: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:471 - { - } case 93: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:475 +//line parser.go.y:456 { yyVAL.value = &Suffix{Iter: true} } case 94: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:479 +//line parser.go.y:460 { yyVAL.value = &Suffix{Index: &Index{Start: yyDollar[2].value.(*Query)}} } case 95: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:483 +//line parser.go.y:464 { yyVAL.value = &Suffix{Index: &Index{Start: yyDollar[2].value.(*Query), IsSlice: true}} } case 96: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:487 +//line parser.go.y:468 { yyVAL.value = &Suffix{Index: &Index{End: yyDollar[3].value.(*Query), IsSlice: true}} } case 97: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:491 +//line parser.go.y:472 { yyVAL.value = &Suffix{Index: &Index{Start: yyDollar[2].value.(*Query), End: yyDollar[4].value.(*Query), IsSlice: true}} } case 98: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:497 +//line parser.go.y:478 { yyVAL.value = []*Query{yyDollar[1].value.(*Query)} } case 99: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:501 +//line parser.go.y:482 { yyVAL.value = append(yyDollar[1].value.([]*Query), 
yyDollar[3].value.(*Query)) } case 100: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:507 +//line parser.go.y:488 { yyVAL.value = []*IfElif(nil) } case 101: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:511 +//line parser.go.y:492 { yyVAL.value = append(yyDollar[1].value.([]*IfElif), &IfElif{yyDollar[3].value.(*Query), yyDollar[5].value.(*Query)}) } case 102: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:517 +//line parser.go.y:498 { yyVAL.value = (*Query)(nil) } case 103: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:521 +//line parser.go.y:502 { yyVAL.value = yyDollar[2].value } case 104: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:527 +//line parser.go.y:508 { yyVAL.value = (*Query)(nil) } case 105: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:531 +//line parser.go.y:512 { yyVAL.value = yyDollar[2].value } case 106: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:537 +//line parser.go.y:518 { yyVAL.value = []*ObjectKeyVal{yyDollar[1].value.(*ObjectKeyVal)} } case 107: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:541 +//line parser.go.y:522 { yyVAL.value = append(yyDollar[1].value.([]*ObjectKeyVal), yyDollar[3].value.(*ObjectKeyVal)) } case 108: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:547 +//line parser.go.y:528 { - yyVAL.value = &ObjectKeyVal{Key: yyDollar[1].token, Val: yyDollar[3].value.(*ObjectVal)} + yyVAL.value = &ObjectKeyVal{Key: yyDollar[1].token, Val: yyDollar[3].value.(*Query)} } case 109: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:551 +//line parser.go.y:532 { - yyVAL.value = &ObjectKeyVal{KeyString: yyDollar[1].value.(*String), Val: yyDollar[3].value.(*ObjectVal)} + yyVAL.value = &ObjectKeyVal{KeyString: yyDollar[1].value.(*String), Val: yyDollar[3].value.(*Query)} } case 110: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:555 +//line parser.go.y:536 { - yyVAL.value = &ObjectKeyVal{KeyQuery: yyDollar[2].value.(*Query), Val: yyDollar[5].value.(*ObjectVal)} + yyVAL.value = &ObjectKeyVal{KeyQuery: yyDollar[2].value.(*Query), Val: yyDollar[5].value.(*Query)} } case 111: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:559 +//line parser.go.y:540 { yyVAL.value = &ObjectKeyVal{Key: yyDollar[1].token} } case 112: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:563 +//line parser.go.y:544 { yyVAL.value = &ObjectKeyVal{KeyString: yyDollar[1].value.(*String)} } - case 113: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:568 - { - } - case 114: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:569 - { - } - case 115: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:570 - { - } case 116: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:574 - { - yyVAL.value = &ObjectVal{[]*Query{{Term: yyDollar[1].value.(*Term)}}} - } - case 117: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:578 +//line parser.go.y:555 { - yyVAL.value = &ObjectVal{append(yyDollar[1].value.(*ObjectVal).Queries, &Query{Term: yyDollar[3].value.(*Term)})} + yyVAL.value = &Query{Left: yyDollar[1].value.(*Query), Op: OpPipe, Right: yyDollar[3].value.(*Query)} } case 118: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:584 +//line parser.go.y:562 { yyVAL.value = &ConstTerm{Object: yyDollar[1].value.(*ConstObject)} } case 119: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:588 +//line parser.go.y:566 { yyVAL.value = &ConstTerm{Array: yyDollar[1].value.(*ConstArray)} } case 120: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:592 +//line parser.go.y:570 { yyVAL.value = &ConstTerm{Number: yyDollar[1].token} } 
case 121: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:596 +//line parser.go.y:574 { yyVAL.value = &ConstTerm{Str: yyDollar[1].token} } case 122: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:600 +//line parser.go.y:578 { yyVAL.value = &ConstTerm{Null: true} } case 123: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:604 +//line parser.go.y:582 { yyVAL.value = &ConstTerm{True: true} } case 124: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:608 +//line parser.go.y:586 { yyVAL.value = &ConstTerm{False: true} } case 125: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:614 +//line parser.go.y:592 { yyVAL.value = &ConstObject{} } case 126: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:618 +//line parser.go.y:596 { yyVAL.value = &ConstObject{yyDollar[2].value.([]*ConstObjectKeyVal)} } case 127: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:622 +//line parser.go.y:600 { yyVAL.value = &ConstObject{yyDollar[2].value.([]*ConstObjectKeyVal)} } case 128: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:628 +//line parser.go.y:606 { yyVAL.value = []*ConstObjectKeyVal{yyDollar[1].value.(*ConstObjectKeyVal)} } case 129: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:632 +//line parser.go.y:610 { yyVAL.value = append(yyDollar[1].value.([]*ConstObjectKeyVal), yyDollar[3].value.(*ConstObjectKeyVal)) } case 130: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:638 +//line parser.go.y:616 { yyVAL.value = &ConstObjectKeyVal{Key: yyDollar[1].token, Val: yyDollar[3].value.(*ConstTerm)} } case 131: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:642 +//line parser.go.y:620 { yyVAL.value = &ConstObjectKeyVal{Key: yyDollar[1].token, Val: yyDollar[3].value.(*ConstTerm)} } case 132: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:646 +//line parser.go.y:624 { yyVAL.value = &ConstObjectKeyVal{KeyString: yyDollar[1].token, Val: yyDollar[3].value.(*ConstTerm)} } case 133: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:652 +//line parser.go.y:630 { yyVAL.value = &ConstArray{} } case 134: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:656 +//line parser.go.y:634 { yyVAL.value = &ConstArray{yyDollar[2].value.([]*ConstTerm)} } case 135: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:662 +//line parser.go.y:640 { yyVAL.value = []*ConstTerm{yyDollar[1].value.(*ConstTerm)} } case 136: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:666 +//line parser.go.y:644 { yyVAL.value = append(yyDollar[1].value.([]*ConstTerm), yyDollar[3].value.(*ConstTerm)) } - case 137: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:671 - { - } - case 138: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:672 - { - } - case 139: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:673 - { - } - case 140: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:674 - { - } - case 141: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:675 - { - } - case 142: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:676 - { - } - case 143: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:677 - { - } - case 144: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:678 - { - } - case 145: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:679 - { - } - case 146: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:680 - { - } - case 147: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:681 - { - } - case 148: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:682 - { - } - case 149: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:683 - { - } - case 
150: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:684 - { - } - case 151: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:685 - { - } - case 152: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:686 - { - } - case 153: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:687 - { - } - case 154: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:688 - { - } - case 155: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:689 - { - } - case 156: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:690 - { - } - case 157: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:691 - { - } } goto yystack /* stack new state and value */ } diff --git a/vendor/github.com/itchyny/gojq/parser.go.y b/vendor/github.com/itchyny/gojq/parser.go.y index 380c3cf65da..5481e211408 100644 --- a/vendor/github.com/itchyny/gojq/parser.go.y +++ b/vendor/github.com/itchyny/gojq/parser.go.y @@ -1,20 +1,6 @@ %{ package gojq -// Parse a query string, and returns the query struct. -// -// If parsing failed, the returned error has the method Token() (string, int), -// which reports the invalid token and the byte offset in the query string. The -// token is empty if the error occurred after scanning the entire query string. -// The byte offset is the scanned bytes when the error occurred. -func Parse(src string) (*Query, error) { - l := newLexer(src) - if yyParse(l) > 0 { - return nil, l.err - } - return l.result, nil -} - func reverseFuncDef(xs []*FuncDef) []*FuncDef { for i, j := 0, len(xs)-1; i < j; i, j = i+1, j-1 { xs[i], xs[j] = xs[j], xs[i] @@ -36,24 +22,23 @@ func prependFuncDef(xs []*FuncDef, x *FuncDef) []*FuncDef { operator Operator } -%type program moduleheader programbody imports import metaopt funcdefs funcdef funcdefargs query +%type program header imports import meta body funcdefs funcdef funcargs query %type bindpatterns pattern arraypatterns objectpatterns objectpattern -%type term string stringparts suffix args ifelifs ifelse trycatch +%type expr term string stringparts suffix args ifelifs ifelse trycatch %type objectkeyvals objectkeyval objectval %type constterm constobject constobjectkeyvals constobjectkeyval constarray constarrayelems %type tokIdentVariable tokIdentModuleIdent tokVariableModuleVariable tokKeyword objectkey -%token tokAltOp tokUpdateOp tokDestAltOp tokOrOp tokAndOp tokCompareOp -%token tokModule tokImport tokInclude tokDef tokAs tokLabel tokBreak +%token tokAltOp tokUpdateOp tokDestAltOp tokCompareOp +%token tokOrOp tokAndOp tokModule tokImport tokInclude tokDef tokAs tokLabel tokBreak %token tokNull tokTrue tokFalse -%token tokIdent tokVariable tokModuleIdent tokModuleVariable -%token tokIndex tokNumber tokFormat -%token tokString tokStringStart tokStringQuery tokStringEnd %token tokIf tokThen tokElif tokElse tokEnd %token tokTry tokCatch tokReduce tokForeach -%token tokRecurse tokFuncDefPost tokTermPost tokEmptyCatch -%token tokInvalid tokInvalidEscapeSequence tokUnterminatedString +%token tokIdent tokVariable tokModuleIdent tokModuleVariable +%token tokRecurse tokIndex tokNumber tokFormat +%token tokString tokStringStart tokStringQuery tokStringEnd +%token tokInvalid tokInvalidEscapeSequence tokUnterminatedString -%nonassoc tokFuncDefPost tokTermPost +%nonassoc tokFuncDefQuery tokExpr tokTerm %right '|' %left ',' %right tokAltOp @@ -69,33 +54,24 @@ func prependFuncDef(xs []*FuncDef, x *FuncDef) []*FuncDef { %% program - : moduleheader programbody + : header imports body { - if $1 != nil { $2.(*Query).Meta = $1.(*ConstObject) } - yylex.(*lexer).result = 
$2.(*Query) + query := $3.(*Query) + query.Meta = $1.(*ConstObject) + query.Imports = $2.([]*Import) + yylex.(*lexer).result = query } -moduleheader +header : { - $$ = nil + $$ = (*ConstObject)(nil) } | tokModule constobject ';' { $$ = $2; } -programbody - : imports funcdefs - { - $$ = &Query{Imports: $1.([]*Import), FuncDefs: reverseFuncDef($2.([]*FuncDef)), Term: &Term{Type: TermTypeIdentity}} - } - | imports query - { - if $1 != nil { $2.(*Query).Imports = $1.([]*Import) } - $$ = $2 - } - imports : { @@ -107,21 +83,28 @@ imports } import - : tokImport tokString tokAs tokIdentVariable metaopt ';' + : tokImport tokString tokAs tokIdentVariable meta ';' { $$ = &Import{ImportPath: $2, ImportAlias: $4, Meta: $5.(*ConstObject)} } - | tokInclude tokString metaopt ';' + | tokInclude tokString meta ';' { $$ = &Import{IncludePath: $2, Meta: $3.(*ConstObject)} } -metaopt +meta : { $$ = (*ConstObject)(nil) } - | constobject {} + | constobject + +body + : funcdefs + { + $$ = &Query{FuncDefs: reverseFuncDef($1.([]*FuncDef))} + } + | query funcdefs : @@ -138,30 +121,31 @@ funcdef { $$ = &FuncDef{Name: $2, Body: $4.(*Query)} } - | tokDef tokIdent '(' funcdefargs ')' ':' query ';' + | tokDef tokIdent '(' funcargs ')' ':' query ';' { $$ = &FuncDef{$2, $4.([]string), $7.(*Query)} } -funcdefargs +funcargs : tokIdentVariable { $$ = []string{$1} } - | funcdefargs ';' tokIdentVariable + | funcargs ';' tokIdentVariable { $$ = append($1.([]string), $3) } tokIdentVariable - : tokIdent {} - | tokVariable {} + : tokIdent + | tokVariable query - : funcdef query %prec tokFuncDefPost + : funcdef query %prec tokFuncDefQuery { - $2.(*Query).FuncDefs = prependFuncDef($2.(*Query).FuncDefs, $1.(*FuncDef)) - $$ = $2 + query := $2.(*Query) + query.FuncDefs = prependFuncDef(query.FuncDefs, $1.(*FuncDef)) + $$ = query } | query '|' query { @@ -169,86 +153,62 @@ query } | term tokAs bindpatterns '|' query { - $1.(*Term).SuffixList = append($1.(*Term).SuffixList, &Suffix{Bind: &Bind{$3.([]*Pattern), $5.(*Query)}}) - $$ = &Query{Term: $1.(*Term)} - } - | tokReduce term tokAs pattern '(' query ';' query ')' - { - $$ = &Query{Term: &Term{Type: TermTypeReduce, Reduce: &Reduce{$2.(*Term), $4.(*Pattern), $6.(*Query), $8.(*Query)}}} - } - | tokForeach term tokAs pattern '(' query ';' query ')' - { - $$ = &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{$2.(*Term), $4.(*Pattern), $6.(*Query), $8.(*Query), nil}}} - } - | tokForeach term tokAs pattern '(' query ';' query ';' query ')' - { - $$ = &Query{Term: &Term{Type: TermTypeForeach, Foreach: &Foreach{$2.(*Term), $4.(*Pattern), $6.(*Query), $8.(*Query), $10.(*Query)}}} - } - | tokIf query tokThen query ifelifs ifelse tokEnd - { - $$ = &Query{Term: &Term{Type: TermTypeIf, If: &If{$2.(*Query), $4.(*Query), $5.([]*IfElif), $6.(*Query)}}} - } - | tokTry query trycatch - { - $$ = &Query{Term: &Term{Type: TermTypeTry, Try: &Try{$2.(*Query), $3.(*Query)}}} + term := $1.(*Term) + term.SuffixList = append(term.SuffixList, &Suffix{Bind: &Bind{$3.([]*Pattern), $5.(*Query)}}) + $$ = &Query{Term: term} } | tokLabel tokVariable '|' query { $$ = &Query{Term: &Term{Type: TermTypeLabel, Label: &Label{$2, $4.(*Query)}}} } - | query '?' 
- { - if t := $1.(*Query).Term; t != nil { - t.SuffixList = append(t.SuffixList, &Suffix{Optional: true}) - } else { - $$ = &Query{Term: &Term{Type: TermTypeQuery, Query: $1.(*Query), SuffixList: []*Suffix{{Optional: true}}}} - } - } | query ',' query { $$ = &Query{Left: $1.(*Query), Op: OpComma, Right: $3.(*Query)} } - | query tokAltOp query + | expr %prec tokExpr + +expr + : expr tokAltOp expr { $$ = &Query{Left: $1.(*Query), Op: $2, Right: $3.(*Query)} } - | query tokUpdateOp query + | expr tokUpdateOp expr { $$ = &Query{Left: $1.(*Query), Op: $2, Right: $3.(*Query)} } - | query tokOrOp query + | expr tokOrOp expr { $$ = &Query{Left: $1.(*Query), Op: OpOr, Right: $3.(*Query)} } - | query tokAndOp query + | expr tokAndOp expr { $$ = &Query{Left: $1.(*Query), Op: OpAnd, Right: $3.(*Query)} } - | query tokCompareOp query + | expr tokCompareOp expr { $$ = &Query{Left: $1.(*Query), Op: $2, Right: $3.(*Query)} } - | query '+' query + | expr '+' expr { $$ = &Query{Left: $1.(*Query), Op: OpAdd, Right: $3.(*Query)} } - | query '-' query + | expr '-' expr { $$ = &Query{Left: $1.(*Query), Op: OpSub, Right: $3.(*Query)} } - | query '*' query + | expr '*' expr { $$ = &Query{Left: $1.(*Query), Op: OpMul, Right: $3.(*Query)} } - | query '/' query + | expr '/' expr { $$ = &Query{Left: $1.(*Query), Op: OpDiv, Right: $3.(*Query)} } - | query '%' query + | expr '%' expr { $$ = &Query{Left: $1.(*Query), Op: OpMod, Right: $3.(*Query)} } - | term %prec tokTermPost + | term %prec tokTerm { $$ = &Query{Term: $1.(*Term)} } @@ -330,10 +290,11 @@ term } | '.' suffix { - if $2.(*Suffix).Iter { - $$ = &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{$2.(*Suffix)}} + suffix := $2.(*Suffix) + if suffix.Iter { + $$ = &Term{Type: TermTypeIdentity, SuffixList: []*Suffix{suffix}} } else { - $$ = &Term{Type: TermTypeIndex, Index: $2.(*Suffix).Index} + $$ = &Term{Type: TermTypeIndex, Index: suffix.Index} } } | '.' 
string @@ -364,25 +325,29 @@ term { $$ = &Term{Type: TermTypeFunc, Func: &Func{Name: $1}} } - | tokNumber + | '{' '}' { - $$ = &Term{Type: TermTypeNumber, Number: $1} + $$ = &Term{Type: TermTypeObject, Object: &Object{}} } - | tokFormat + | '{' objectkeyvals '}' { - $$ = &Term{Type: TermTypeFormat, Format: $1} + $$ = &Term{Type: TermTypeObject, Object: &Object{$2.([]*ObjectKeyVal)}} } - | tokFormat string + | '{' objectkeyvals ',' '}' { - $$ = &Term{Type: TermTypeFormat, Format: $1, Str: $2.(*String)} + $$ = &Term{Type: TermTypeObject, Object: &Object{$2.([]*ObjectKeyVal)}} } - | string + | '[' ']' { - $$ = &Term{Type: TermTypeString, Str: $1.(*String)} + $$ = &Term{Type: TermTypeArray, Array: &Array{}} } - | '(' query ')' + | '[' query ']' { - $$ = &Term{Type: TermTypeQuery, Query: $2.(*Query)} + $$ = &Term{Type: TermTypeArray, Array: &Array{$2.(*Query)}} + } + | tokNumber + { + $$ = &Term{Type: TermTypeNumber, Number: $1} } | '+' term { @@ -392,30 +357,46 @@ term { $$ = &Term{Type: TermTypeUnary, Unary: &Unary{OpSub, $2.(*Term)}} } - | '{' '}' + | tokFormat { - $$ = &Term{Type: TermTypeObject, Object: &Object{}} + $$ = &Term{Type: TermTypeFormat, Format: $1} } - | '{' objectkeyvals '}' + | tokFormat string { - $$ = &Term{Type: TermTypeObject, Object: &Object{$2.([]*ObjectKeyVal)}} + $$ = &Term{Type: TermTypeFormat, Format: $1, Str: $2.(*String)} } - | '{' objectkeyvals ',' '}' + | string { - $$ = &Term{Type: TermTypeObject, Object: &Object{$2.([]*ObjectKeyVal)}} + $$ = &Term{Type: TermTypeString, Str: $1.(*String)} } - | '[' ']' + | tokIf query tokThen query ifelifs ifelse tokEnd { - $$ = &Term{Type: TermTypeArray, Array: &Array{}} + $$ = &Term{Type: TermTypeIf, If: &If{$2.(*Query), $4.(*Query), $5.([]*IfElif), $6.(*Query)}} } - | '[' query ']' + | tokTry expr trycatch { - $$ = &Term{Type: TermTypeArray, Array: &Array{$2.(*Query)}} + $$ = &Term{Type: TermTypeTry, Try: &Try{$2.(*Query), $3.(*Query)}} + } + | tokReduce expr tokAs pattern '(' query ';' query ')' + { + $$ = &Term{Type: TermTypeReduce, Reduce: &Reduce{$2.(*Query), $4.(*Pattern), $6.(*Query), $8.(*Query)}} + } + | tokForeach expr tokAs pattern '(' query ';' query ')' + { + $$ = &Term{Type: TermTypeForeach, Foreach: &Foreach{$2.(*Query), $4.(*Pattern), $6.(*Query), $8.(*Query), nil}} + } + | tokForeach expr tokAs pattern '(' query ';' query ';' query ')' + { + $$ = &Term{Type: TermTypeForeach, Foreach: &Foreach{$2.(*Query), $4.(*Pattern), $6.(*Query), $8.(*Query), $10.(*Query)}} } | tokBreak tokVariable { $$ = &Term{Type: TermTypeBreak, Break: $2} } + | '(' query ')' + { + $$ = &Term{Type: TermTypeQuery, Query: $2.(*Query)} + } | term tokIndex { $1.(*Term).SuffixList = append($1.(*Term).SuffixList, &Suffix{Index: &Index{Name: $2}}) @@ -463,12 +444,12 @@ stringparts } tokIdentModuleIdent - : tokIdent {} - | tokModuleIdent {} + : tokIdent + | tokModuleIdent tokVariableModuleVariable - : tokVariable {} - | tokModuleVariable {} + : tokVariable + | tokModuleVariable suffix : '[' ']' @@ -527,7 +508,7 @@ trycatch { $$ = (*Query)(nil) } - | tokCatch query + | tokCatch expr { $$ = $2 } @@ -545,15 +526,15 @@ objectkeyvals objectkeyval : objectkey ':' objectval { - $$ = &ObjectKeyVal{Key: $1, Val: $3.(*ObjectVal)} + $$ = &ObjectKeyVal{Key: $1, Val: $3.(*Query)} } | string ':' objectval { - $$ = &ObjectKeyVal{KeyString: $1.(*String), Val: $3.(*ObjectVal)} + $$ = &ObjectKeyVal{KeyString: $1.(*String), Val: $3.(*Query)} } | '(' query ')' ':' objectval { - $$ = &ObjectKeyVal{KeyQuery: $2.(*Query), Val: $5.(*ObjectVal)} + $$ = 
&ObjectKeyVal{KeyQuery: $2.(*Query), Val: $5.(*Query)} } | objectkey { @@ -565,19 +546,16 @@ objectkeyval } objectkey - : tokIdent {} - | tokVariable {} - | tokKeyword {} + : tokIdent + | tokVariable + | tokKeyword objectval - : term + : objectval '|' objectval { - $$ = &ObjectVal{[]*Query{{Term: $1.(*Term)}}} - } - | objectval '|' term - { - $$ = &ObjectVal{append($1.(*ObjectVal).Queries, &Query{Term: $3.(*Term)})} + $$ = &Query{Left: $1.(*Query), Op: OpPipe, Right: $3.(*Query)} } + | expr constterm : constobject @@ -668,26 +646,26 @@ constarrayelems } tokKeyword - : tokOrOp {} - | tokAndOp {} - | tokModule {} - | tokImport {} - | tokInclude {} - | tokDef {} - | tokAs {} - | tokLabel {} - | tokBreak {} - | tokNull {} - | tokTrue {} - | tokFalse {} - | tokIf {} - | tokThen {} - | tokElif {} - | tokElse {} - | tokEnd {} - | tokTry {} - | tokCatch {} - | tokReduce {} - | tokForeach {} + : tokOrOp + | tokAndOp + | tokModule + | tokImport + | tokInclude + | tokDef + | tokAs + | tokLabel + | tokBreak + | tokNull + | tokTrue + | tokFalse + | tokIf + | tokThen + | tokElif + | tokElse + | tokEnd + | tokTry + | tokCatch + | tokReduce + | tokForeach %% diff --git a/vendor/github.com/itchyny/gojq/query.go b/vendor/github.com/itchyny/gojq/query.go index 5f20b4ff6fc..e7cf7789085 100644 --- a/vendor/github.com/itchyny/gojq/query.go +++ b/vendor/github.com/itchyny/gojq/query.go @@ -5,6 +5,20 @@ import ( "strings" ) +// Parse a query string, and returns the query struct. +// +// If parsing failed, it returns an error of type [*ParseError], which has +// the byte offset and the invalid token. The byte offset is the scanned bytes +// when the error occurred. The token is empty if the error occurred after +// scanning the entire query string. +func Parse(src string) (*Query, error) { + l := newLexer(src) + if yyParse(l) > 0 { + return nil, l.err + } + return l.result, nil +} + // Query represents the abstract syntax tree of a jq query. type Query struct { Meta *ConstObject @@ -49,13 +63,8 @@ func (e *Query) writeTo(s *strings.Builder) { for _, im := range e.Imports { im.writeTo(s) } - for i, fd := range e.FuncDefs { - if i > 0 { - s.WriteByte(' ') - } + for _, fd := range e.FuncDefs { fd.writeTo(s) - } - if len(e.FuncDefs) > 0 { s.WriteByte(' ') } if e.Func != "" { @@ -660,7 +669,7 @@ type ObjectKeyVal struct { Key string KeyString *String KeyQuery *Query - Val *ObjectVal + Val *Query } func (e *ObjectKeyVal) String() string { @@ -696,32 +705,6 @@ func (e *ObjectKeyVal) minify() { } } -// ObjectVal ... -type ObjectVal struct { - Queries []*Query -} - -func (e *ObjectVal) String() string { - var s strings.Builder - e.writeTo(&s) - return s.String() -} - -func (e *ObjectVal) writeTo(s *strings.Builder) { - for i, e := range e.Queries { - if i > 0 { - s.WriteString(" | ") - } - e.writeTo(s) - } -} - -func (e *ObjectVal) minify() { - for _, e := range e.Queries { - e.minify() - } -} - // Array ... type Array struct { Query *Query @@ -929,7 +912,7 @@ func (e *Try) minify() { // Reduce ... type Reduce struct { - Term *Term + Query *Query Pattern *Pattern Start *Query Update *Query @@ -943,7 +926,7 @@ func (e *Reduce) String() string { func (e *Reduce) writeTo(s *strings.Builder) { s.WriteString("reduce ") - e.Term.writeTo(s) + e.Query.writeTo(s) s.WriteString(" as ") e.Pattern.writeTo(s) s.WriteString(" (") @@ -954,14 +937,14 @@ func (e *Reduce) writeTo(s *strings.Builder) { } func (e *Reduce) minify() { - e.Term.minify() + e.Query.minify() e.Start.minify() e.Update.minify() } // Foreach ... 
type Foreach struct { - Term *Term + Query *Query Pattern *Pattern Start *Query Update *Query @@ -976,7 +959,7 @@ func (e *Foreach) String() string { func (e *Foreach) writeTo(s *strings.Builder) { s.WriteString("foreach ") - e.Term.writeTo(s) + e.Query.writeTo(s) s.WriteString(" as ") e.Pattern.writeTo(s) s.WriteString(" (") @@ -991,7 +974,7 @@ func (e *Foreach) writeTo(s *strings.Builder) { } func (e *Foreach) minify() { - e.Term.minify() + e.Query.minify() e.Start.minify() e.Update.minify() if e.Extract != nil { @@ -1075,6 +1058,14 @@ func (e *ConstTerm) toValue() any { } } +func (e *ConstTerm) toString() (string, bool) { + if e.Object != nil || e.Array != nil || + e.Number != "" || e.Null || e.True || e.False { + return "", false + } + return e.Str, true +} + // ConstObject ... type ConstObject struct { KeyVals []*ConstObjectKeyVal @@ -1134,7 +1125,7 @@ func (e *ConstObjectKeyVal) writeTo(s *strings.Builder) { if e.Key != "" { s.WriteString(e.Key) } else { - s.WriteString(e.KeyString) + jsonEncodeString(s, e.KeyString) } s.WriteString(": ") e.Val.writeTo(s) diff --git a/vendor/github.com/itchyny/gojq/release.go b/vendor/github.com/itchyny/gojq/release.go index c34dfb45cbf..07fc716763f 100644 --- a/vendor/github.com/itchyny/gojq/release.go +++ b/vendor/github.com/itchyny/gojq/release.go @@ -5,12 +5,12 @@ package gojq type codeinfo struct{} -func (c *compiler) appendCodeInfo(any) {} +func (*compiler) appendCodeInfo(any) {} -func (c *compiler) deleteCodeInfo(string) {} +func (*compiler) deleteCodeInfo(string) {} -func (env *env) debugCodes() {} +func (*env) debugCodes() {} -func (env *env) debugState(int, bool) {} +func (*env) debugState(int, bool) {} -func (env *env) debugForks(int, string) {} +func (*env) debugForks(int, string) {} diff --git a/vendor/github.com/itchyny/timefmt-go/CHANGELOG.md b/vendor/github.com/itchyny/timefmt-go/CHANGELOG.md index 61a4e9dc4c5..d863ac3bead 100644 --- a/vendor/github.com/itchyny/timefmt-go/CHANGELOG.md +++ b/vendor/github.com/itchyny/timefmt-go/CHANGELOG.md @@ -1,4 +1,9 @@ # Changelog +## [v0.1.6](https://github.com/itchyny/timefmt-go/compare/v0.1.5..v0.1.6) (2024-06-01) +* support parsing week directives (`%A`, `%a`, `%w`, `%u`, `%V`, `%U`, `%W`) +* validate range of values on parsing directives +* fix formatting `%l` to show `12` at midnight + ## [v0.1.5](https://github.com/itchyny/timefmt-go/compare/v0.1.4..v0.1.5) (2022-12-01) * support parsing time zone offset with name using both `%z` and `%Z` diff --git a/vendor/github.com/itchyny/timefmt-go/README.md b/vendor/github.com/itchyny/timefmt-go/README.md index f01af961120..9c028c746a5 100644 --- a/vendor/github.com/itchyny/timefmt-go/README.md +++ b/vendor/github.com/itchyny/timefmt-go/README.md @@ -1,5 +1,5 @@ # timefmt-go -[![CI Status](https://github.com/itchyny/timefmt-go/workflows/CI/badge.svg)](https://github.com/itchyny/timefmt-go/actions) +[![CI Status](https://github.com/itchyny/timefmt-go/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/itchyny/timefmt-go/actions?query=branch:main) [![Go Report Card](https://goreportcard.com/badge/github.com/itchyny/timefmt-go)](https://goreportcard.com/report/github.com/itchyny/timefmt-go) [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/itchyny/timefmt-go/blob/main/LICENSE) [![release](https://img.shields.io/github/release/itchyny/timefmt-go/all.svg)](https://github.com/itchyny/timefmt-go/releases) @@ -54,7 +54,7 @@ Note that `E` and `O` modifier characters are not supported. 
- `Parse` (`strptime`) allows to parse - composed directives like `%F %T`, - century years like `%C %y`, - - week names like `%A` `%a` (parsed results are discarded). + - week directives like `%W %a` and `%G-W%V-%u`. - `ParseInLocation` is provided for configuring the default location. ![](https://user-images.githubusercontent.com/375258/88606920-de475c80-d0b8-11ea-8d40-cbfee9e35c2e.jpg) @@ -63,7 +63,7 @@ Note that `E` and `O` modifier characters are not supported. Report bug at [Issues・itchyny/timefmt-go - GitHub](https://github.com/itchyny/timefmt-go/issues). ## Author -itchyny (https://github.com/itchyny) +itchyny () ## License This software is released under the MIT License, see LICENSE. diff --git a/vendor/github.com/itchyny/timefmt-go/format.go b/vendor/github.com/itchyny/timefmt-go/format.go index eea976ee9c6..b38202dff0a 100644 --- a/vendor/github.com/itchyny/timefmt-go/format.go +++ b/vendor/github.com/itchyny/timefmt-go/format.go @@ -132,19 +132,13 @@ func AppendFormat(buf []byte, t time.Time, format string) []byte { case 'a': buf = appendString(buf, shortWeekNames[t.Weekday()], width, padding, upper, swap) case 'w': - for ; width > 1; width-- { - buf = append(buf, padding&paddingMask) - } - buf = append(buf, '0'+byte(t.Weekday())) + buf = appendInt(buf, int(t.Weekday()), width, padding) case 'u': w := int(t.Weekday()) if w == 0 { w = 7 } - for ; width > 1; width-- { - buf = append(buf, padding&paddingMask) - } - buf = append(buf, '0'+byte(w)) + buf = appendInt(buf, w, width, padding) case 'V': if width < 2 { width = 2 @@ -193,17 +187,10 @@ func AppendFormat(buf []byte, t time.Time, format string) []byte { } buf = appendInt(buf, hour, width, padding) case 'l': - if width < 2 { - width = 2 - } if padding < ^paddingMask { padding = ' ' } - h := hour - if h > 12 { - h -= 12 - } - buf = appendInt(buf, h, width, padding) + fallthrough case 'I': if width < 2 { width = 2 @@ -215,18 +202,15 @@ func AppendFormat(buf []byte, t time.Time, format string) []byte { h = 12 } buf = appendInt(buf, h, width, padding) + case 'P': + swap = !(upper || swap) + fallthrough case 'p': if hour < 12 { buf = appendString(buf, "AM", width, padding, upper, swap) } else { buf = appendString(buf, "PM", width, padding, upper, swap) } - case 'P': - if hour < 12 { - buf = appendString(buf, "am", width, padding, upper, swap) - } else { - buf = appendString(buf, "pm", width, padding, upper, swap) - } case 'M': if width < 2 { width = 2 @@ -271,14 +255,14 @@ func AppendFormat(buf []byte, t time.Time, format string) []byte { if buf[k] == ' ' { buf[k-1], buf[k] = buf[k], buf[k-1] } - if k = offset % 3600; colons <= 2 || k != 0 { + if offset %= 3600; colons <= 2 || offset != 0 { if colons != 0 { buf = append(buf, ':') } - buf = appendInt(buf, k/60, 2, '0') - if k %= 60; colons == 2 || colons == 3 && k != 0 { + buf = appendInt(buf, offset/60, 2, '0') + if offset %= 60; colons == 2 || colons == 3 && offset != 0 { buf = append(buf, ':') - buf = appendInt(buf, k, 2, '0') + buf = appendInt(buf, offset, 2, '0') } } colons = 0 @@ -294,9 +278,7 @@ func AppendFormat(buf []byte, t time.Time, format string) []byte { copy(buf[k:], buf[j:]) buf = buf[:l] if padding&paddingMask == '0' { - for ; k > i; k-- { - buf[k-1], buf[k] = buf[k], buf[k-1] - } + buf[i], buf[k] = buf[k], buf[i] } } case ':': @@ -444,7 +426,7 @@ func appendString(buf []byte, str string, width int, padding byte, upper, swap b } switch { case swap: - if str[len(str)-1] < 'a' { + if str[1] < 'a' { for _, b := range []byte(str) { buf = append(buf, b|0x20) } diff --git 
a/vendor/github.com/itchyny/timefmt-go/parse.go b/vendor/github.com/itchyny/timefmt-go/parse.go index 83b0df2c4ee..26ae0f0c253 100644 --- a/vendor/github.com/itchyny/timefmt-go/parse.go +++ b/vendor/github.com/itchyny/timefmt-go/parse.go @@ -3,6 +3,7 @@ package timefmt import ( "errors" "fmt" + "math" "time" ) @@ -18,31 +19,32 @@ func ParseInLocation(source, format string, loc *time.Location) (t time.Time, er } func parse(source, format string, loc, base *time.Location) (t time.Time, err error) { - year, month, day, hour, min, sec, nsec := 1900, 1, 1, 0, 0, 0, 0 + year, month, day, hour, min, sec, nsec := 1900, 1, 0, 0, 0, 0, 0 defer func() { if err != nil { err = fmt.Errorf("failed to parse %q with %q: %w", source, format, err) } }() - var j, century, yday, colons int - var pm, hasZoneName, hasZoneOffset bool + var j, week, weekday, yday, colons int + century, weekstart := -1, time.Weekday(-1) + var pm, hasISOYear, hasZoneName, hasZoneOffset bool var pending string for i, l := 0, len(source); i < len(format); i++ { if b := format[i]; b == '%' { i++ if i == len(format) { - err = errors.New("stray %") + err = errors.New(`stray "%"`) return } b = format[i] L: switch b { case 'Y': - if year, j, err = parseNumber(source, j, 4, 'Y'); err != nil { + if year, j, err = parseNumber(source, j, 4, 0, 9999, 'Y'); err != nil { return } case 'y': - if year, j, err = parseNumber(source, j, 2, 'y'); err != nil { + if year, j, err = parseNumber(source, j, 2, 0, 99, 'y'); err != nil { return } if year < 69 { @@ -51,65 +53,85 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er year += 1900 } case 'C': - if century, j, err = parseNumber(source, j, 2, 'C'); err != nil { + if century, j, err = parseNumber(source, j, 2, 0, 99, 'C'); err != nil { return } case 'g': - if year, j, err = parseNumber(source, j, 2, b); err != nil { + if year, j, err = parseNumber(source, j, 2, 0, 99, b); err != nil { return } year += 2000 + hasISOYear = true case 'G': - if year, j, err = parseNumber(source, j, 4, b); err != nil { + if year, j, err = parseNumber(source, j, 4, 0, 9999, b); err != nil { return } + hasISOYear = true case 'm': - if month, j, err = parseNumber(source, j, 2, 'm'); err != nil { + if month, j, err = parseNumber(source, j, 2, 1, 12, 'm'); err != nil { return } case 'B': - if month, j, err = lookup(source, j, longMonthNames, 'B'); err != nil { + if month, j, err = parseAny(source, j, longMonthNames, 'B'); err != nil { return } case 'b', 'h': - if month, j, err = lookup(source, j, shortMonthNames, b); err != nil { + if month, j, err = parseAny(source, j, shortMonthNames, b); err != nil { return } case 'A': - if _, j, err = lookup(source, j, longWeekNames, 'A'); err != nil { + if weekday, j, err = parseAny(source, j, longWeekNames, 'A'); err != nil { return } case 'a': - if _, j, err = lookup(source, j, shortWeekNames, 'a'); err != nil { + if weekday, j, err = parseAny(source, j, shortWeekNames, 'a'); err != nil { return } case 'w': - if j >= l || source[j] < '0' || '6' < source[j] { - err = parseFormatError(b) + if weekday, j, err = parseNumber(source, j, 1, 0, 6, 'w'); err != nil { return } - j++ + weekday++ case 'u': - if j >= l || source[j] < '1' || '7' < source[j] { - err = parseFormatError(b) + if weekday, j, err = parseNumber(source, j, 1, 1, 7, 'u'); err != nil { return } - j++ - case 'V', 'U', 'W': - if _, j, err = parseNumber(source, j, 2, b); err != nil { + weekday = weekday%7 + 1 + case 'V': + if week, j, err = parseNumber(source, j, 2, 1, 53, b); err != nil { + return + 
} + weekstart = time.Thursday + if weekday == 0 { + weekday = 2 + } + case 'U': + if week, j, err = parseNumber(source, j, 2, 0, 53, b); err != nil { return } + weekstart = time.Sunday + if weekday == 0 { + weekday = 1 + } + case 'W': + if week, j, err = parseNumber(source, j, 2, 0, 53, b); err != nil { + return + } + weekstart = time.Monday + if weekday == 0 { + weekday = 2 + } case 'e': if j < l && source[j] == ' ' { j++ } fallthrough case 'd': - if day, j, err = parseNumber(source, j, 2, b); err != nil { + if day, j, err = parseNumber(source, j, 2, 1, 31, b); err != nil { return } case 'j': - if yday, j, err = parseNumber(source, j, 3, 'j'); err != nil { + if yday, j, err = parseNumber(source, j, 3, 1, 366, 'j'); err != nil { return } case 'k': @@ -118,7 +140,7 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er } fallthrough case 'H': - if hour, j, err = parseNumber(source, j, 2, b); err != nil { + if hour, j, err = parseNumber(source, j, 2, 0, 23, b); err != nil { return } case 'l': @@ -127,29 +149,29 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er } fallthrough case 'I': - if hour, j, err = parseNumber(source, j, 2, b); err != nil { + if hour, j, err = parseNumber(source, j, 2, 1, 12, b); err != nil { return } if hour == 12 { hour = 0 } - case 'p', 'P': + case 'P', 'p': var ampm int - if ampm, j, err = lookup(source, j, []string{"AM", "PM"}, 'p'); err != nil { + if ampm, j, err = parseAny(source, j, []string{"AM", "PM"}, b); err != nil { return } pm = ampm == 2 case 'M': - if min, j, err = parseNumber(source, j, 2, 'M'); err != nil { + if min, j, err = parseNumber(source, j, 2, 0, 59, 'M'); err != nil { return } case 'S': - if sec, j, err = parseNumber(source, j, 2, 'S'); err != nil { + if sec, j, err = parseNumber(source, j, 2, 0, 60, 'S'); err != nil { return } case 's': var unix int - if unix, j, err = parseNumber(source, j, 10, 's'); err != nil { + if unix, j, err = parseNumber(source, j, 10, 0, math.MaxInt, 's'); err != nil { return } t = time.Unix(int64(unix), 0).In(time.UTC) @@ -158,24 +180,24 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er hour, min, sec = t.Clock() month = int(mon) case 'f': - var usec, k, d int - if usec, k, err = parseNumber(source, j, 6, 'f'); err != nil { + usec, i := 0, j + if usec, j, err = parseNumber(source, j, 6, 0, 999999, 'f'); err != nil { return } - for j, d = k, k-j; d < 6; d++ { + for i = j - i; i < 6; i++ { usec *= 10 } nsec = usec * 1000 case 'Z': - k := j - for ; k < l; k++ { - if c := source[k]; c < 'A' || 'Z' < c { + i := j + for ; j < l; j++ { + if c := source[j]; c < 'A' || 'Z' < c { break } } - t, err = time.ParseInLocation("MST", source[j:k], base) + t, err = time.ParseInLocation("MST", source[i:j], base) if err != nil { - err = fmt.Errorf(`cannot parse %q with "%%Z"`, source[j:k]) + err = fmt.Errorf(`cannot parse %q with "%%Z"`, source[i:j]) return } if hasZoneOffset { @@ -186,7 +208,6 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er loc = t.Location() } hasZoneName = true - j = k case 'z': if j >= l { err = parseZFormatError(colons) @@ -198,44 +219,41 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er sign = -1 fallthrough case '+': - var hour, min, sec, k int - if hour, k, _ = parseNumber(source, j+1, 2, 'z'); k != j+3 { + hour, min, sec, i := 0, 0, 0, j + if hour, j, _ = parseNumber(source, j+1, 2, 0, 23, 'z'); j != i+3 { err = parseZFormatError(colons) return } 
- if j = k; j >= l || source[j] != ':' { - switch colons { - case 1: - err = errors.New("expected ':' for %:z") - return - case 2: - err = errors.New("expected ':' for %::z") + if j >= l || source[j] != ':' { + if colons > 0 { + err = expectedColonForZFormatError(colons) return } } else if j++; colons == 0 { colons = 4 } - if min, k, _ = parseNumber(source, j, 2, 'z'); k != j+2 { - if colons == 0 { - k = j - } else { + i = j + if min, j, _ = parseNumber(source, j, 2, 0, 59, 'z'); j != i+2 { + if colons > 0 { err = parseZFormatError(colons & 3) return } - } - if j = k; colons > 1 { + j = i + } else if colons > 1 { if j >= l || source[j] != ':' { if colons == 2 { - err = errors.New("expected ':' for %::z") - return - } - } else if sec, k, _ = parseNumber(source, j+1, 2, 'z'); k != j+3 { - if colons == 2 { - err = parseZFormatError(colons) + err = expectedColonForZFormatError(colons) return } } else { - j = k + i = j + if sec, j, _ = parseNumber(source, j+1, 2, 0, 59, 'z'); j != i+3 { + if colons == 2 { + err = parseZFormatError(colons) + return + } + j = i + } } } var name string @@ -258,40 +276,32 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er } j++ } else { - if i++; i == len(format) { - err = errors.New(`expected 'z' after "%:"`) - return - } else if b = format[i]; b == 'z' { - colons = 1 - } else if b != ':' { - err = errors.New(`expected 'z' after "%:"`) - return - } else if i++; i == len(format) { - err = errors.New(`expected 'z' after "%::"`) - return - } else if b = format[i]; b == 'z' { - colons = 2 - } else { - err = errors.New(`expected 'z' after "%::"`) - return + for colons = 1; colons <= 2; colons++ { + if i++; i == len(format) { + break + } else if b = format[i]; b == 'z' { + goto L + } else if b != ':' || colons == 2 { + break + } } - goto L + err = expectedZAfterColonError(colons) + return } case 't', 'n': - k := j + i := j K: - for ; k < l; k++ { - switch source[k] { + for ; j < l; j++ { + switch source[j] { case ' ', '\t', '\n', '\v', '\f', '\r': default: break K } } - if k == j { - err = fmt.Errorf("expected a space for %%%c", b) + if i == j { + err = fmt.Errorf(`expected a space for "%%%c"`, b) return } - j = k case '%': if j >= l || source[j] != b { err = expectedFormatError(b) @@ -304,7 +314,7 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er if pending, ok = compositions[b]; ok { break } - err = fmt.Errorf(`unexpected format: "%%%c"`, b) + err = fmt.Errorf(`unexpected format "%%%c"`, b) return } if j >= l || source[j] != b { @@ -317,7 +327,7 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er b, pending = pending[0], pending[1:] goto L } - } else if j >= len(source) || source[j] != b { + } else if j >= l || source[j] != b { err = expectedFormatError(b) return } else { @@ -325,17 +335,40 @@ func parse(source, format string, loc, base *time.Location) (t time.Time, err er } } if j < len(source) { - err = fmt.Errorf("unconverted string: %q", source[j:]) + err = fmt.Errorf("unparsed string %q", source[j:]) return } if pm { hour += 12 } - if century > 0 { + if century >= 0 { year = century*100 + year%100 } - if yday > 0 { - return time.Date(year, time.January, 1, hour, min, sec, nsec, loc).AddDate(0, 0, yday-1), nil + if day == 0 { + if yday > 0 { + if hasISOYear { + err = errors.New(`use "%Y" to parse non-ISO year for "%j"`) + return + } + return time.Date(year, time.January, yday, hour, min, sec, nsec, loc), nil + } + if weekstart >= time.Sunday { + if weekstart == 
time.Thursday { + if !hasISOYear { + err = errors.New(`use "%G" to parse ISO year for "%V"`) + return + } + } else if hasISOYear { + err = errors.New(`use "%Y" to parse non-ISO year for "%U" or "%W"`) + return + } + if weekstart > time.Sunday && weekday == 1 { + week++ + } + t := time.Date(year, time.January, -int(weekstart), hour, min, sec, nsec, loc) + return t.AddDate(0, 0, week*7-int(t.Weekday())+weekday-1), nil + } + day = 1 } return time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc), nil } @@ -347,7 +380,7 @@ func locationZone(loc *time.Location) (name string, offset int) { type parseFormatError byte func (err parseFormatError) Error() string { - return fmt.Sprintf("cannot parse %%%c", byte(err)) + return fmt.Sprintf(`cannot parse "%%%c"`, byte(err)) } type expectedFormatError byte @@ -359,46 +392,51 @@ func (err expectedFormatError) Error() string { type parseZFormatError int func (err parseZFormatError) Error() string { - switch int(err) { - case 0: - return "cannot parse %z" - case 1: - return "cannot parse %:z" - default: - return "cannot parse %::z" - } + return `cannot parse "%` + `::z"`[2-err:] +} + +type expectedColonForZFormatError int + +func (err expectedColonForZFormatError) Error() string { + return `expected ':' for "%` + `::z"`[2-err:] +} + +type expectedZAfterColonError int + +func (err expectedZAfterColonError) Error() string { + return `expected 'z' after "%` + `::"`[2-err:] } -func parseNumber(source string, min, size int, format byte) (int, int, error) { - var val int - if l := len(source); min+size > l { +func parseNumber(source string, index, size, min, max int, format byte) (int, int, error) { + var value int + if l := len(source); index+size > l { size = l } else { - size += min + size += index } - i := min + i := index for ; i < size; i++ { if b := source[i]; '0' <= b && b <= '9' { - val = val*10 + int(b&0x0F) + value = value*10 + int(b&0x0F) } else { break } } - if i == min { + if i == index || value < min || max < value { return 0, 0, parseFormatError(format) } - return val, i, nil + return value, i, nil } -func lookup(source string, min int, candidates []string, format byte) (int, int, error) { +func parseAny(source string, index int, candidates []string, format byte) (int, int, error) { L: for i, xs := range candidates { - j := min + j := index for k := 0; k < len(xs); k, j = k+1, j+1 { if j >= len(source) { continue L } - if x, y := xs[k], source[j]; x != y && x|('a'-'A') != y|('a'-'A') { + if x, y := xs[k], source[j]; x != y && x|0x20 != y|0x20 { continue L } } diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4c28dff4655..7a008a4d23e 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 + - go install mvdan.cc/garble@v0.9.3 builds: - @@ -92,7 +92,16 @@ builds: archives: - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD format_overrides: - goos: windows format: zip @@ -116,7 +125,7 @@ changelog: nfpms: - - file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" vendor: 
Klaus Post homepage: https://github.com/klauspost/compress maintainer: Klaus Post @@ -125,3 +134,8 @@ nfpms: formats: - deb - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583c00..efab55e655d 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,36 +16,6 @@ This package provides various compression algorithms. # changelog -* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) - * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 - -* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 - * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - * Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 @@ -70,9 +40,6 @@ This package provides various compression algorithms. * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 -
- See changes to v1.15.x - * Jan 21st, 2023 (v1.15.15) * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 @@ -199,8 +166,6 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati While the release has been extensively tested, it is recommended to testing when upgrading. -
-
See changes to v1.14.x @@ -661,8 +626,6 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. * [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. * [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. -* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. # license diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md deleted file mode 100644 index ca6685e2b72..00000000000 --- a/vendor/github.com/klauspost/compress/SECURITY.md +++ /dev/null @@ -1,25 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Vulnerability Definition - -A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. - -Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. - -Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. - -It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. - -Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. - -This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7b6..43e463611b1 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -152,11 +152,12 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() { +func (b *bitWriter) close() error { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() + return nil } // reset and continue writing by appending to out. 
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f94..dac97e58a2d 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -199,8 +199,7 @@ func (s *Scratch) compress(src []byte) error { c2.flush(s.actualTableLog) c1.flush(s.actualTableLog) - s.bw.close() - return nil + return s.bw.close() } // writeCount will write the normalized histogram count to header. @@ -212,7 +211,7 @@ func (s *Scratch) writeCount() error { previous0 bool charnum uint16 - maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 // Write Table Size bitStream = uint32(tableLog - minTablelog) diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac76..aed2347cedd 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -13,6 +13,14 @@ type bitWriter struct { out []byte } +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -94,9 +102,10 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() { +func (b *bitWriter) close() error { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() + return nil } diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000000..4dcab8d2327 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,44 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// remain will return the number of bytes remaining. 
+func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f00..4ee4fa18dda 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err } func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil + return s.compress1xDo(s.Out, src) } -func (s *Scratch) compress1xDo(dst, src []byte) []byte { +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { var bw = bitWriter{out: dst} // N is length divisible by 4. @@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) []byte { bw.encTwoSymbols(cTable, tmp[1], tmp[0]) } } - bw.close() - return bw.out + err := bw.close() + return bw.out, err } var sixZeros [6]byte @@ -283,8 +283,12 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { } src = src[len(toDo):] + var err error idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } if len(s.Out)-idx > math.MaxUint16 { // We cannot store the size in the jump table return nil, ErrIncompressible @@ -311,6 +315,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup + var errs [4]error wg.Add(4) for i := 0; i < 4; i++ { toDo := src @@ -321,12 +326,15 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Separate goroutine for each block. go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) wg.Done() }(i) } wg.Wait() for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table @@ -350,7 +358,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Does not update s.clearCount. func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { reuse = true - _ = s.count // Assert that s != nil to speed up the following loop. for _, v := range in { s.count[v]++ } @@ -416,7 +423,7 @@ func (s *Scratch) validateTable(c cTable) bool { // minTableLog provides the minimum logSize to safely represent a distribution. 
func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 if minBitsSrc < minBitsSymbols { return uint8(minBitsSrc) @@ -428,7 +435,7 @@ func (s *Scratch) minTableLog() uint8 { func (s *Scratch) optimalTableLog() { tableLog := s.TableLog minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b25c0..3c0b398c72e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { switch d.actualTableLog { case 8: - const shift = 0 + const shift = 8 - 8 for br.off >= 4 { br.fillFast() v := dt[uint8(br.value>>(56+shift))] diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0a7..e8ad17ad08e 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -88,7 +88,7 @@ type Scratch struct { // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. MaxDecodedSize int - srcLen int + br byteReader // MaxSymbolValue will override the maximum symbol value of the next block. MaxSymbolValue uint8 @@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) { if s.fse == nil { s.fse = &fse.Scratch{} } - s.srcLen = len(in) + s.br.init(in) return s, nil } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a028..05db94d39a4 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -87,6 +87,18 @@ func emitCopy(dst []byte, offset, length int) int { return i + 2 } +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 92e2347bbc0..65b38abed80 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ## Decompressor -Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. +Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), kindly supplied by [fuzzit.dev](https://fuzzit.dev/). @@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. 
-var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) +var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 25ca983941d..97299d499cf 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -17,6 +17,7 @@ import ( // for aligning the input. type bitReader struct { in []byte + off uint // next byte to read is at in[off - 1] value uint64 // Maybe use [16]byte, but shifting is awkward. bitsRead uint8 } @@ -27,6 +28,7 @@ func (b *bitReader) init(in []byte) error { return errors.New("corrupt stream: too short") } b.in = in + b.off = uint(len(in)) // The highest bit of the last byte indicates where to start v := in[len(in)-1] if v == 0 { @@ -67,19 +69,21 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value = (b.value << 32) | uint64(low) b.bitsRead -= 32 + b.off -= 4 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) b.bitsRead = 0 + b.off -= 8 } // fill() will make sure at least 32 bits are available. @@ -87,25 +91,25 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value = (b.value << 32) | uint64(low) b.bitsRead -= 32 + b.off -= 4 return } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 + return b.off == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -115,7 +119,7 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) + return b.off*8 + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b0d..78b3c61be3e 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -97,11 +97,12 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() { +func (b *bitWriter) close() error { // End mark b.addBits16Clean(1, 1) // flush until next byte. 
b.flushAlign() + return nil } // reset and continue writing by appending to out. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601ff..5f272d87f69 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -592,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { } seq.fse.setRLE(symb) if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) + printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ade5..fd4a36f730c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -361,21 +361,14 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { if len(lits) >= 1024 { // Use 4 Streams. out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { + } else if len(lits) > 32 { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(lits, b.litEnc) } else { err = huff0.ErrIncompressible } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } + switch err { case huff0.ErrIncompressible: if debugEncoder { @@ -510,7 +503,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.literals) >= 1024 && !raw { // Use 4 Streams. out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { + } else if len(b.literals) > 32 && !raw { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) @@ -518,17 +511,6 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { err = huff0.ErrIncompressible } - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } switch err { case huff0.ErrIncompressible: lh.setType(literalsBlockRaw) @@ -791,7 +773,10 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { ml.flush(mlEnc.actualTableLog) of.flush(ofEnc.actualTableLog) ll.flush(llEnc.actualTableLog) - wr.close() + err = wr.close() + if err != nil { + return err + } b.output = wr.out // Maybe even add a bigger margin. diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 774c5f00fe4..07a90dd7af3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption { } } -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. // The slice content can be arbitrary data. 
func WithDecoderDictRaw(id uint32, content []byte) DOption { return func(o *decoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe64c..ca0951452e6 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,13 +1,10 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "fmt" "io" - "math" - "sort" "github.com/klauspost/compress/huff0" ) @@ -17,8 +14,9 @@ type dict struct { litEnc *huff0.Scratch llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte } const dictMagic = "\x37\xa4\x30\xec" @@ -161,374 +159,3 @@ func InspectDictionary(b []byte) (interface { d, err := loadDict(b) return d, err } - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. - DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) - } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) 
- } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - seqs += len(block.sequences) - block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. 
- nUsed = seqs / 512 - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) - if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 5ca46038ad9..e008b99298a 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -144,7 +144,6 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { } else { e.crc.Reset() } - e.blk.dictLitEnc = nil if d != nil { low := e.lowMem if singleBlock { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c81a15357af..9819d414536 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) { if m.rep < 0 { ofc = ofCode(uint32(m.s-m.offset) + 3) } else { - ofc = ofCode(uint32(m.rep) & 3) + ofc = ofCode(uint32(m.rep)) } // Cost, excluding ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] @@ -197,13 +197,12 @@ encodeLoop: // Set m to a match at offset if it looks like that will improve compression. 
improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { return } if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + if offset <= 0 { + panic(offset) } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) @@ -227,7 +226,7 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if true { + if rep < 0 { // Extend candidate match backwards as far as possible. tMin := s - e.maxMatchOff if tMin < 0 { @@ -282,7 +281,6 @@ encodeLoop: // Load next and check... e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - index0 := s + 1 // Look far ahead, unless we have a really long match already... if best.length < goodEnough { @@ -345,8 +343,8 @@ encodeLoop: if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") + if debugAsserts && s <= nextEmit { + panic("s <= nextEmit") } addLiterals(&seq, best.s) @@ -358,16 +356,19 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index old s + 1 -> s - 1 + index0 := s + 1 s = best.s + best.length - nextEmit = s - // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop } + // Index skipped... off := index0 + e.cur - for index0 < end { + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -376,7 +377,6 @@ encodeLoop: off++ index0++ } - switch best.rep { case 2, 4 | 1: offset1, offset2 = offset2, offset1 @@ -385,17 +385,12 @@ encodeLoop: case 4 | 3: offset1, offset2, offset3 = offset1-1, offset1, offset2 } - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. 
+ index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -423,25 +418,19 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) nextEmit = s - - // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 + if s >= sLimit { + break encodeLoop } - off := index0 + e.cur - for index0 < end { + // Index old s + 1 -> s - 1 + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ - off++ - } - if s >= sLimit { - break encodeLoop } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e052..8582f31a7cc 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -145,7 +145,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 + var matched int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -162,7 +162,6 @@ encodeLoop: off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -259,6 +258,7 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) + index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -498,15 +498,15 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - off := index0 + e.cur + index0 := s - l + 1 for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 - off += 2 } cv = load6432(src, s) @@ -672,7 +672,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 + var matched int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -691,7 +691,6 @@ encodeLoop: e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} e.markShortShardDirty(nextHashS) - index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -727,6 +726,7 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 + index0 := s + repOff s += lenght + repOff nextEmit = s @@ -790,6 +790,7 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) + index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -1023,18 +1024,18 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - off := index0 + e.cur + index0 := s - l + 1 for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: 
uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 - off += 2 } cv = load6432(src, s) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f741..7d425109adb 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id - allDirty = true + e.allDirty = true } // Reset table to initial state e.cur = e.maxMatchOff diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7dae..315b1a8f2f6 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -133,7 +133,8 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -644,7 +645,8 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -829,12 +831,13 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { + for i := e.maxMatchOff; i < end; i += 3 { const hashLog = tableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -843,6 +846,10 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { val: uint32(cv >> 8), offset: i + 1, } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } } } e.lastDictID = d.id diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0fe0..4de0aed0d0d 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -227,7 +227,10 @@ func (e *Encoder) nextBlock(final bool) error { DictID: e.o.dict.ID(), } - dst := fh.appendTo(tmp[:0]) + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } s.headerWritten = true s.wWg.Wait() var n2 int @@ -480,7 +483,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { Checksum: false, DictID: 0, } - dst = fh.appendTo(dst) + dst, _ = fh.appendTo(dst) // Write raw block as last one only. var blk blockHeader @@ -515,7 +518,10 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { dst = make([]byte, 0, len(src)) } - dst = fh.appendTo(dst) + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } // If we can do everything in one block, prefer that. 
if len(src) <= e.o.blockSize { @@ -575,7 +581,6 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // Add padding with content from crypto/rand.Reader if e.o.pad > 0 { add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error dst, err = skippableFrame(dst, add, rand.Reader) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index faaf81921cd..50f70533b43 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption { } // No need to waste our time. if n == 1 { - n = 0 + o.pad = 0 } if n > 1<<30 { return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7e5a..cc0aa22745d 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error { switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF - case nil: - signature[0] = b[0] default: return err + case nil: + signature[0] = b[0] } // Read the rest, don't allow io.ErrUnexpectedEOF b, err = br.readSmall(3) switch err { case io.EOF: return io.EOF - case nil: - copy(signature[1:], b) default: return err + case nil: + copy(signature[1:], b) } if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 2f5d5ed4546..4ef7f5a3e3d 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -22,7 +22,7 @@ type frameHeader struct { const maxHeaderSize = 14 -func (f frameHeader) appendTo(dst []byte) []byte { +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { dst = append(dst, frameMagic...) var fhd uint8 if f.Checksum { @@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) []byte { default: panic("invalid fcs") } - return dst + return dst, nil } const skippableFrameHeader = 4 + 4 diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b7a4f..00000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 9a7655c0f76..00000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,68 +0,0 @@ -// Copied from S2 implementation. 
- -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31c027..00000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index d7fe6d82d93..9405fcf1016 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { return io.ErrUnexpectedEOF } var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.off > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) @@ -452,13 +452,18 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) // extra bits are stored in reverse order. 
br.fill() - mo += br.getBits(moB) - if s.maxBits > 32 { + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + } - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) mo = s.adjustOffset(mo, ll, moB) return } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b99725fd..b6f4ba6fc59 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -5,11 +5,11 @@ // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -301,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R12, 152(AX) MOVQ R13, 160(AX) MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -336,11 +336,11 @@ error_overread: // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -603,9 +603,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R12, 152(AX) MOVQ R13, 160(AX) MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -638,11 +638,11 @@ error_overread: // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -892,9 +892,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R11, 152(CX) MOVQ R12, 160(CX) MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -927,11 +927,11 @@ error_overread: // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -1152,9 +1152,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R11, 152(CX) MOVQ R12, 160(CX) MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1797,11 +1797,11 @@ empty_seqs: 
// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: CMOV, SSE TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2295,9 +2295,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2362,11 +2362,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: BMI, BMI2, CMOV, SSE TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2818,9 +2818,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2885,11 +2885,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: CMOV, SSE TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -3485,9 +3485,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3552,11 +3552,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: BMI, BMI2, CMOV, SSE TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -4110,9 +4110,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) // Update the context MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 2fb35b788c1..ac2a80d2911 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.off > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index ec13594e89b..9e1baad73be 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -95,9 +95,10 @@ func (r 
*SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { var written int64 var readHeader bool { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - + var header []byte var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + n, r.err = w.Write(header) if r.err != nil { return written, r.err diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc73671..89396673d97 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -9,6 +9,7 @@ import ( "errors" "log" "math" + "math/bits" ) // enable debug printing @@ -105,6 +106,27 @@ func printf(format string, a ...interface{}) { } } +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} + func load3232(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } diff --git a/vendor/github.com/moby/moby/LICENSE b/vendor/github.com/mauromorales/xpasswd/LICENSE similarity index 93% rename from vendor/github.com/moby/moby/LICENSE rename to vendor/github.com/mauromorales/xpasswd/LICENSE index 6d8d58fb676..c6970342ba1 100644 --- a/vendor/github.com/moby/moby/LICENSE +++ b/vendor/github.com/mauromorales/xpasswd/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2018 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2024] [Mauro Miguel Morales Mejia] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/mauromorales/xpasswd/pkg/users/users.go b/vendor/github.com/mauromorales/xpasswd/pkg/users/users.go new file mode 100644 index 00000000000..878b58f644f --- /dev/null +++ b/vendor/github.com/mauromorales/xpasswd/pkg/users/users.go @@ -0,0 +1,132 @@ +// Package users provides primitives for user information on a system. 
+package users + +import ( + "errors" + "fmt" + "strconv" +) + +// User is an interface that represents a user on a system +type User interface { + // UID returns the user's unique ID + UID() (int, error) + // GID returns the user's group ID + GID() (int, error) + // Username returns the user's username + Username() string + // Password returns the user's password (this is usually not used but we include it for completeness) + Password() string + // HomeDir returns the user's home directory + HomeDir() string + // Shell returns the user's shell + Shell() string + // RealName returns the user's real name + RealName() string +} + +type CommonUser struct { + uid string + gid string + username string + password string + homeDir string + shell string + realName string +} + +func (u CommonUser) UID() (int, error) { + return strconv.Atoi(u.uid) +} + +func (u CommonUser) GID() (int, error) { + return strconv.Atoi(u.gid) +} + +func (u CommonUser) Username() string { + return u.username +} + +func (u CommonUser) Password() string { + return u.password +} + +func (u CommonUser) HomeDir() string { + return u.homeDir +} + +func (u CommonUser) Shell() string { + return u.shell +} + +func (u CommonUser) RealName() string { + return u.realName +} + +// UserList is an interface that represents a list of users +type UserList interface { + // Get returns a user from the list by username + Get(username string) User + // GetAll returns all users in the list + GetAll() ([]User, error) + GenerateUID() int + GenerateUIDInRange(int, int) (int, error) + LastUID() int + SetPath(path string) + Load() error +} + +// CommonUserList is a common implementation of UserList +type CommonUserList struct { + users []User + lastUID int +} + +// Get checks if a user with the given username exists +func (list CommonUserList) Get(username string) User { + for _, user := range list.users { + if user.Username() == username { + return user + } + } + return nil +} + +func (list CommonUserList) LastUID() int { + return list.lastUID +} + +func (list CommonUserList) GenerateUID() int { + if len(list.users) == 0 { + return 0 + } + return list.lastUID + 1 +} + +// Finds the lowest available uid in the specified range. +// Returns an error if there is no available uid in that range. 
+func (list CommonUserList) GenerateUIDInRange(minimum, maximum int) (int, error) { + userSet := make(map[int]struct{}) + for _, user := range list.users { + uid, err := user.UID() + if err != nil { + return -1, fmt.Errorf("getting user's uid: %w", err) + } + userSet[uid] = struct{}{} + } + + result := -1 + for i := minimum; i <= maximum; i++ { + if _, found := userSet[i]; found { + continue // uid in use, skip it + } + result = i // found a free one, stop here + break + } + + if result == -1 { + return result, errors.New("no available uid in range") + } + + return result, nil +} diff --git a/vendor/github.com/mauromorales/xpasswd/pkg/users/users_darwin.go b/vendor/github.com/mauromorales/xpasswd/pkg/users/users_darwin.go new file mode 100644 index 00000000000..96dd46b8cc9 --- /dev/null +++ b/vendor/github.com/mauromorales/xpasswd/pkg/users/users_darwin.go @@ -0,0 +1,143 @@ +package users + +import ( + "bytes" + "fmt" + "os/exec" + "strconv" + "strings" +) + +// DarwinUser matches the fields in ds +type DarwinUser struct { + recordName string + password string + uniqueID string + primaryGroupID string + realName string + nFSHomeDirectory string + userShell string +} + +func (u DarwinUser) UID() (int, error) { + return strconv.Atoi(u.uniqueID) +} + +func (u DarwinUser) GID() (int, error) { + return strconv.Atoi(u.primaryGroupID) +} + +func (u DarwinUser) Username() string { + return u.recordName +} + +func (u DarwinUser) Password() string { + return u.password +} + +func (u DarwinUser) HomeDir() string { + return u.nFSHomeDirectory +} + +func (u DarwinUser) Shell() string { + return u.userShell +} + +func (u DarwinUser) RealName() string { + return u.realName +} + +func NewUserList() UserList { + return &DarwinUserList{} +} + +// DarwinUserList is a list of Linux users +type DarwinUserList struct { + CommonUserList +} + +func (l *DarwinUserList) SetPath(path string) { +} + +func (l DarwinUserList) Load() error { + _, err := l.GetAll() + return err +} + +// GetAll returns a list of users on a Darwin system +func (l DarwinUserList) GetAll() ([]User, error) { + users := make([]User, 0) + + output, err := execDSCL("-readall", "/Users", "UniqueID", "PrimaryGroupID", "RealName", "UserShell", "NFSHomeDirectory", "RecordName", "Password") + if err != nil { + return users, fmt.Errorf("failed to execute command: %w", err) + } + + records := strings.Split(strings.TrimSpace(output), "\n-\n") + + for _, record := range records { + user := parseRecord(record) + users = append(users, user) + uid, err := user.UID() + if err != nil { + return users, fmt.Errorf("failed to convert UID to int: %w", err) + } + if uid > l.lastUID { + l.lastUID = uid + } + } + + l.users = users + + return users, nil +} + +func parseRecord(record string) DarwinUser { + user := DarwinUser{} + lines := strings.Split(strings.TrimSpace(record), "\n") + preK := "" + for _, line := range lines { + k, v, ok := strings.Cut(strings.TrimSpace(line), ":") + + key := k + val := strings.TrimSpace(v) + + if ok { + preK = k + } else if !ok && preK != "" { + key = preK + val = k + preK = "" + } + + switch key { + case "RecordName": + user.recordName = val + case "Password": + user.password = val + case "UniqueID": + user.uniqueID = val + case "PrimaryGroupID": + user.primaryGroupID = val + case "RealName": + user.realName = val + case "NFSHomeDirectory": + user.nFSHomeDirectory = val + case "UserShell": + user.userShell = val + } + } + return user +} + +func execDSCL(arg ...string) (string, error) { + args := append([]string{"."}, arg...) 
+ cmd := exec.Command("dscl", args...) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} diff --git a/vendor/github.com/mauromorales/xpasswd/pkg/users/users_linux.go b/vendor/github.com/mauromorales/xpasswd/pkg/users/users_linux.go new file mode 100644 index 00000000000..decb2500bc3 --- /dev/null +++ b/vendor/github.com/mauromorales/xpasswd/pkg/users/users_linux.go @@ -0,0 +1,128 @@ +package users + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// LinuxUser matches the fields in /etc/passwd. See man 5 passwd for more information +type LinuxUser struct { + login string + password string + uid string + gid string + userNameOrComment string + userHomeDir string + usercommandInterpreter string +} + +func NewUserList() UserList { + return &LinuxUserList{path: "/etc/passwd"} +} + +// LinuxUserList is a list of Linux users +type LinuxUserList struct { + CommonUserList + path string +} + +func (u LinuxUser) UID() (int, error) { + return strconv.Atoi(u.uid) +} + +func (u LinuxUser) GID() (int, error) { + return strconv.Atoi(u.gid) +} + +func (u LinuxUser) Username() string { + return u.login +} + +func (u LinuxUser) Password() string { + return u.password +} + +func (u LinuxUser) HomeDir() string { + return u.userHomeDir +} + +func (u LinuxUser) Shell() string { + return u.usercommandInterpreter +} + +func (u LinuxUser) RealName() string { + return u.userNameOrComment +} + +func (l *LinuxUserList) SetPath(path string) { + l.path = path +} + +func (l *LinuxUserList) Load() error { + _, err := l.GetAll() + return err +} + +// GetAll returns all users in the list +func (l *LinuxUserList) GetAll() ([]User, error) { + users := make([]User, 0) + + file, err := os.Open(l.path) + if err != nil { + return users, err + } + defer file.Close() + + // Create a scanner to read the file line by line + scanner := bufio.NewScanner(file) + + // Read each line + for scanner.Scan() { + line := scanner.Text() + + user, lineErr := parseRecord(line) + if lineErr == nil { + users = append(users, user) + } + + uid, lineErr := user.UID() + if lineErr != nil { + err = lineErr + } + + if lineErr == nil && uid > l.lastUID { + l.lastUID = uid + } + } + + // Check if there were errors during scanning + if err := scanner.Err(); err != nil { + return users, fmt.Errorf("error reading the file: %w", err) + } + + l.users = users + + return users, err +} + +func parseRecord(record string) (LinuxUser, error) { + user := LinuxUser{} + fields := strings.Split(record, ":") + // Check if the line is correctly formatted with 7 fields + if len(fields) != 7 { + return user, fmt.Errorf("unexpected format: %s", record) + } + + user.login = fields[0] + user.password = fields[1] + user.uid = fields[2] + user.gid = fields[3] + user.userNameOrComment = fields[4] + user.userHomeDir = fields[5] + user.usercommandInterpreter = fields[6] + + return user, nil +} diff --git a/vendor/github.com/moby/moby/AUTHORS b/vendor/github.com/moby/moby/AUTHORS deleted file mode 100644 index 48d04f9a983..00000000000 --- a/vendor/github.com/moby/moby/AUTHORS +++ /dev/null @@ -1,2429 +0,0 @@ -# File @generated by hack/generate-authors.sh. DO NOT EDIT. -# This file lists all contributors to the repository. -# See hack/generate-authors.sh to make modifications. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. 
Xu -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Abirdcfly -Ada Mancini -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Thornton -Adam Walz -Adam Williams -AdamKorcz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Akshay Moghe -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alec Benson -Alejandro González Hevia -Aleksa Sarai -Aleksandr Chebotov -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Nordlund -Alex Olshansky -Alex Samorukov -Alex Stockinger -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Polakov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis Ries -Alexis Thomas -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Ameya Gawde -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Ushakov -Andrei Vagin -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kim -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Lindeman -Andy Rothfusz -Andy Smith -Andy Wilson -Andy Zhang -Aneesh Kulkarni -Anes Hasicic -Angel Velazquez -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anuj Varma -Anusha Ragunathan -Anyu Wang -apocas -Arash Deshmeh -arcosx -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Artem Khramov -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -Austin Vazquez -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bao Yonglei -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -Bastien Pascard -bdevloed -Bearice Ren -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Langfeld -Ben Lovy -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Böhmke -Benjamin Wang -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Billy Ridgway -Bily Zhang -Bin Liu -Bingshen Wang -Bjorn Neergaard -Blake Geno -Boaz Shuster -bobby abbott -Bojun Zhu -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brennan Kinney <5098581+polarathene@users.noreply.github.com> -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Milford -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Sparr -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. 
Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chee Hau Lim -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -Chenyang Yan -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -Chiranjeevi Tirunagari -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris Kreussling (Flatbush Gardener) -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Becker -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christoph Ziebuhr -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. Costa -Clayton Coleman -Clint Armstrong -Clinton Kitson -clubby789 -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Conor Evans -Corbin Coleman -Corey Farrell -Cory Forsyth -Cory Snider -cressie176 -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -cui fliter -CUI Wei -Cuong Manh Le -Cyprian Gracz -Cyril F -Da McGrady -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Plamadeala -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel P. Berrangé -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Daniele Rondina -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Bellotti -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Karlsson <35727626+dvdksn@users.noreply.github.com> -David Lawrence -David Lechner -David M. Karr -David Mackey -David Manouchehri -David Mat -David Mcanulty -David McKay -David O'Rourke -David P Hilton -David Pelaez -David R. 
Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Dhilip Kumars -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -dingwei -Diogo Monica -DiuDiugirl -Djibril Koné -Djordje Lukic -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dmytro Iakovliev -docker-unir[bot] -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Dorin Geman -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Koromilas -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Eng Zer Jun -Enguerran -Enrico Weigelt, metux IT consult -Eohyung Lee -epeterso -er0k -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Mountain -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erich Cordoba -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Sipsma -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Espen Suenson -Ethan Bell -Ethan Mosbaugh -Euan Harris -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Lezar -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Feroz Salam -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Degrassi -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Frank Villaro-Dixon -Frank Yang -Fred Lifton -Frederick F. Kautz IV -Frederico F. 
de Oliveira -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -frobnicaty <92033765+frobnicaty@users.noreply.github.com> -Frédéric Dalleau -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Adrian Samfira -Gabriel Goller -Gabriel L. Somlo -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoff Levand -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Georgy Yakovlev -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Giuseppe Scrivano -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -Gunadhya S. <6939749+gunadhya@users.noreply.github.com> -Guoqiang QI -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -haining.cao -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -Hsing-Yu (David) Chen -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Barrera -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hui Kang -Hunter Blanks -huqun -Huu Nguyen -Hyeongkyu Lee -Hyzhou Zhy -Iago López Galeiras -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Illia Antypenko -Illo Abdulrahim -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -Jakub Drahos -Jakub Guzik -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Sanders -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Breig -Jan Chren -Jan Garcia -Jan Götte -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -Jasmine Hegman -Jason A. 
Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Javier Bassi -jaxgeller -Jay -Jay Kamat -Jay Lim -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Michel Rouet -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeff Zvier -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Huntwork -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jeyanthinath Muthuram -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zhang -Jiang Jinyang -Jianyong Wu -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Carroll -Jim Ehrismann -Jim Galasyn -Jim Lin -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joakim Roubert -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jordi Massaguer Pla -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun Du -Jun-Ru Chang -junxu -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Chadwell -Justin Cormack -Justin Force -Justin Keller <85903732+jk-vb@users.noreply.github.com> -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. 
Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kaijie Chen -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Kazuyoshi Kato -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Bannister -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Alvarez -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -Kirk Easterson -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konrad Ponichtera -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Kostadin Plachkov -kpcyrd -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Squizzato -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Brehm -Laura Frank -Laurent Bernaille -Laurent Erignoux -Laurie Voss -Leandro Motta Barros -Leandro Siqueira -Lee Calcote -Lee Chao <932819864@qq.com> -Lee, Meng-Han -Lei Gong -Lei Jitang -Leiiwang -Len Weincier -Lennie -Leo Gallucci -Leonardo Nodari -Leonardo Taccari -Leszek Kowalski -Levi Blackstone -Levi Gross -Levi Harrison -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liangwei -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limeidan -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luboslav Pivarc -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Henrique Mulinari -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marat Radchenko -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Feit -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark Vainomaa -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Braun -Martin Dojcak -Martin Honermeyer -Martin Jirku -Martin Kelly 
-Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Maru Newby -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Mathieu Paturel -Matt Apperson -Matt Bachmann -Matt Bajor -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Morrison <3maven@gmail.com> -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Fronton -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Max Timchenko -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Menghui Chen -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. Smith -Michael Beskin -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Kebe -Michael Kuehn -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael Weidmann -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Kostrzewa -Michal Minář -Michal Rostecki -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michał Kosek -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Miguel Perez -Mihai Borobocea -Mihuleacc Sergiu -Mikael Davranche -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -Mike Sul -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milas Bowman -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohd Sadiq -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Muhammad Zohaib Aslam -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Carlson -Nathan Herald -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. 
Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Santos -Nick Stenning -Nick Stinemates -Nick Wood -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Niel Drummond -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -ningmingxiao -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Nolan Miles -Noriki Nakamura -nponeccop -Nurahmadie -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Onur Filiz -Oriol Francès -Oscar Bonilla <6f6231@gmail.com> -oscar.chen <2972789494@qq.com> -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Haas -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul "TBBle" Hampson -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Seiffert -Paul Weaver -Paulo Gomes -Paulo Ribeiro -Pavel Lobashov -Pavel Matěja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Paweł Gronowski -payall4u -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Pete Woods -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Petros Angelatos -Phil -Phil Estes -Phil Sphicas -Phil Spitler -Philip Alexander Etling -Philip K. Warren -Philip Monroe -Philipp Fruck -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -Piotr Karbowski -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Pradipta Kr. 
Banerjee -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Puneet Pruthi -Pure White -pysqz -Qiang Huang -Qin TianHuan -Qinglan Peng -Quan Tian -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rachit Sharma -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -realityone -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Horwood -Rich Moyse -Rich Seymour -Richard Burnison -Richard Hansen -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Cowsill <42620235+rcowsill@users.noreply.github.com> -Rob Gulewich -Rob Murray -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Shade -Robert Stern -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Campos -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Roman Volosatovs -Roman Zabaluev -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -Rony Weng -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Roy Reznik -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Barry -Ryan Belgrave -Ryan Campbell -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Shea -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Ryoga Saito -Régis Behmo -Rémy Greinhofer -s. rannou -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Thibault -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -sanchayanghosh -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Moser -Scott Percival -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. 
Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Sebastian Höffner -Sebastian Radloff -Sebastian Thomschke -Sebastien Goasguen -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayan Pooya -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shengjing Zhu -Shev Yan -Shih-Yuan Lee -Shihao Xia -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shu-Wai Chow -shuai-z -Shukui Yang -Sian Lerk Lau -Siarhei Rasiukevich -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Smark Meng -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Sotiris Salloumis -Soulou -Spencer Brown -Spencer Smith -Spike Curtis -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan Gehrig -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Steffen Butzer -Stephan Henningsen -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Stéphane Este-Gracias -Stig Larsson -Su Wang -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Sören Tempel -Tabakhase -Tadej Janež -Takuto Sato -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -Terry Chu -terryding77 <550147740@qq.com> -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thiago Alves Silva -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Graf -Thomas Grainger -Thomas Hansen -Thomas Ledos -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tiago Seabra -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Claassen -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wagner -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timchenxiaoyu <837829664@qq.com> -timfeirg -Timo Rothenpieler -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Pfandzelter -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Parker -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tom Zhao -Tomas Janousek -Tomas Kral -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tomek Mańko -Tommaso Visconti -Tomoya Tabuchi -Tomáš Hrčka -tonic -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Toshiaki Makita -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tudor Brindus -Ty Alexander -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -Valentin Kulesh -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikas Choudhary -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Anjos -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vladislav Kolesnikov -Vlastimil Zeman -Vojtech Vitek (V-Teq) -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wataru Ishida -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wesley Pettit -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Nagele -Wolfgang Powisch -Wonjun Kim -WuLonghui -xamyzhao -Xia Wu -Xian Chaobo -Xianglin Gao -Xianjie -Xianlu Bird -Xiao YongBiao -Xiao Zhang -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaohua Ding -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -xin.li -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -yalpul -YAMADA Tsuyoshi -Yamasaki Masahide -Yamazaki Masashi -Yan Feng -Yan Zhu -Yang Bai -Yang Li -Yang Pengfei -yangchenliang -Yann Autissier -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -youcai -Youcef YEKHLEF -Youfu Zhang -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yufei Xiong -Yuhao Fang -Yuichiro Kaneko -YujiOshima -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Blusseau <90z7oey02@sneakemail.com> -Yves Junqueira -Zac Dover -Zach Borboa -Zach Gershman -Zachary Jaffee -Zain Memon -Zaiste! 
-Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -zhangguanzhang -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -ZhiPeng Lu -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álvaro Lázaro -Átila Camurça Alves -吴小白 <296015668@qq.com> -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 -정재영 diff --git a/vendor/github.com/moby/moby/NOTICE b/vendor/github.com/moby/moby/NOTICE deleted file mode 100644 index 58b19b6d15b..00000000000 --- a/vendor/github.com/moby/moby/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/moby/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/moby/moby/libnetwork/resolvconf/resolvconf.go deleted file mode 100644 index da20e1c0318..00000000000 --- a/vendor/github.com/moby/moby/libnetwork/resolvconf/resolvconf.go +++ /dev/null @@ -1,248 +0,0 @@ -// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf -package resolvconf - -import ( - "bytes" - "context" - "os" - "regexp" - "strings" - "sync" - - "github.com/containerd/log" -) - -const ( - // defaultPath is the default path to the resolv.conf that contains information to resolve DNS. See Path(). - defaultPath = "/etc/resolv.conf" - // alternatePath is a path different from defaultPath, that may be used to resolve DNS. See Path(). - alternatePath = "/run/systemd/resolve/resolv.conf" -) - -// constants for the IP address type -const ( - IP = iota // IPv4 and IPv6 - IPv4 - IPv6 -) - -var ( - detectSystemdResolvConfOnce sync.Once - pathAfterSystemdDetection = defaultPath -) - -// Path returns the path to the resolv.conf file that libnetwork should use. -// -// When /etc/resolv.conf contains 127.0.0.53 as the only nameserver, then -// it is assumed systemd-resolved manages DNS. Because inside the container 127.0.0.53 -// is not a valid DNS server, Path() returns /run/systemd/resolve/resolv.conf -// which is the resolv.conf that systemd-resolved generates and manages. -// Otherwise Path() returns /etc/resolv.conf. -// -// Errors are silenced as they will inevitably resurface at future open/read calls. 
-// -// More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf -func Path() string { - detectSystemdResolvConfOnce.Do(func() { - candidateResolvConf, err := os.ReadFile(defaultPath) - if err != nil { - // silencing error as it will resurface at next calls trying to read defaultPath - return - } - ns := GetNameservers(candidateResolvConf, IP) - if len(ns) == 1 && ns[0] == "127.0.0.53" { - pathAfterSystemdDetection = alternatePath - log.G(context.TODO()).Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) - } - }) - return pathAfterSystemdDetection -} - -const ( - // ipLocalhost is a regex pattern for IPv4 or IPv6 loopback range. - ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` - ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` - ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock - - // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also - // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants - // -- e.g. other link-local types -- either won't work in containers or are unnecessary. - // For readability and sufficiency for Docker purposes this seemed more reasonable than a - // 1000+ character regexp with exact and complete IPv6 validation - ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` -) - -var ( - // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS - defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} - defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} - - localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`) - nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) - nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) - nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) - nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`) - searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) - optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) -) - -// File contains the resolv.conf content and its hash -type File struct { - Content []byte - Hash []byte -} - -// Get returns the contents of /etc/resolv.conf and its hash -func Get() (*File, error) { - return GetSpecific(Path()) -} - -// GetSpecific returns the contents of the user specified resolv.conf file and its hash -func GetSpecific(path string) (*File, error) { - resolv, err := os.ReadFile(path) - if err != nil { - return nil, err - } - return &File{Content: resolv, Hash: hashData(resolv)}, nil -} - -// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: -// 1. It looks for localhost (127.*|::1) entries in the provided -// resolv.conf, removing local nameserver entries, and, if the resulting -// cleaned config has no defined nameservers left, adds default DNS entries -// 2. 
Given the caller provides the enable/disable state of IPv6, the filter -// code will remove all IPv6 nameservers if it is not enabled for containers -func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { - cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) - // if IPv6 is not enabled, also clean out any IPv6 address nameserver - if !ipv6Enabled { - cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) - } - // if the resulting resolvConf has no more nameservers defined, add appropriate - // default DNS servers for IPv4 and (optionally) IPv6 - if len(GetNameservers(cleanedResolvConf, IP)) == 0 { - log.G(context.TODO()).Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) - dns := defaultIPv4Dns - if ipv6Enabled { - log.G(context.TODO()).Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) - dns = append(dns, defaultIPv6Dns...) - } - cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) - } - return &File{Content: cleanedResolvConf, Hash: hashData(cleanedResolvConf)}, nil -} - -// getLines parses input into lines and strips away comments. -func getLines(input []byte, commentMarker []byte) [][]byte { - lines := bytes.Split(input, []byte("\n")) - var output [][]byte - for _, currentLine := range lines { - commentIndex := bytes.Index(currentLine, commentMarker) - if commentIndex == -1 { - output = append(output, currentLine) - } else { - output = append(output, currentLine[:commentIndex]) - } - } - return output -} - -// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf -func GetNameservers(resolvConf []byte, kind int) []string { - var nameservers []string - for _, line := range getLines(resolvConf, []byte("#")) { - var ns [][]byte - if kind == IP { - ns = nsRegexp.FindSubmatch(line) - } else if kind == IPv4 { - ns = nsIPv4Regexpmatch.FindSubmatch(line) - } else if kind == IPv6 { - ns = nsIPv6Regexpmatch.FindSubmatch(line) - } - if len(ns) > 0 { - nameservers = append(nameservers, string(ns[1])) - } - } - return nameservers -} - -// GetNameserversAsCIDR returns nameservers (if any) listed in -// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") -// This function's output is intended for net.ParseCIDR -func GetNameserversAsCIDR(resolvConf []byte) []string { - var nameservers []string - for _, nameserver := range GetNameservers(resolvConf, IP) { - var address string - // If IPv6, strip zone if present - if strings.Contains(nameserver, ":") { - address = strings.Split(nameserver, "%")[0] + "/128" - } else { - address = nameserver + "/32" - } - nameservers = append(nameservers, address) - } - return nameservers -} - -// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf -// If more than one search line is encountered, only the contents of the last -// one is returned. -func GetSearchDomains(resolvConf []byte) []string { - var domains []string - for _, line := range getLines(resolvConf, []byte("#")) { - match := searchRegexp.FindSubmatch(line) - if match == nil { - continue - } - domains = strings.Fields(string(match[1])) - } - return domains -} - -// GetOptions returns options (if any) listed in /etc/resolv.conf -// If more than one options line is encountered, only the contents of the last -// one is returned. 
-func GetOptions(resolvConf []byte) []string { - var options []string - for _, line := range getLines(resolvConf, []byte("#")) { - match := optionsRegexp.FindSubmatch(line) - if match == nil { - continue - } - options = strings.Fields(string(match[1])) - } - return options -} - -// Build generates and writes a configuration file to path containing a nameserver -// entry for every element in nameservers, a "search" entry for every element in -// dnsSearch, and an "options" entry for every element in dnsOptions. It returns -// a File containing the generated content and its (sha256) hash. -func Build(path string, nameservers, dnsSearch, dnsOptions []string) (*File, error) { - content := bytes.NewBuffer(nil) - if len(dnsSearch) > 0 { - if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." { - if _, err := content.WriteString("search " + searchString + "\n"); err != nil { - return nil, err - } - } - } - for _, dns := range nameservers { - if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { - return nil, err - } - } - if len(dnsOptions) > 0 { - if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" { - if _, err := content.WriteString("options " + optsString + "\n"); err != nil { - return nil, err - } - } - } - - if err := os.WriteFile(path, content.Bytes(), 0o644); err != nil { - return nil, err - } - - return &File{Content: content.Bytes(), Hash: hashData(content.Bytes())}, nil -} diff --git a/vendor/github.com/moby/moby/libnetwork/resolvconf/utils.go b/vendor/github.com/moby/moby/libnetwork/resolvconf/utils.go deleted file mode 100644 index 8e005e2a192..00000000000 --- a/vendor/github.com/moby/moby/libnetwork/resolvconf/utils.go +++ /dev/null @@ -1,14 +0,0 @@ -package resolvconf - -import ( - "crypto/sha256" - "encoding/hex" -) - -// hashData returns the sha256 sum of data. -func hashData(data []byte) []byte { - f := sha256.Sum256(data) - out := make([]byte, 2*sha256.Size) - hex.Encode(out, f[:]) - return append([]byte("sha256:"), out...) -} diff --git a/vendor/github.com/mudler/entities/pkg/entities/entity.go b/vendor/github.com/mudler/entities/pkg/entities/entity.go index ae1992430ad..cc08c0997a3 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/entity.go +++ b/vendor/github.com/mudler/entities/pkg/entities/entity.go @@ -11,13 +11,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ - package entities import ( + "fmt" "os" - "strconv" "strings" + "time" ) const ( @@ -26,6 +26,13 @@ const ( ENTITY_ENV_DEF_SHADOW = "ENTITY_DEFAULT_SHADOW" ENTITY_ENV_DEF_GSHADOW = "ENTITY_DEFAULT_GSHADOW" ENTITY_ENV_DEF_DYNAMIC_RANGE = "ENTITY_DYNAMIC_RANGE" + ENTITY_ENV_DEF_DELAY = "ENTITY_DEFAULT_DELAY" + ENTITY_ENV_DEF_INTERVAL = "ENTITY_DEFAULT_INTERVAL" + + // https://systemd.io/UIDS-GIDS/#summary + // https://systemd.io/UIDS-GIDS/#special-distribution-uid-ranges + HumanIDMin = 1000 + HumanIDMax = 60000 ) // Entity represent something that needs to be applied to a file @@ -47,34 +54,22 @@ func entityIdentifier(s string) string { return fs[0] } -func DynamicRange() (int, int) { - // Follow Gentoo way - uid_start := 999 - uid_end := 500 +func RetryForDuration() (time.Duration, error) { + s := os.Getenv(ENTITY_ENV_DEF_DELAY) + if s != "" { + // convert string to int64 + return time.ParseDuration(fmt.Sprintf("%s", s)) + } - // Environment variable must be in the format: + '-' + - env := os.Getenv(ENTITY_ENV_DEF_DYNAMIC_RANGE) - if env != "" { - ranges := strings.Split(env, "-") - if len(ranges) == 2 { - minUid, err := strconv.Atoi(ranges[0]) - if err != nil { - // Ignore error - goto end - } - maxUid, err := strconv.Atoi(ranges[1]) - if err != nil { - // ignore error - goto end - } + return time.ParseDuration("5s") +} - if minUid < maxUid && minUid >= 0 && minUid < 65534 && maxUid > 0 && maxUid < 65534 { - uid_start = maxUid - uid_end = minUid - } - } +func RetryIntervalDuration() (time.Duration, error) { + s := os.Getenv(ENTITY_ENV_DEF_INTERVAL) + if s != "" { + // convert string to int64 + return time.ParseDuration(fmt.Sprintf("%s", s)) } -end: - return uid_start, uid_end + return time.ParseDuration("300ms") } diff --git a/vendor/github.com/mudler/entities/pkg/entities/group.go b/vendor/github.com/mudler/entities/pkg/entities/group.go index 95d29efd41a..ffe0eef59a1 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/group.go +++ b/vendor/github.com/mudler/entities/pkg/entities/group.go @@ -11,18 +11,19 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ - package entities import ( "bufio" + "context" "fmt" "io" - "io/ioutil" "os" + "path/filepath" "strconv" "strings" + "github.com/gofrs/flock" permbits "github.com/phayes/permbits" "github.com/pkg/errors" ) @@ -84,31 +85,27 @@ func parseGroupLine(line string) (string, Group, error) { } func groupGetFreeGid(path string) (int, error) { - uidStart, uidEnd := DynamicRange() - mGids := make(map[int]*Group) - ans := -1 - - current, err := ParseGroup(path) - if err != nil { - return ans, err - } + result := -1 + allGroups, _ := ParseGroup(path) + groupSet := make(map[int]struct{}) - for _, e := range current { - mGids[*e.Gid] = &e + for _, groupID := range allGroups { + groupSet[*groupID.Gid] = struct{}{} } - for i := uidStart; i >= uidEnd; i-- { - if _, ok := mGids[i]; !ok { - ans = i - break + for i := HumanIDMin; i <= HumanIDMax; i++ { + if _, found := groupSet[i]; found { + continue // uid in use, skip it } + result = i // found a free one, stop here + break } - if ans < 0 { - return ans, errors.New("No free GID found") + if result == -1 { + return result, errors.New("no available gid in range") } - return ans, nil + return result, nil } type Group struct { @@ -149,7 +146,26 @@ func (u Group) String() string { func (u Group) Delete(s string) error { s = GroupsDefault(s) - input, err := ioutil.ReadFile(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -174,7 +190,7 @@ func (u Group) Delete(s string) error { output := strings.Join(lines, "\n") - err = ioutil.WriteFile(s, []byte(output), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(output), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } @@ -184,8 +200,27 @@ func (u Group) Delete(s string) error { func (u Group) Create(s string) error { s = GroupsDefault(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } - u, err := u.prepare(s) + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + + u, err = u.prepare(s) if err != nil { return errors.Wrap(err, "Failed entity preparation") } @@ -277,7 +312,27 @@ func (u Group) Apply(s string, safe bool) error { } if _, ok := current[u.Name]; ok { - input, err := ioutil.ReadFile(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() 
+ lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -316,7 +371,7 @@ func (u Group) Apply(s string, safe bool) error { } } output := strings.Join(lines, "\n") - err = ioutil.WriteFile(s, []byte(output), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(output), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } diff --git a/vendor/github.com/mudler/entities/pkg/entities/gshadow.go b/vendor/github.com/mudler/entities/pkg/entities/gshadow.go index 4e44c9a2a80..e4bf76815ed 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/gshadow.go +++ b/vendor/github.com/mudler/entities/pkg/entities/gshadow.go @@ -11,14 +11,12 @@ See the License for the specific language governing permissions and limitations under the License. */ - package entities import ( "bufio" "bytes" "io" - "io/ioutil" "os" "strconv" "strings" @@ -97,7 +95,7 @@ func (u GShadow) String() string { func (u GShadow) Delete(s string) error { s = GShadowDefault(s) - input, err := ioutil.ReadFile(s) + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -107,7 +105,7 @@ func (u GShadow) Delete(s string) error { } lines := bytes.Replace(input, []byte(u.String()+"\n"), []byte(""), 1) - err = ioutil.WriteFile(s, []byte(lines), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(lines), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } @@ -169,7 +167,7 @@ func (u GShadow) Apply(s string, safe bool) error { } if _, ok := current[u.Name]; ok { - input, err := ioutil.ReadFile(s) + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -182,7 +180,7 @@ func (u GShadow) Apply(s string, safe bool) error { } } output := strings.Join(lines, "\n") - err = ioutil.WriteFile(s, []byte(output), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(output), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } diff --git a/vendor/github.com/mudler/entities/pkg/entities/parser.go b/vendor/github.com/mudler/entities/pkg/entities/parser.go index af7650307ee..8878a50beb1 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/parser.go +++ b/vendor/github.com/mudler/entities/pkg/entities/parser.go @@ -11,11 +11,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ - package entities import ( - "io/ioutil" + "os" "github.com/pkg/errors" "gopkg.in/yaml.v2" @@ -85,7 +84,7 @@ func (p Parser) ReadEntityFromBytes(yamlFile []byte) (Entity, error) { return nil, errors.New("Unsupported format") } func (p Parser) ReadEntity(entity string) (Entity, error) { - yamlFile, err := ioutil.ReadFile(entity) + yamlFile, err := os.ReadFile(entity) if err != nil { return nil, errors.Wrap(err, "Failed while reading entity file") } diff --git a/vendor/github.com/mudler/entities/pkg/entities/shadow.go b/vendor/github.com/mudler/entities/pkg/entities/shadow.go index 72193b5aec1..a01531f47c4 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/shadow.go +++ b/vendor/github.com/mudler/entities/pkg/entities/shadow.go @@ -11,22 +11,23 @@ See the License for the specific language governing permissions and limitations under the License. */ - package entities import ( "bufio" "bytes" + "context" "fmt" "io" - "io/ioutil" "math/rand" "os" + "path/filepath" "strconv" "strings" "time" - "github.com/tredoe/osutil/v2/userutil/crypt/sha512_crypt" + "github.com/gofrs/flock" + "github.com/tredoe/osutil/user/crypt/sha512_crypt" permbits "github.com/phayes/permbits" "github.com/pkg/errors" @@ -168,7 +169,27 @@ func (u Shadow) prepare() Shadow { // FIXME: Delete can be shared across all of the supported Entities func (u Shadow) Delete(s string) error { s = ShadowDefault(s) - input, err := ioutil.ReadFile(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -178,7 +199,7 @@ func (u Shadow) Delete(s string) error { } lines := bytes.Replace(input, []byte(u.String()+"\n"), []byte(""), 1) - err = ioutil.WriteFile(s, []byte(lines), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(lines), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } @@ -190,6 +211,26 @@ func (u Shadow) Delete(s string) error { func (u Shadow) Create(s string) error { s = ShadowDefault(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + u = u.prepare() current, err := ParseShadow(s) if err != nil { @@ -229,7 +270,27 @@ func (u Shadow) Apply(s string, safe bool) error { } if _, ok := current[u.Username]; ok { - input, err := ioutil.ReadFile(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := 
flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -242,7 +303,7 @@ func (u Shadow) Apply(s string, safe bool) error { } } output := strings.Join(lines, "\n") - err = ioutil.WriteFile(s, []byte(output), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(output), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } diff --git a/vendor/github.com/mudler/entities/pkg/entities/store.go b/vendor/github.com/mudler/entities/pkg/entities/store.go index 5509cb157cd..c79f19e5a52 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/store.go +++ b/vendor/github.com/mudler/entities/pkg/entities/store.go @@ -13,12 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ - package entities import ( "errors" - "io/ioutil" + "os" "path/filepath" "regexp" ) @@ -42,7 +41,7 @@ func NewEntitiesStore() *EntitiesStore { func (s *EntitiesStore) Load(dir string) error { var regexConfs = regexp.MustCompile(`.yml$|.yaml$`) - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { return err } diff --git a/vendor/github.com/mudler/entities/pkg/entities/user.go b/vendor/github.com/mudler/entities/pkg/entities/user.go index 30b8814b421..08de1af27c6 100644 --- a/vendor/github.com/mudler/entities/pkg/entities/user.go +++ b/vendor/github.com/mudler/entities/pkg/entities/user.go @@ -11,20 +11,22 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ - package entities import ( "bytes" + "context" "fmt" - "io/ioutil" "os" + "path/filepath" "strconv" "strings" + xusers "github.com/mauromorales/xpasswd/pkg/users" permbits "github.com/phayes/permbits" "github.com/pkg/errors" - passwd "github.com/willdonnelly/passwd" + + "github.com/gofrs/flock" ) func UserDefault(s string) string { @@ -39,31 +41,19 @@ func UserDefault(s string) string { } func userGetFreeUid(path string) (int, error) { - uidStart, uidEnd := DynamicRange() - mUids := make(map[int]*UserPasswd) - ans := -1 - - current, err := ParseUser(path) + list := xusers.NewUserList() + list.SetPath(path) + err := list.Load() if err != nil { - return ans, err - } - - for _, e := range current { - mUids[e.Uid] = &e + return 0, errors.Wrap(err, "Failed parsing passwd") } - for i := uidStart; i >= uidEnd; i-- { - if _, ok := mUids[i]; !ok { - ans = i - break - } - } - - if ans < 0 { - return ans, errors.New("No free UID found") + id, err := list.GenerateUIDInRange(HumanIDMin, HumanIDMax) + if err != nil { + return 0, errors.Wrap(err, "Failed generating a unique uid") } - return ans, nil + return id, nil } type UserPasswd struct { @@ -80,44 +70,47 @@ type UserPasswd struct { func ParseUser(path string) (map[string]UserPasswd, error) { ans := make(map[string]UserPasswd, 0) - current, err := passwd.ParseFile(path) - if err != nil { - return ans, errors.Wrap(err, "Failed parsing passwd") + list := xusers.NewUserList() + list.SetPath(path) + users, err := list.GetAll() + if err != nil && len(users) == 0 { + return ans, errors.Wrap(err, "Failed loading user list") } + _, err = permbits.Stat(path) if err != nil { return ans, errors.Wrap(err, "Failed getting permissions") } - for k, v := range current { - - uid, err := strconv.Atoi(v.Uid) + for _, user := range users { + username := user.Username() + uid, err := user.UID() if err != nil { fmt.Println(fmt.Sprintf( "WARN: Found invalid uid for user %s: %s.\nSetting 0. 
Check the file soon.", - k, err.Error(), + username, err.Error(), )) uid = 0 } - gid, err := strconv.Atoi(v.Gid) + gid, err := user.GID() if err != nil { fmt.Println(fmt.Sprintf( "WARN: Found invalid gid for user %s and uid %d: %s", - k, uid, err.Error(), + username, uid, err.Error(), )) // Set gid with the same value of uid gid = uid } - ans[k] = UserPasswd{ - Username: k, - Password: v.Pass, + ans[username] = UserPasswd{ + Username: username, + Password: user.Password(), Uid: uid, Gid: gid, - Info: v.Gecos, - Homedir: v.Home, - Shell: v.Shell, + Info: user.RealName(), + Homedir: user.HomeDir(), + Shell: user.Shell(), } } @@ -175,7 +168,29 @@ func (u UserPasswd) String() string { func (u UserPasswd) Delete(s string) error { s = UserDefault(s) - input, err := ioutil.ReadFile(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -185,7 +200,7 @@ func (u UserPasswd) Delete(s string) error { } lines := bytes.Replace(input, []byte(u.String()+"\n"), []byte(""), 1) - err = ioutil.WriteFile(s, []byte(lines), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(lines), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } @@ -195,17 +210,41 @@ func (u UserPasswd) Delete(s string) error { func (u UserPasswd) Create(s string) error { s = UserDefault(s) + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } - u, err := u.prepare(s) + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + + i, err := RetryIntervalDuration() + if err != nil { + return errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } + + u, err = u.prepare(s) if err != nil { return errors.Wrap(err, "Failed entity preparation") } - current, err := passwd.ParseFile(s) + list := xusers.NewUserList() + list.SetPath(s) + err = list.Load() if err != nil { return errors.Wrap(err, "Failed parsing passwd") } - if _, ok := current[u.Username]; ok { + + user := list.Get(u.Username) + if user != nil { return errors.New("Entity already present") } permissions, err := permbits.Stat(s) @@ -266,8 +305,28 @@ func (u UserPasswd) Apply(s string, safe bool) error { } if _, ok := current[u.Username]; ok { + d, err := RetryForDuration() + if err != nil { + return errors.Wrap(err, "Failed getting delay") + } + + baseName := filepath.Base(s) + fileLock := flock.New(fmt.Sprintf("/var/lock/%s.lock", baseName)) + defer os.Remove(fileLock.Path()) + defer fileLock.Close() + lockCtx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + + i, err := RetryIntervalDuration() + if err != nil { + return 
errors.Wrap(err, "Failed getting interval") + } + locked, err := fileLock.TryLockContext(lockCtx, i) + if err != nil || !locked { + return errors.Wrap(err, "Failed locking file") + } - input, err := ioutil.ReadFile(s) + input, err := os.ReadFile(s) if err != nil { return errors.Wrap(err, "Could not read input file") } @@ -282,7 +341,7 @@ func (u UserPasswd) Apply(s string, safe bool) error { } } output := strings.Join(lines, "\n") - err = ioutil.WriteFile(s, []byte(output), os.FileMode(permissions)) + err = os.WriteFile(s, []byte(output), os.FileMode(permissions)) if err != nil { return errors.Wrap(err, "Could not write") } diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 9a65dd10c5d..76577dc78e5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,83 @@ +## 2.19.0 + +### Features + +[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering. + +## 2.18.0 + +### Features +- Add --slience-skips and --force-newlines [f010b65] +- fail when no tests were run and --fail-on-empty was set [d80eebe] + +### Fixes +- Fix table entry context edge case [42013d6] + +### Maintenance +- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7] +- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7] + +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + +## 2.17.2 + +### Fixes +- fix: close files [32259c8] +- fix github output log level for skipped specs [780e7a3] + +### Maintenance +- Bump github.com/google/pprof [d91fe4e] +- Bump github.com/go-task/slim-sprig to v3 [8cb662e] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422] +- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4] +- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8] +- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4] +- Fix test for gomega version bump [f2fcd97] +- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2] +- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26] +- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170] +- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a] + +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + +## 2.16.0 + +### Features +- add SpecContext to reporting nodes + +### Fixes +- merge coverages instead of combining them (#1329) (#1340) [23f0cc5] +- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7] + +### Maintenance +- docs/index.md: Typo [2cebe8d] +- fix docs [06de431] +- chore: test with Go 1.22 (#1352) [898cba9] +- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120] +- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed] +- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69] +- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d] +- Fix docs for handling failures in goroutines (#1339) [4471b2e] + ## 2.15.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md 
b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 1da92fe7ee2..80de566a522 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Make sure all the tests succeed via `ginkgo -r -p` -- Vet your changes via `go vet ./...` -- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Run `make` or: + - Install ginkgo locally via `go install ./...` + - Make sure all the tests succeed via `ginkgo -r -p` + - Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! \ No newline at end of file +Thanks for supporting Ginkgo! diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile new file mode 100644 index 00000000000..cb099aff950 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -0,0 +1,11 @@ +# default task since it's first +.PHONY: all +all: vet test + +.PHONY: test +test: + go run github.com/onsi/ginkgo/v2/ginkgo -r -p + +.PHONY: vet +vet: + go vet ./... diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 2d7a70eccbb..a3e8237e938 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels { return suiteLabels } +func getwd() (string, error) { + if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") { + // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache + // is shared between two different directories with the same test code. + return os.Getwd() + } + return "", nil +} + /* PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. See http://onsi.github.io/ginkgo/#previewing-specs for more information. @@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -783,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. 
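A minimal usage sketch for the getwd change above, not part of this diff: the illustrative suite bootstrap below shows one way a consumer could opt into the new GINKGO_PRESERVE_CACHE behaviour so that RunSpecs skips os.Getwd() and test results are not tied to the working directory. The package name, suite name, and the in-process os.Setenv call are assumptions for illustration; exporting the variable in the invoking shell should work just as well.

package mypkg_test

import (
	"os"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestMyPkg(t *testing.T) {
	// Assumption: setting the variable in-process before RunSpecs behaves
	// like exporting GINKGO_PRESERVE_CACHE=true in the shell, since the
	// getwd() helper above only consults os.Getenv when RunSpecs executes.
	os.Setenv("GINKGO_PRESERVE_CACHE", "true")

	// Standard Ginkgo v2 bootstrap: route Gomega failures into Ginkgo and
	// run the spec tree for this hypothetical package.
	RegisterFailHandler(Fail)
	RunSpecs(t, "MyPkg Suite")
}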
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7a18..b2dc59be66f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec979d..cf3b7cb6d6d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 00000000000..3c5079ff4c7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 26de28b5707..8e16d2bb034 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ 
-1,7 +1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -// loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. - if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } @@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc3c..0e6ae1f2903 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 16f0dc22782..6a15f19ae04 
100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "sync" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { - //There's a reace in the internal integration tests if we don't make - //accessing _global_node_id_counter safe across goroutines. + // There's a reace in the internal integration tests if we don't make + // accessing _global_node_id_counter safe across goroutines. _global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 @@ -44,8 +43,8 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportSuiteBody func(types.Report) + ReportEachBody func(SpecContext, types.SpecReport) + ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool @@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} - //First get the CodeLocation up-to-date + // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: @@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} - //now process the rest of the args + // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): - break //ignore deprecated timeouts + break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { - node.ReportEachBody = arg.(func(types.SpecReport)) + if fn, ok := arg.(func(types.SpecReport)); ok { + node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } + } else { + node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { - node.ReportSuiteBody = arg.(func(types.Report)) + if fn, ok := arg.(func(types.Report)); ok { + node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } + } else { + node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - //validations + // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } diff --git 
a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 2b4db48af87..a3c9e6bf18f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } - if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } + + if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") + suite.report.SuiteSucceeded = false + } } if ranBeforeSuite { @@ -594,8 +599,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func(SpecContext) { - nodes[i].ReportEachBody(report) + nodes[i].Body = func(ctx SpecContext) { + nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) @@ -762,7 +767,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() @@ -840,7 +845,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { - //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" @@ -918,9 +923,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ if outcomeFromRun != types.SpecStatePassed { additionalFailure := types.AdditionalFailure{ State: outcomeFromRun, - Failure: failure, //we make a copy - this will include all the configuration set up above... + Failure: failure, // we make a copy - this will include all the configuration set up above... } - //...and then we update the failure with the details from failureFromRun + // ...and then we update the failure with the details from failureFromRun additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation additionalFailure.Failure.ProgressReport = types.ProgressReport{} if outcome == types.SpecStateTimedout { @@ -959,7 +964,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where // the spec is actually stuck sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) - //and now we wait for the grace period + // and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be75879..480730486af 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? + if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } @@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := 
r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 43244a9bd51..562e0f62ba2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -177,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -324,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index f33786a2d65..aa1a35176aa 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) { /* ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that -receives a SpecReport. They are called before the spec starts. +receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts. + +Example: + + ReportBeforeEach(func(report SpecReport) { // process report }) + ReportBeforeEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. 
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { +func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that -receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. +ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior. +They are called after the spec has completed and receive the final report for the spec. + +Example: + + ReportAfterEach(func(report SpecReport) { // process report }) + ReportAfterEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { +func ReportAfterEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function +that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportBeforeSuite(func(r Report) { // process report }) + ReportBeforeSuite(func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) @@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeSuite(body func(Report), args ...interface{}) bool { +func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) 
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } /* -ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion, +and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report }) + ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. -ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) +ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes @@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { +func ReportAfterSuite(text string, body any, args ...interface{}) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index a3aef821bff..c7de7a8be09 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -269,11 +269,15 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if internalBodyType.NumIn() > 0. 
{ + if internalBodyType.NumIn() > 0 { if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) { hasContext = true + if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) { + // we allow you to pass in a non-nil context + hasContext = false + } } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a75f..66463cf5ea1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -89,6 +90,9 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -264,7 +268,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -274,6 +278,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. 
Best paired with -v."}, @@ -331,6 +337,12 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873d0..de69f3022de 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651e7e..7fdc8aa23f3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type 
lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i 
+= n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index ed934647454..acab03492ac 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.15.0" +const VERSION = "2.19.0" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fe72a7b183f..62af14ad2f2 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,60 @@ +## 1.33.1 + +### Fixes +- fix confusing eventually docs [3a66379] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a] + +## 1.33.0 + +### Features + +`Receive` now accepts `Receive(<POINTER>, MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer. + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb] +- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596] + +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one.
+ +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + ## 1.30.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index c271a366ae0..9697d5134ff 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.30.0" +const GOMEGA_VERSION = "1.33.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -372,11 +372,11 @@ You can ensure that you get a number of consecutive successful tries before succ Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: - Eventually(..., "1s", "2s", ctx).Should(...) + Eventually(..., "10s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bce37..cde9e2ec8bd 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -553,7 +553,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 43f994374da..7ef27dc9c95 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher { // // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
// -// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing)) // Expect(myThing.Sprocket).Should(Equal("foo")) // Expect(myThing.IsValid()).Should(BeTrue()) +// +// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, +// you can pass a pointer to a variable of the approriate type first, and second a matcher: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - return &matchers.ReceiveMatcher{ - Arg: arg, + Args: args, } } @@ -394,7 +395,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb91949..4e3897858c7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 1936a2ba52f..948164eaf88 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -10,7 +11,7 @@ import ( ) type ReceiveMatcher struct { - Arg interface{} + Args []interface{} receivedValue reflect.Value channelClosed bool } @@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + var resultReference interface{} + + // Valid arg formats are as follows, always with optional POINTER before + // optional MATCHER: + // - Receive() + // - 
Receive(POINTER) + // - Receive(MATCHER) + // - Receive(POINTER, MATCHER) + args := matcher.Args + if len(args) > 0 { + arg := args[0] + _, isSubMatcher := arg.(omegaMatcher) + if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr { + // Consume optional POINTER arg first, if it ain't no matcher ;) + resultReference = arg + args = args[1:] + } + } + if len(args) > 0 { + arg := args[0] + subMatcher, hasSubMatcher = arg.(omegaMatcher) if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } + // At this point we assume the dev user wanted to assign a received + // value, so [POINTER,]MATCHER. + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1)) } + // Consume optional MATCHER arg. + args = args[1:] + } + if len(args) > 0 { + // If there are still args present, reject all. + return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher") } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ @@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } if hasSubMatcher { - if didReceive { - matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) + if !didReceive { + return false, nil } - return false, nil + matcher.receivedValue = value + if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match { + return match, err + } + // if we received a match, then fall through in order to handle an + // optional assignment of the received value to the specified reference. 
} if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) + if resultReference != nil { + outValue := reflect.ValueOf(resultReference) if value.Type().AssignableTo(outValue.Elem().Type()) { outValue.Elem().Set(value) @@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro outValue.Elem().Set(value.Elem()) return true, nil } else { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1)) } } @@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { @@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 581cf7cdfad..e6289204604 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -59,4 +59,10 @@ const ( // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. + AnnotationArtifactDescription = "org.opencontainers.artifact.description" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 1881b11814b..9654aa5af68 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -21,7 +21,7 @@ import digest "github.com/opencontainers/go-digest" // when marshalled to JSON. type Descriptor struct { // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` + MediaType string `json:"mediaType,omitempty"` // Digest is the digest of the targeted content. Digest digest.Digest `json:"digest"` @@ -52,7 +52,7 @@ type Descriptor struct { // Platform describes the platform which the image in the manifest runs on. 
type Platform struct { // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64le`. + // `amd64` or `ppc64`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. @@ -70,11 +70,3 @@ type Platform struct { // example `v7` to specify ARMv7 when architecture is `arm`. Variant string `json:"variant,omitempty"` } - -// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. -var DescriptorEmptyJSON = Descriptor{ - MediaType: MediaTypeEmptyJSON, - Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, - Size: 2, - Data: []byte(`{}`), -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index e2bed9d4e46..ed4a56e59e8 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -24,15 +24,9 @@ type Index struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` - // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. - ArtifactType string `json:"artifactType,omitempty"` - // Manifests references platform specific manifests. Manifests []Descriptor `json:"manifests"` - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - // Annotations contains arbitrary metadata for the image index. Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go index c5503cb3053..fc79e9e0d14 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -15,14 +15,10 @@ package v1 const ( - // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout + // ImageLayoutFile is the file name of oci image layout file ImageLayoutFile = "oci-layout" // ImageLayoutVersion is the version of ImageLayout ImageLayoutVersion = "1.0.0" - // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout - ImageIndexFile = "index.json" - // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout - ImageBlobsDir = "blobs" ) // ImageLayout is the structure in the "oci-layout" file, found in the root diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 26fec52a6bc..4ce7b54ccde 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -39,3 +39,11 @@ type Manifest struct { // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } + +// ScratchDescriptor is the descriptor of a blob with content of `{}`. 
+var ScratchDescriptor = Descriptor{ + MediaType: MediaTypeScratch, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index ce8313e7962..5dd31255eb0 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -21,20 +21,12 @@ const ( // MediaTypeLayoutHeader specifies the media type for the oci-layout. MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" - // MediaTypeImageManifest specifies the media type for an image manifest. MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - // MediaTypeImageConfig specifies the media type for the image configuration. - MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". - MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" -) + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" -const ( // MediaTypeImageLayer is the media type used for layers referenced by the manifest. MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" @@ -45,15 +37,7 @@ const ( // MediaTypeImageLayerZstd is the media type used for zstd compressed // layers referenced by the manifest. MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" -) -// Non-distributable layer media-types. -// -// Deprecated: Non-distributable layers are deprecated, and not recommended -// for future use. Implementations SHOULD NOT produce new non-distributable -// layers. -// https://github.com/opencontainers/image-spec/pull/965 -const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. // @@ -82,4 +66,10 @@ const ( // layers. // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` + MediaTypeScratch = "application/vnd.oci.scratch.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 6d01f6e7175..3d4119b4416 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.6" + VersionDev = "-rc.3" ) // Version is the specification version that the package types support. 
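The image-spec hunks above pin the vendored copy back to the rc.3 pre-release, where the placeholder `{}` blob is still exposed as `MediaTypeScratch`/`ScratchDescriptor` rather than the later `MediaTypeEmptyJSON`/`DescriptorEmptyJSON` names. A minimal, hypothetical sketch (not taken from this repository) of how a consumer of this vendored copy would refer to those rc.3 names:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Under the rc.3 vendoring shown above, the empty `{}` blob is
	// v1.ScratchDescriptor and its media type is v1.MediaTypeScratch.
	m := v1.Manifest{
		MediaType: v1.MediaTypeImageManifest,
		Config:    v1.ScratchDescriptor,
		Layers:    []v1.Descriptor{},
	}
	b, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(b))
}
```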
diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index d53f4397145..9f8439cc7d1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -553,7 +553,7 @@ complete solutions exist out there. ## Versioning -Go-toml follows [Semantic Versioning](https://semver.org). The supported version +Go-toml follows [Semantic Versioning](http://semver.org/). The supported version of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this document. The last two major versions of Go are supported (see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index b3b177e7cea..07aceb9024f 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -357,9 +357,9 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r if !ctx.inline { b = enc.encodeComment(ctx.indent, options.comment, b) - b = enc.indent(ctx.indent, b) } + b = enc.indent(ctx.indent, b) b = enc.encodeKey(b, ctx.key) b = append(b, " = "...) diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 5c19845e1e6..70f6ec5720c 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -60,7 +60,7 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // are ignored. See Decoder.DisallowUnknownFields() to change this behavior. // // When a TOML local date, time, or date-time is decoded into a time.Time, its -// value is represented in time.Local timezone. Otherwise the appropriate Local* +// value is represented in time.Local timezone. Otherwise the approriate Local* // structure is used. For time values, precision up to the nanosecond is // supported by truncating extra digits. 
// @@ -746,7 +746,7 @@ func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) e } return d.unmarshalInlineTable(itable, elem) default: - return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind()) + return unstable.NewParserError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) } it := itable.Children() @@ -887,11 +887,6 @@ func init() { } func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { - kind := v.Kind() - if kind == reflect.Float32 || kind == reflect.Float64 { - return d.unmarshalFloat(value, v) - } - i, err := parseInteger(value.Data) if err != nil { return err @@ -899,7 +894,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error var r reflect.Value - switch kind { + switch v.Kind() { case reflect.Int64: v.SetInt(i) return nil @@ -1039,9 +1034,15 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node mv := v.MapIndex(mk) set := false - if !mv.IsValid() || key.IsLast() { + if !mv.IsValid() { set = true mv = reflect.New(v.Type().Elem()).Elem() + } else { + if key.IsLast() { + var x interface{} + mv = reflect.ValueOf(&x).Elem() + set = true + } } nv, err := d.handleKeyValueInner(key, value, mv) diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go index f526bf2c090..b60d9bfd6d3 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go @@ -58,7 +58,7 @@ func (c *Iterator) Node() *Node { // - Table and ArrayTable's children represent a dotted key (same as // KeyValue, but without the first node being the value). // -// When relevant, Raw describes the range of bytes this node is referring to in +// When relevant, Raw describes the range of bytes this node is refering to in // the input document. Use Parser.Raw() to retrieve the actual bytes. type Node struct { Kind Kind diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go index 571f4e951e5..52db88e7a57 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -132,12 +132,12 @@ func (p *Parser) NextExpression() bool { } // Expression returns a pointer to the node representing the last successfully -// parsed expression. +// parsed expresion. func (p *Parser) Expression() *Node { return p.builder.NodeAt(p.ref) } -// Error returns any error that has occurred during parsing. +// Error returns any error that has occured during parsing. 
func (p *Parser) Error() error { return p.err } @@ -402,7 +402,6 @@ func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] parent := p.builder.Push(Node{ Kind: InlineTable, - Raw: p.Range(b[:1]), }) first := true diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml deleted file mode 100644 index fd6c6db713d..00000000000 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -env: - - GO111MODULE=off - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - master - -matrix: - fast_finish: true - allow_failures: - - go: master - -sudo: false - -script: - - go test -v -cpu=2 - - go test -v -cpu=2 -race - - go test -v -cpu=2 -tags noasm - - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go deleted file mode 100644 index bc5e78d40f0..00000000000 --- a/vendor/github.com/pierrec/lz4/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build lz4debug - -package lz4 - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -const debugFlag = true - -func debug(args ...interface{}) { - _, file, line, _ := runtime.Caller(1) - file = filepath.Base(file) - - f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) - if f[len(f)-1] != '\n' { - f += "\n" - } - fmt.Fprintf(os.Stderr, f, args[1:]...) -} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go deleted file mode 100644 index 44211ad9645..00000000000 --- a/vendor/github.com/pierrec/lz4/debug_stub.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !lz4debug - -package lz4 - -const debugFlag = false - -func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go deleted file mode 100644 index 43cc14fbe2e..00000000000 --- a/vendor/github.com/pierrec/lz4/decode_amd64.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -package lz4 - -//go:noescape -func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go deleted file mode 100644 index 919888edf7d..00000000000 --- a/vendor/github.com/pierrec/lz4/decode_other.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build !amd64 appengine !gc noasm - -package lz4 - -func decodeBlock(dst, src []byte) (ret int) { - const hasError = -2 - defer func() { - if recover() != nil { - ret = hasError - } - }() - - var si, di int - for { - // Literals and match lengths (token). - b := int(src[si]) - si++ - - // Literals. - if lLen := b >> 4; lLen > 0 { - switch { - case lLen < 0xF && si+16 < len(src): - // Shortcut 1 - // if we have enough room in src and dst, and the literals length - // is small enough (0..14) then copy all 16 bytes, even if not all - // are part of the literals. - copy(dst[di:], src[si:si+16]) - si += lLen - di += lLen - if mLen := b & 0xF; mLen < 0xF { - // Shortcut 2 - // if the match length (4..18) fits within the literals, then copy - // all 18 bytes, even if not all are part of the literals. - mLen += 4 - if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { - i := di - offset - end := i + 18 - if end > len(dst) { - // The remaining buffer may not hold 18 bytes. - // See https://github.com/pierrec/lz4/issues/51. 
- end = len(dst) - } - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue - } - } - case lLen == 0xF: - for src[si] == 0xFF { - lLen += 0xFF - si++ - } - lLen += int(src[si]) - si++ - fallthrough - default: - copy(dst[di:di+lLen], src[si:si+lLen]) - si += lLen - di += lLen - } - } - if si >= len(src) { - return di - } - - offset := int(src[si]) | int(src[si+1])<<8 - if offset == 0 { - return hasError - } - si += 2 - - // Match. - mLen := b & 0xF - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - si++ - } - mLen += int(src[si]) - si++ - } - mLen += minMatch - - // Copy the match. - expanded := dst[di-offset:] - if mLen > offset { - // Efficiently copy the match dst[di-offset:di] into the dst slice. - bytesToCopy := offset * (mLen / offset) - for n := offset; n <= bytesToCopy+offset; n *= 2 { - copy(expanded[n:], expanded[:n]) - } - di += bytesToCopy - mLen -= bytesToCopy - } - di += copy(dst[di:di+mLen], expanded[:mLen]) - } -} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go deleted file mode 100644 index 1c45d1813ce..00000000000 --- a/vendor/github.com/pierrec/lz4/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -package lz4 - -import ( - "errors" - "fmt" - "os" - rdebug "runtime/debug" -) - -var ( - // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed - // block is corrupted or the destination buffer is not large enough for the uncompressed data. - ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") - // ErrInvalid is returned when reading an invalid LZ4 archive. - ErrInvalid = errors.New("lz4: bad magic number") - // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. - ErrBlockDependency = errors.New("lz4: block dependency not supported") - // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. - ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") -) - -func recoverBlock(e *error) { - if r := recover(); r != nil && *e == nil { - if debugFlag { - fmt.Fprintln(os.Stderr, r) - rdebug.PrintStack() - } - *e = ErrInvalidSourceShortBuffer - } -} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go deleted file mode 100644 index a3284bdf708..00000000000 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package lz4 implements reading and writing lz4 compressed data (a frame), -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. -// -// Although the block level compression and decompression functions are exposed and are fully compatible -// with the lz4 block format definition, they are low level and should not be used directly. -// For a complete description of an lz4 compressed block, see: -// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -// -// See https://github.com/Cyan4973/lz4 for the reference C implementation. -// -package lz4 - -import ( - "math/bits" - "sync" -) - -const ( - // Extension is the LZ4 frame file name extension - Extension = ".lz4" - // Version is the LZ4 frame format version - Version = 1 - - frameMagic uint32 = 0x184D2204 - frameSkipMagic uint32 = 0x184D2A50 - frameMagicLegacy uint32 = 0x184C2102 - - // The following constants are used to setup the compression algorithm. 
- minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks - compressedBlockFlag = 1 << 31 - compressedBlockMask = compressedBlockFlag - 1 - - // hashLog determines the size of the hash table used to quickly find a previous match position. - // Its value influences the compression speed and memory usage, the lower the faster, - // but at the expense of the compression ratio. - // 16 seems to be the best compromise for fast compression. - hashLog = 16 - htSize = 1 << hashLog - - mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. -) - -// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -const ( - blockSize64K = 1 << (16 + 2*iota) - blockSize256K - blockSize1M - blockSize4M -) - -var ( - // Keep a pool of buffers for each valid block sizes. - bsMapValue = [...]*sync.Pool{ - newBufferPool(2 * blockSize64K), - newBufferPool(2 * blockSize256K), - newBufferPool(2 * blockSize1M), - newBufferPool(2 * blockSize4M), - } -) - -// newBufferPool returns a pool for buffers of the given size. -func newBufferPool(size int) *sync.Pool { - return &sync.Pool{ - New: func() interface{} { - return make([]byte, size) - }, - } -} - -// getBuffer returns a buffer to its pool. -func getBuffer(size int) []byte { - idx := blockSizeValueToIndex(size) - 4 - return bsMapValue[idx].Get().([]byte) -} - -// putBuffer returns a buffer to its pool. -func putBuffer(size int, buf []byte) { - if cap(buf) > 0 { - idx := blockSizeValueToIndex(size) - 4 - bsMapValue[idx].Put(buf[:cap(buf)]) - } -} -func blockSizeIndexToValue(i byte) int { - return 1 << (16 + 2*uint(i)) -} -func isValidBlockSize(size int) bool { - const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M - - return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 -} -func blockSizeValueToIndex(size int) byte { - return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) -} - -// Header describes the various flags that can be set on a Writer or obtained from a Reader. -// The default values match those of the LZ4 frame format definition -// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). -// -// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller's responsibility to check them if necessary. -type Header struct { - BlockChecksum bool // Compressed blocks checksum flag. - NoChecksum bool // Frame checksum flag. - BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. - Size uint64 // Frame total size. It is _not_ computed by the Writer. - CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). - done bool // Header processed flag (Read or Write and checked). 
-} - -// Reset reset internal status -func (h *Header) Reset() { - h.done = false -} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go deleted file mode 100644 index 9a0fb00709d..00000000000 --- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build go1.10 - -package lz4 - -import ( - "fmt" - "strings" -) - -func (h Header) String() string { - var s strings.Builder - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go deleted file mode 100644 index 12c761a2e7f..00000000000 --- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build !go1.10 - -package lz4 - -import ( - "bytes" - "fmt" -) - -func (h Header) String() string { - var s bytes.Buffer - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go deleted file mode 100644 index 87dd72bd0db..00000000000 --- a/vendor/github.com/pierrec/lz4/reader.go +++ /dev/null @@ -1,335 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// Reader implements the LZ4 frame decoder. -// The Header is set after the first call to Read(). -// The Header may change between Read() calls in case of concatenated frames. -type Reader struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - checksum xxh32.XXHZero // Frame hash. - skip int64 // Bytes to skip before next read. - dpos int64 // Position in dest -} - -// NewReader returns a new LZ4 frame decoder. -// No access to the underlying io.Reader is performed. -func NewReader(src io.Reader) *Reader { - r := &Reader{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. 
-func (z *Reader) readHeader(first bool) error { - defer z.checksum.Reset() - - buf := z.buf[:] - for { - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if !first && err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic == frameMagic { - break - } - if magic>>8 != frameSkipMagic>>8 { - return ErrInvalid - } - skipSize, err := z.readUint32() - if err != nil { - return err - } - z.pos += 4 - m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) - if err != nil { - return err - } - z.pos += m - } - - // Header. - if _, err := io.ReadFull(z.src, buf[:2]); err != nil { - return err - } - z.pos += 8 - - b := buf[0] - if v := b >> 6; v != Version { - return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) - } - if b>>5&1 == 0 { - return ErrBlockDependency - } - z.BlockChecksum = b>>4&1 > 0 - frameSize := b>>3&1 > 0 - z.NoChecksum = b>>2&1 == 0 - - bmsID := buf[1] >> 4 & 0x7 - if bmsID < 4 || bmsID > 7 { - return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) - } - bSize := blockSizeIndexToValue(bmsID - 4) - z.BlockMaxSize = bSize - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size id=%d size=%d", bmsID, bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - _, _ = z.checksum.Write(buf[0:2]) - - if frameSize { - buf := buf[:8] - if _, err := io.ReadFull(z.src, buf); err != nil { - return err - } - z.Size = binary.LittleEndian.Uint64(buf) - z.pos += 8 - _, _ = z.checksum.Write(buf) - } - - // Header checksum. - if _, err := io.ReadFull(z.src, buf[:1]); err != nil { - return err - } - z.pos++ - if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { - return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) - } - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. -// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. -func (z *Reader) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readHeader(true); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. - if debugFlag { - debug("reading block from writer") - } - // Reset uncompressed buffer - z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - - // Block length: 0 = end of frame, highest bit set: uncompressed. - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if bLen == 0 { - // End of frame reached. - if !z.NoChecksum { - // Validate the frame checksum. 
- checksum, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) - } - z.pos += 4 - if h := z.checksum.Sum32(); checksum != h { - return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) - } - } - - // Get ready for the next concatenated frame and keep the position. - pos := z.pos - z.Reset(z.src) - z.pos = pos - - // Since multiple frames can be concatenated, check for more. - return 0, z.readHeader(false) - } - - if debugFlag { - debug("raw block size %d", bLen) - } - if bLen&compressedBlockFlag > 0 { - // Uncompressed block. - bLen &= compressedBlockMask - if debugFlag { - debug("uncompressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - z.data = z.data[:bLen] - if _, err := io.ReadFull(z.src, z.data); err != nil { - return 0, err - } - z.pos += int64(bLen) - if z.OnBlockDone != nil { - z.OnBlockDone(int(bLen)) - } - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(z.data); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - } else { - // Compressed block. - if debugFlag { - debug("compressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(zdata); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - z.data = z.data[:n] - if z.OnBlockDone != nil { - z.OnBlockDone(n) - } - } - - if !z.NoChecksum { - _, _ = z.checksum.Write(z.data) - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - } - z.idx = 0 - } - - if z.skip > int64(len(z.data[z.idx:])) { - z.skip -= int64(len(z.data[z.idx:])) - z.dpos += int64(len(z.data[z.idx:])) - z.idx = len(z.data) - return 0, nil - } - - z.idx += int(z.skip) - z.dpos += z.skip - z.skip = 0 - - n := copy(buf, z.data[z.idx:]) - z.idx += n - z.dpos += int64(n) - if debugFlag { - debug("copied %d bytes to input", n) - } - - return n, nil -} - -// Seek implements io.Seeker, but supports seeking forward from the current -// position only. Any other seek will return an error. Allows skipping output -// bytes which aren't needed, which in some scenarios is faster than reading -// and discarding them. -// Note this may cause future calls to Read() to read 0 bytes if all of the -// data they would have returned is skipped. -func (z *Reader) Seek(offset int64, whence int) (int64, error) { - if offset < 0 || whence != io.SeekCurrent { - return z.dpos + z.skip, ErrUnsupportedSeek - } - z.skip += offset - return z.dpos + z.skip, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. 
-func (z *Reader) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 - z.checksum.Reset() -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. -func (z *Reader) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go deleted file mode 100644 index 1670a77d02a..00000000000 --- a/vendor/github.com/pierrec/lz4/reader_legacy.go +++ /dev/null @@ -1,207 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" -) - -// ReaderLegacy implements the LZ4Demo frame decoder. -// The Header is set after the first call to Read(). -type ReaderLegacy struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - lastBlock bool - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - skip int64 // Bytes to skip before next read. - dpos int64 // Position in dest -} - -// NewReaderLegacy returns a new LZ4Demo frame decoder. -// No access to the underlying io.Reader is performed. -func NewReaderLegacy(src io.Reader) *ReaderLegacy { - r := &ReaderLegacy{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. -func (z *ReaderLegacy) readLegacyHeader() error { - z.lastBlock = false - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic != frameMagicLegacy { - return ErrInvalid - } - z.pos += 4 - - // Legacy has fixed 8MB blocksizes - // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame - bSize := blockSize4M * 2 - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size size=%d", bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. -// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. -func (z *ReaderLegacy) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readLegacyHeader(); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. 
- if debugFlag { - debug(" reading block from writer %d %d", z.idx, blockSize4M*2) - } - - // Reset uncompressed buffer - z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos) - } - z.pos += 4 - - // Legacy blocks are always compressed, even when detrimental - if debugFlag { - debug(" compressed block size %d", bLen) - } - - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - - z.data = z.data[:n] - if z.OnBlockDone != nil { - z.OnBlockDone(n) - } - - z.idx = 0 - - // Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this - // it means we've reached the end... - if n < blockSize4M*2 { - z.lastBlock = true - } - } - - if z.skip > int64(len(z.data[z.idx:])) { - z.skip -= int64(len(z.data[z.idx:])) - z.dpos += int64(len(z.data[z.idx:])) - z.idx = len(z.data) - return 0, nil - } - - z.idx += int(z.skip) - z.dpos += z.skip - z.skip = 0 - - n := copy(buf, z.data[z.idx:]) - z.idx += n - z.dpos += int64(n) - if debugFlag { - debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data)) - } - if z.lastBlock && len(z.data) == z.idx { - return n, io.EOF - } - return n, nil -} - -// Seek implements io.Seeker, but supports seeking forward from the current -// position only. Any other seek will return an error. Allows skipping output -// bytes which aren't needed, which in some scenarios is faster than reading -// and discarding them. -// Note this may cause future calls to Read() to read 0 bytes if all of the -// data they would have returned is skipped. -func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) { - if offset < 0 || whence != io.SeekCurrent { - return z.dpos + z.skip, ErrUnsupportedSeek - } - z.skip += offset - return z.dpos + z.skip, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *ReaderLegacy) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. 
-func (z *ReaderLegacy) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore similarity index 96% rename from vendor/github.com/pierrec/lz4/.gitignore rename to vendor/github.com/pierrec/lz4/v4/.gitignore index 5e987350471..5d7e88de0a3 100644 --- a/vendor/github.com/pierrec/lz4/.gitignore +++ b/vendor/github.com/pierrec/lz4/v4/.gitignore @@ -31,4 +31,6 @@ Temporary Items # End of https://www.gitignore.io/api/macos cmd/*/*exe -.idea \ No newline at end of file +.idea + +fuzz/*.zip diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE similarity index 100% rename from vendor/github.com/pierrec/lz4/LICENSE rename to vendor/github.com/pierrec/lz4/v4/LICENSE diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md similarity index 82% rename from vendor/github.com/pierrec/lz4/README.md rename to vendor/github.com/pierrec/lz4/v4/README.md index 4ee388e81bf..dee77545b0c 100644 --- a/vendor/github.com/pierrec/lz4/README.md +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -1,7 +1,7 @@ # lz4 : LZ4 compression in pure Go -[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) -[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) +[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4) +[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) @@ -15,13 +15,13 @@ The implementation is based on the reference C [one](https://github.com/lz4/lz4) Assuming you have the go toolchain installed: ``` -go get github.com/pierrec/lz4 +go get github.com/pierrec/lz4/v4 ``` There is a command line interface tool to compress and decompress LZ4 files. ``` -go install github.com/pierrec/lz4/cmd/lz4c +go install github.com/pierrec/lz4/v4/cmd/lz4c@latest ``` Usage @@ -87,4 +87,6 @@ Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. +Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64. + Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. 
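The README hunk above switches the install and CLI instructions to the `/v4` module path. As a minimal, hypothetical round-trip sketch (names from the v4 frame API, not from this repository's code), compressing and decompressing an in-memory LZ4 frame looks roughly like this:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pierrec/lz4/v4"
)

func main() {
	// Compress into an in-memory LZ4 frame.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := io.WriteString(zw, "hello, lz4 frame"); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil { // Close flushes the frame footer.
		panic(err)
	}

	// Read the frame back out.
	zr := lz4.NewReader(&frame)
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}
```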
diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go new file mode 100644 index 00000000000..8df0dc76d00 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go @@ -0,0 +1,222 @@ +package lz4 + +import ( + "errors" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +type crState int + +const ( + crStateInitial crState = iota + crStateReading + crStateFlushing + crStateDone +) + +type CompressingReader struct { + state crState + src io.ReadCloser // source reader + level lz4block.CompressionLevel // how hard to try + frame *lz4stream.Frame // frame being built + in []byte + out ovWriter + handler func(int) +} + +// NewCompressingReader creates a reader which reads compressed data from +// raw stream. This makes it a logical opposite of a normal lz4.Reader. +// We require an io.ReadCloser as an underlying source for compatibility +// with Go's http.Request. +func NewCompressingReader(src io.ReadCloser) *CompressingReader { + zrd := &CompressingReader { + frame: lz4stream.NewFrame(), + } + + _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone) + zrd.Reset(src) + + return zrd +} + +// Source exposes the underlying source stream for introspection and control. +func (zrd *CompressingReader) Source() io.ReadCloser { + return zrd.src +} + +// Close simply invokes the underlying stream Close method. This method is +// provided for the benefit of Go http client/server, which relies on Close +// for goroutine termination. +func (zrd *CompressingReader) Close() error { + return zrd.src.Close() +} + +// Apply applies useful options to the lz4 encoder. 
+func (zrd *CompressingReader) Apply(options ...Option) (err error) { + if zrd.state != crStateInitial { + return lz4errors.ErrOptionClosedOrError + } + + zrd.Reset(zrd.src) + + for _, o := range options { + if err = o(zrd); err != nil { + return + } + } + return +} + +func (*CompressingReader) private() {} + +func (zrd *CompressingReader) init() error { + zrd.frame.InitW(&zrd.out, 1, false) + size := zrd.frame.Descriptor.Flags.BlockSizeIndex() + zrd.in = size.Get() + return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out) +} + +// Read allows reading of lz4 compressed data +func (zrd *CompressingReader) Read(p []byte) (n int, err error) { + defer func() { + if err != nil { + zrd.state = crStateDone + } + }() + + if !zrd.out.reset(p) { + return len(p), nil + } + + switch zrd.state { + case crStateInitial: + err = zrd.init() + if err != nil { + return + } + zrd.state = crStateReading + case crStateDone: + return 0, errors.New("This reader is done") + case crStateFlushing: + if zrd.out.dataPos > 0 { + n = zrd.out.dataPos + zrd.out.data = nil + zrd.out.dataPos = 0 + return + } else { + zrd.state = crStateDone + return 0, io.EOF + } + } + + for zrd.state == crStateReading { + block := zrd.frame.Blocks.Block + + var rCount int + rCount, err = io.ReadFull(zrd.src, zrd.in) + switch err { + case nil: + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + + if zrd.out.dataPos == len(zrd.out.data) { + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + } + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + if rCount > 0 { + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + } + + err = zrd.frame.CloseW(&zrd.out, 1) + if err != nil { + return + } + zrd.state = crStateFlushing + + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + default: + return + } + } + + err = lz4errors.ErrInternalUnhandledState + return +} + +// Reset makes the stream usable again; mostly handy to reuse lz4 encoder +// instances. +func (zrd *CompressingReader) Reset(src io.ReadCloser) { + zrd.frame.Reset(1) + zrd.state = crStateInitial + zrd.src = src + zrd.out.clear() +} + +type ovWriter struct { + data []byte + ov []byte + dataPos int + ovPos int +} + +func (wr *ovWriter) Write(p []byte) (n int, err error) { + count := copy(wr.data[wr.dataPos : ], p) + wr.dataPos += count + + if count < len(p) { + wr.ov = append(wr.ov, p[count : ]...) 
+ } + + return len(p), nil +} + +func (wr *ovWriter) reset(out []byte) bool { + ovRem := len(wr.ov) - wr.ovPos + + if ovRem >= len(out) { + wr.ovPos += copy(out, wr.ov[wr.ovPos : ]) + return false + } + + if ovRem > 0 { + copy(out, wr.ov[wr.ovPos : ]) + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = ovRem + } else if wr.ovPos > 0 { + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = 0 + } + + wr.data = out + return true +} + +func (wr *ovWriter) clear() { + wr.data = nil + wr.dataPos = 0 + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 +} diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go similarity index 58% rename from vendor/github.com/pierrec/lz4/block.go rename to vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go index 664d9be580d..fec8adb03a1 100644 --- a/vendor/github.com/pierrec/lz4/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -1,53 +1,110 @@ -package lz4 +package lz4block import ( "encoding/binary" "math/bits" "sync" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +const ( + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. ) +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + *e = lz4errors.ErrInvalidSourceShortBuffer + } +} + // blockHash hashes the lower 6 bytes into a value < htSize. func blockHash(x uint64) uint32 { const prime6bytes = 227718039650203 return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) } -// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. func CompressBlockBound(n int) int { return n + n/255 + 16 } -// UncompressBlock uncompresses the source buffer into the destination one, -// and returns the uncompressed size. -// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte) (int, error) { +func UncompressBlock(src, dst, dict []byte) (int, error) { if len(src) == 0 { return 0, nil } - if di := decodeBlock(dst, src); di >= 0 { + if di := decodeBlock(dst, src, dict); di >= 0 { return di, nil } - return 0, ErrInvalidSourceShortBuffer + return 0, lz4errors.ErrInvalidSourceShortBuffer } -// CompressBlock compresses the source buffer into the destination one. -// This is the fast version of LZ4 compression and also the default one. -// -// The argument hashTable is scratch space for a hash table used by the -// compressor. If provided, it should have length at least 1<<16. If it is -// shorter (or nil), CompressBlock allocates its own hash table. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. 
-// -// An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { - defer recoverBlock(&err) +type Compressor struct { + // Offsets are at most 64kiB, so we can store only the lower 16 bits of + // match positions: effectively, an offset from some 64kiB block boundary. + // + // When we retrieve such an offset, we interpret it as relative to the last + // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000, + // depending on which of these is inside the current window. If a table + // entry was generated more than 64kiB back in the input, we find out by + // inspecting the input stream. + table [htSize]uint16 + + // Bitmap indicating which positions in the table are in use. + // This allows us to quickly reset the table for reuse, + // without having to zero everything. + inUse [htSize / 32]uint32 +} + +// Get returns the position of a presumptive match for the hash h. +// The match may be a false positive due to a hash collision or an old entry. +// If si < winSize, the return value may be negative. +func (c *Compressor) get(h uint32, si int) int { + h &= htSize - 1 + i := 0 + if c.inUse[h/32]&(1<<(h%32)) != 0 { + i = int(c.table[h]) + } + i += si &^ winMask + if i >= si { + // Try previous 64kiB block (negative when in first block). + i -= winSize + } + return i +} + +func (c *Compressor) put(h uint32, si int) { + h &= htSize - 1 + c.table[h] = uint16(si) + c.inUse[h/32] |= 1 << (h % 32) +} + +func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} } + +var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }} + +func CompressBlock(src, dst []byte) (int, error) { + c := compressorPool.Get().(*Compressor) + n, err := c.CompressBlock(src, dst) + compressorPool.Put(c) + return n, err +} + +func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.reset() // Return 0, nil only if the destination buffer size is < CompressBlockBound. isNotCompressible := len(dst) < CompressBlockBound(len(src)) @@ -56,14 +113,6 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { // This significantly speeds up incompressible data and usually has very small impact on compression. // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) const adaptSkipLog = 7 - if len(hashTable) < htSize { - htIface := htPool.Get() - defer htPool.Put(htIface) - hashTable = (*(htIface).(*[htSize]int))[:] - } - // Prove to the compiler the table has at least htSize elements. - // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. - hashTable = hashTable[:htSize] // si: Current position of the search. // anchor: Position of the current literals. @@ -82,33 +131,30 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { // We check a match at s, s+1 and s+2 and pick the first one we get. // Checking 3 only requires us to load the source one. - ref := hashTable[h] - ref2 := hashTable[h2] - hashTable[h] = si - hashTable[h2] = si + 1 + ref := c.get(h, si) + ref2 := c.get(h2, si+1) + c.put(h, si) + c.put(h2, si+1) + offset := si - ref - // If offset <= 0 we got an old entry in the hash table. - if offset <= 0 || offset >= winSize || // Out of window. - uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. 
+ if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // No match. Start calculating another hash. // The processor can usually do this out-of-order. h = blockHash(match >> 16) - ref = hashTable[h] + ref3 := c.get(h, si+2) // Check the second match at si+1 si += 1 offset = si - ref2 - if offset <= 0 || offset >= winSize || - uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { // No match. Check the third match at si+2 si += 1 - offset = si - ref - hashTable[h] = si + offset = si - ref3 + c.put(h, si) - if offset <= 0 || offset >= winSize || - uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) { // Skip one extra byte (at si+3) before we check 3 matches again. si += 2 + (si-anchor)>>adaptSkipLog continue @@ -135,7 +181,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { si, mLen = si+mLen, si+minMatch // Find the longest match by looking by batches of 8 bytes. - for si+8 < sn { + for si+8 <= sn { x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) if x == 0 { si += 8 @@ -147,6 +193,9 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { } mLen = si - mLen + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } if mLen < 0xF { dst[di] = byte(mLen) } else { @@ -160,29 +209,40 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { dst[di] |= 0xF0 di++ l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { + for ; l >= 0xFF && di < len(dst); l -= 0xFF { dst[di] = 0xFF di++ } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } dst[di] = byte(l) } di++ // Literals. + if di+lLen > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } copy(dst[di:di+lLen], src[anchor:anchor+lLen]) di += lLen + 2 anchor = si // Encode offset. - _ = dst[di] // Bound check elimination. + if di > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) // Encode match length part 2. if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF { dst[di] = 0xFF di++ } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } dst[di] = byte(mLen) di++ } @@ -192,7 +252,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { } // Hash match end-2 h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) - hashTable[h] = si - 2 + c.put(h, si-2) } lastLiterals: @@ -202,16 +262,22 @@ lastLiterals: } // Last literals. + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } lLen := len(src) - anchor if lLen < 0xF { dst[di] = byte(lLen << 4) } else { dst[di] = 0xF0 di++ - for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { + for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF { dst[di] = 0xFF di++ } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } dst[di] = byte(lLen) } di++ @@ -221,35 +287,43 @@ lastLiterals: // Incompressible. return 0, nil } + if di+len(src)-anchor > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } di += copy(dst[di:di+len(src)-anchor], src[anchor:]) return di, nil } -// Pool of hash tables for CompressBlock. 
-var htPool = sync.Pool{ - New: func() interface{} { - return new([htSize]int) - }, -} - // blockHash hashes 4 bytes into a value < winSize. func blockHashHC(x uint32) uint32 { const hasher uint32 = 2654435761 // Knuth multiplicative hash. return x * hasher >> (32 - winSizeLog) } -// CompressBlockHC compresses the source buffer src into the destination dst -// with max search depth (use 0 or negative value for no max). -// -// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. -// -// An error is returned if the destination buffer is too small. -func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { +type CompressorHC struct { + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + hashTable, chainTable [htSize]int + needsReset bool +} + +var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }} + +func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) { + c := compressorHCPool.Get().(*CompressorHC) + n, err := c.CompressBlock(src, dst, depth) + compressorHCPool.Put(c) + return n, err +} + +func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) { + if c.needsReset { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.hashTable = [htSize]int{} + c.chainTable = [htSize]int{} + } + c.needsReset = true // Only false on first call. + defer recoverBlock(&err) // Return 0, nil only if the destination buffer size is < CompressBlockBound. @@ -261,20 +335,15 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { const adaptSkipLog = 7 var si, di, anchor int - - // hashTable: stores the last position found for a given hash - // chainTable: stores previous positions for a given hash - var hashTable, chainTable [winSize]int - - if depth <= 0 { - depth = winSize - } - sn := len(src) - mfLimit if sn <= 0 { goto lastLiterals } + if depth == 0 { + depth = winSize + } + for si < sn { // Hash the next 4 bytes (sequence). match := binary.LittleEndian.Uint32(src[si:]) @@ -283,7 +352,7 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { // Follow the chain until out of window and give the longest match. mLen := 0 offset := 0 - for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { + for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 { // The first (mLen==0) or next byte (mLen>=minMatch) at current match length // must match to improve on the match length. if src[next+mLen] != src[si+mLen] { @@ -309,10 +378,9 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { mLen = ml offset = si - next // Try another previous position with the same hash. - try-- } - chainTable[si&winMask] = hashTable[h] - hashTable[h] = si + c.chainTable[si&winMask] = c.hashTable[h] + c.hashTable[h] = si // No match found. 
if mLen == 0 { @@ -331,8 +399,8 @@ func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { match >>= 8 match |= uint32(src[si+3]) << 24 h := blockHashHC(match) - chainTable[si&winMask] = hashTable[h] - hashTable[h] = si + c.chainTable[si&winMask] = c.hashTable[h] + c.hashTable[h] = si si++ } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go new file mode 100644 index 00000000000..138083d9479 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -0,0 +1,87 @@ +// Package lz4block provides LZ4 BlockSize types and pools of buffers. +package lz4block + +import "sync" + +const ( + Block64Kb uint32 = 1 << (16 + iota*2) + Block256Kb + Block1Mb + Block4Mb + Block8Mb = 2 * Block4Mb +) + +var ( + BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} + BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} + BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }} + BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }} + BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }} +) + +func Index(b uint32) BlockSizeIndex { + switch b { + case Block64Kb: + return 4 + case Block256Kb: + return 5 + case Block1Mb: + return 6 + case Block4Mb: + return 7 + case Block8Mb: // only valid in legacy mode + return 3 + } + return 0 +} + +func IsValid(b uint32) bool { + return Index(b) > 0 +} + +type BlockSizeIndex uint8 + +func (b BlockSizeIndex) IsValid() bool { + switch b { + case 4, 5, 6, 7: + return true + } + return false +} + +func (b BlockSizeIndex) Get() []byte { + var buf interface{} + switch b { + case 4: + buf = BlockPool64K.Get() + case 5: + buf = BlockPool256K.Get() + case 6: + buf = BlockPool1M.Get() + case 7: + buf = BlockPool4M.Get() + case 3: + buf = BlockPool8M.Get() + } + return buf.([]byte) +} + +func Put(buf []byte) { + // Safeguard: do not allow invalid buffers. 
+ switch c := cap(buf); uint32(c) { + case Block64Kb: + BlockPool64K.Put(buf[:c]) + case Block256Kb: + BlockPool256K.Put(buf[:c]) + case Block1Mb: + BlockPool1M.Put(buf[:c]) + case Block4Mb: + BlockPool4M.Put(buf[:c]) + case Block8Mb: + BlockPool8M.Put(buf[:c]) + } +} + +type CompressionLevel uint32 + +const Fast CompressionLevel = 0 diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s similarity index 52% rename from vendor/github.com/pierrec/lz4/decode_amd64.s rename to vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s index 20fef39759c..1d00133fac4 100644 --- a/vendor/github.com/pierrec/lz4/decode_amd64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s @@ -2,12 +2,13 @@ // +build gc // +build !noasm +#include "go_asm.h" #include "textflag.h" // AX scratch // BX scratch -// CX scratch -// DX token +// CX literal and match lengths +// DX token, match offset // // DI &dst // SI &src @@ -16,9 +17,11 @@ // R11 &dst // R12 short output end // R13 short input end -// func decodeBlock(dst, src []byte) int -// using 50 bytes of stack currently -TEXT ·decodeBlock(SB), NOSPLIT, $64-56 +// R14 &dict +// R15 len(dict) + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOSPLIT, $48-80 MOVQ dst_base+0(FP), DI MOVQ DI, R11 MOVQ dst_len+8(FP), R8 @@ -26,8 +29,13 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56 MOVQ src_base+24(FP), SI MOVQ src_len+32(FP), R9 + CMPQ R9, $0 + JE err_corrupt ADDQ SI, R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + // shortcut ends // short output end MOVQ R8, R12 @@ -36,28 +44,26 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56 MOVQ R9, R13 SUBQ $16, R13 -loop: - // for si < len(src) - CMPQ SI, R9 - JGE end + XORL CX, CX +loop: // token := uint32(src[si]) - MOVBQZX (SI), DX + MOVBLZX (SI), DX INCQ SI // lit_len = token >> 4 // if lit_len > 0 // CX = lit_len - MOVQ DX, CX - SHRQ $4, CX + MOVL DX, CX + SHRL $4, CX // if lit_len != 0xF - CMPQ CX, $0xF - JEQ lit_len_loop_pre + CMPL CX, $0xF + JEQ lit_len_loop CMPQ DI, R12 - JGE lit_len_loop_pre + JAE copy_literal CMPQ SI, R13 - JGE lit_len_loop_pre + JAE copy_literal // copy shortcut @@ -76,28 +82,32 @@ loop: ADDQ CX, DI ADDQ CX, SI - MOVQ DX, CX - ANDQ $0xF, CX + MOVL DX, CX + ANDL $0xF, CX // The second stage: prepare for match copying, decode full info. // If it doesn't work out, the info won't be wasted. // offset := uint16(data[:2]) - MOVWQZX (SI), DX + MOVWLZX (SI), DX + TESTL DX, DX + JE err_corrupt ADDQ $2, SI + JC err_short_buf MOVQ DI, AX SUBQ DX, AX + JC err_corrupt CMPQ AX, DI - JGT err_short_buf + JA err_short_buf // if we can't do the second stage then jump straight to read the // match length, we already have the offset. - CMPQ CX, $0xF + CMPL CX, $0xF JEQ match_len_loop_pre - CMPQ DX, $8 + CMPL DX, $8 JLT match_len_loop_pre CMPQ AX, R11 - JLT err_short_buf + JB match_len_loop_pre // memcpy(op + 0, match + 0, 8); MOVQ (AX), BX @@ -109,72 +119,63 @@ loop: MOVW 16(AX), BX MOVW BX, 16(DI) - ADDQ $4, DI // minmatch - ADDQ CX, DI + LEAQ const_minMatch(DI)(CX*1), DI // shortcut complete, load next token - JMP loop - -lit_len_loop_pre: - // if lit_len > 0 - CMPQ CX, $0 - JEQ offset - CMPQ CX, $0xF - JNE copy_literal + JMP loopcheck + // Read the rest of the literal length: + // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF). 
lit_len_loop: - // for src[si] == 0xFF - CMPB (SI), $0xFF - JNE lit_len_finalise - - // bounds check src[si+1] - MOVQ SI, AX - ADDQ $1, AX - CMPQ AX, R9 - JGT err_short_buf + CMPQ SI, R9 + JAE err_short_buf - // lit_len += 0xFF - ADDQ $0xFF, CX + MOVBLZX (SI), BX INCQ SI - JMP lit_len_loop + ADDQ BX, CX -lit_len_finalise: - // lit_len += int(src[si]) - // si++ - MOVBQZX (SI), AX - ADDQ AX, CX - INCQ SI + CMPB BX, $0xFF + JE lit_len_loop copy_literal: // bounds check src and dst MOVQ SI, AX ADDQ CX, AX + JC err_short_buf CMPQ AX, R9 - JGT err_short_buf + JA err_short_buf - MOVQ DI, AX - ADDQ CX, AX - CMPQ AX, R8 - JGT err_short_buf + MOVQ DI, BX + ADDQ CX, BX + JC err_short_buf + CMPQ BX, R8 + JA err_short_buf - // whats a good cut off to call memmove? - CMPQ CX, $16 + // Copy literals of <=48 bytes through the XMM registers. + CMPQ CX, $48 JGT memmove_lit - // if len(dst[di:]) < 16 + // if len(dst[di:]) < 48 MOVQ R8, AX SUBQ DI, AX - CMPQ AX, $16 + CMPQ AX, $48 JLT memmove_lit - // if len(src[si:]) < 16 - MOVQ R9, AX - SUBQ SI, AX - CMPQ AX, $16 + // if len(src[si:]) < 48 + MOVQ R9, BX + SUBQ SI, BX + CMPQ BX, $48 JLT memmove_lit MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + + ADDQ CX, SI + ADDQ CX, DI JMP finish_lit_copy @@ -183,18 +184,20 @@ memmove_lit: MOVQ DI, 0(SP) MOVQ SI, 8(SP) MOVQ CX, 16(SP) - // spill + + // Spill registers. Increment SI, DI now so we don't need to save CX. + ADDQ CX, DI + ADDQ CX, SI MOVQ DI, 24(SP) MOVQ SI, 32(SP) - MOVQ CX, 40(SP) // need len to inc SI, DI after - MOVB DX, 48(SP) + MOVL DX, 40(SP) + CALL runtime·memmove(SB) // restore registers MOVQ 24(SP), DI MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVB 48(SP), DX + MOVL 40(SP), DX // recalc initial values MOVQ dst_base+0(FP), R8 @@ -202,77 +205,62 @@ memmove_lit: ADDQ dst_len+8(FP), R8 MOVQ src_base+24(FP), R9 ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 MOVQ R8, R12 SUBQ $32, R12 MOVQ R9, R13 SUBQ $16, R13 finish_lit_copy: - ADDQ CX, SI - ADDQ CX, DI - - CMPQ SI, R9 - JGE end - -offset: // CX := mLen // free up DX to use for offset - MOVQ DX, CX + MOVL DX, CX + ANDL $0xF, CX - MOVQ SI, AX - ADDQ $2, AX - CMPQ AX, R9 - JGT err_short_buf + CMPQ SI, R9 + JAE end // offset - // DX := int(src[si]) | int(src[si+1])<<8 - MOVWQZX (SI), DX + // si += 2 + // DX := int(src[si-2]) | int(src[si-1])<<8 ADDQ $2, SI + JC err_short_buf + CMPQ SI, R9 + JA err_short_buf + MOVWQZX -2(SI), DX // 0 offset is invalid - CMPQ DX, $0 - JEQ err_corrupt - - ANDB $0xF, CX + TESTL DX, DX + JEQ err_corrupt match_len_loop_pre: // if mlen != 0xF CMPB CX, $0xF JNE copy_match + // do { BX = src[si++]; mlen += BX } while (BX == 0xFF). 
match_len_loop: - // for src[si] == 0xFF - // lit_len += 0xFF - CMPB (SI), $0xFF - JNE match_len_finalise - - // bounds check src[si+1] - MOVQ SI, AX - ADDQ $1, AX - CMPQ AX, R9 - JGT err_short_buf + CMPQ SI, R9 + JAE err_short_buf - ADDQ $0xFF, CX + MOVBLZX (SI), BX INCQ SI - JMP match_len_loop + ADDQ BX, CX -match_len_finalise: - // lit_len += int(src[si]) - // si++ - MOVBQZX (SI), AX - ADDQ AX, CX - INCQ SI + CMPB BX, $0xFF + JE match_len_loop copy_match: - // mLen += minMatch - ADDQ $4, CX + ADDQ $const_minMatch, CX // check we have match_len bytes left in dst // di+match_len < len(dst) MOVQ DI, AX ADDQ CX, AX + JC err_short_buf CMPQ AX, R8 - JGT err_short_buf + JA err_short_buf // DX = offset // CX = match_len @@ -282,14 +270,14 @@ copy_match: // check BX is within dst // if BX < &dst + JC copy_match_from_dict CMPQ BX, R11 - JLT err_short_buf + JBE copy_match_from_dict // if offset + match_len < di - MOVQ BX, AX - ADDQ CX, AX + LEAQ (BX)(CX*1), AX CMPQ DI, AX - JGT copy_interior_match + JA copy_interior_match // AX := len(dst[:di]) // MOVQ DI, AX @@ -309,11 +297,9 @@ copy_match_loop: INCQ DI INCQ BX DECQ CX + JNZ copy_match_loop - CMPQ CX, $0 - JGT copy_match_loop - - JMP loop + JMP loopcheck copy_interior_match: CMPQ CX, $16 @@ -329,23 +315,50 @@ copy_interior_match: MOVOU X0, (DI) ADDQ CX, DI - JMP loop + XORL CX, CX + JMP loopcheck + +copy_match_from_dict: + // CX = match_len + // BX = &dst + (di - offset) + + // AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary + MOVQ R11, AX + SUBQ BX, AX + + // BX = len(dict) - dict_bytes_available + MOVQ R15, BX + SUBQ AX, BX + JS err_short_dict + + ADDQ R14, BX + + // if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy + CMPQ CX, AX + JLT memmove_match + + // The match stretches over the dictionary and our block + // 1) copy what comes from the dictionary + // AX = dict_bytes_available = copy_size + // BX = &dict_end - copy_size + // CX = match_len -memmove_match: // memmove(to, from, len) MOVQ DI, 0(SP) MOVQ BX, 8(SP) - MOVQ CX, 16(SP) + MOVQ AX, 16(SP) + // store extra stuff we want to recover // spill MOVQ DI, 24(SP) MOVQ SI, 32(SP) - MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVQ CX, 40(SP) CALL runtime·memmove(SB) // restore registers + MOVQ 16(SP), AX // copy_size MOVQ 24(SP), DI MOVQ 32(SP), SI - MOVQ 40(SP), CX + MOVQ 40(SP), CX // match_len // recalc initial values MOVQ dst_base+0(FP), R8 @@ -353,23 +366,83 @@ memmove_match: ADDQ dst_len+8(FP), R8 MOVQ src_base+24(FP), R9 ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 MOVQ R8, R12 SUBQ $32, R12 MOVQ R9, R13 SUBQ $16, R13 + // di+=copy_size + ADDQ AX, DI + + // 2) copy the rest from the current block + // CX = match_len - copy_size = rest_size + SUBQ AX, CX + MOVQ R11, BX + + // check if we have a copy overlap + // AX = &dst + rest_size + MOVQ CX, AX + ADDQ BX, AX + // if &dst + rest_size > di, copy byte by byte + CMPQ AX, DI + + JA copy_match_loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + + // Spill registers. Increment DI now so we don't need to save CX. 
ADDQ CX, DI - JMP loop + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + XORL CX, CX + +loopcheck: + // for si < len(src) + CMPQ SI, R9 + JB loop + +end: + // Remaining length must be zero. + TESTQ CX, CX + JNE err_corrupt + + SUBQ R11, DI + MOVQ DI, ret+72(FP) + RET err_corrupt: - MOVQ $-1, ret+48(FP) + MOVQ $-1, ret+72(FP) RET err_short_buf: - MOVQ $-2, ret+48(FP) + MOVQ $-2, ret+72(FP) RET -end: - SUBQ R11, DI - MOVQ DI, ret+48(FP) +err_short_dict: + MOVQ $-3, ret+72(FP) RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s new file mode 100644 index 00000000000..20b21fcf15e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s @@ -0,0 +1,231 @@ +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define srcend R4 +#define match R5 // Match address. +#define dictend R6 +#define token R7 +#define len R8 // Literal and match lengths. +#define offset R7 // Match offset; overlaps with token. +#define tmp1 R9 +#define tmp2 R11 +#define tmp3 R12 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40 + MOVW dst_base +0(FP), dst + MOVW dst_len +4(FP), dstend + MOVW src_base +12(FP), src + MOVW src_len +16(FP), srcend + + CMP $0, srcend + BEQ shortSrc + + ADD dst, dstend + ADD src, srcend + + MOVW dst, dstorig + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + MOVW token >> 4, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CMP $0, len + BEQ copyLiteralDone + + // Bounds check dst+len and src+len. + ADD.S dst, len, tmp1 + ADD.CC.S src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + //BHI shortDst // Uncomment for distinct error codes. + CMP.LS srcend, tmp2 + BHI shortSrc + + // Copy literal. + CMP $4, len + BLO copyLiteralFinish + + // Copy 0-3 bytes until src is aligned. + TST $1, src + MOVBU.NE.P 1(src), tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $1, len + + TST $2, src + MOVHU.NE.P 2(src), tmp2 + MOVB.NE.P tmp2, 1(dst) + MOVW.NE tmp2 >> 8, tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $2, len + + B copyLiteralLoopCond + +copyLiteralLoop: + // Aligned load, unaligned write. + MOVW.P 4(src), tmp1 + MOVW tmp1 >> 8, tmp2 + MOVB tmp2, 1(dst) + MOVW tmp1 >> 16, tmp3 + MOVB tmp3, 2(dst) + MOVW tmp1 >> 24, tmp2 + MOVB tmp2, 3(dst) + MOVB.P tmp1, 4(dst) +copyLiteralLoopCond: + // Loop until len-4 < 0. + SUB.S $4, len + BPL copyLiteralLoop + +copyLiteralFinish: + // Copy remaining 0-3 bytes. + // At this point, len may be < 0, but len&3 is still accurate. + TST $1, len + MOVB.NE.P 1(src), tmp3 + MOVB.NE.P tmp3, 1(dst) + TST $2, len + MOVB.NE.P 2(src), tmp1 + MOVB.NE.P tmp1, 2(dst) + MOVB.NE -1(src), tmp2 + MOVB.NE tmp2, -1(dst) + +copyLiteralDone: + // Initial part of match length. + // This frees up the token register for reuse as offset. 
+ AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. + ADD.S $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVBU -2(src), offset + MOVBU -1(src), tmp1 + ORR.S tmp1 << 8, offset + BEQ corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + // Bounds check dst+len+minMatch. + ADD.S dst, len, tmp1 + ADD.CC.S $const_minMatch, tmp1 + BCS shortDst + CMP dstend, tmp1 + BHI shortDst + + RSB dst, offset, match + CMP dstorig, match + BGE copyMatch4 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). + MOVW dict_base+24(FP), match + MOVW dict_len +28(FP), dictend + + ADD $const_minMatch, len + + RSB dst, dstorig, tmp1 + RSB dictend, offset, tmp2 + ADD.S tmp2, tmp1 + BMI shortDict + ADD match, dictend + ADD tmp1, match + +copyDict: + MOVBU.P 1(match), tmp1 + MOVB.P tmp1, 1(dst) + SUB.S $1, len + CMP.NE match, dictend + BNE copyDict + + // If the match extends beyond the dictionary, the rest is at dstorig. + CMP $0, len + BEQ copyMatchDone + MOVW dstorig, match + B copyMatch + + // Copy a regular match. + // Since len+minMatch is at least four, we can do a 4× unrolled + // byte copy loop. Using MOVW instead of four byte loads is faster, + // but to remain portable we'd have to align match first, which is + // too expensive. By alternating loads and stores, we also handle + // the case offset < 4. +copyMatch4: + SUB.S $4, len + MOVBU.P 4(match), tmp1 + MOVB.P tmp1, 4(dst) + MOVBU -3(match), tmp2 + MOVB tmp2, -3(dst) + MOVBU -2(match), tmp3 + MOVB tmp3, -2(dst) + MOVBU -1(match), tmp1 + MOVB tmp1, -1(dst) + BPL copyMatch4 + + // Restore len, which is now negative. + ADD.S $4, len + BEQ copyMatchDone + +copyMatch: + // Finish with a byte-at-a-time copy. + SUB.S $1, len + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + BNE copyMatch + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CMP $0, len + BNE corrupt + SUB dstorig, dst, tmp1 + MOVW tmp1, ret+36(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. +shortDict: +shortDst: +shortSrc: +corrupt: + MOVW $-1, tmp1 + MOVW tmp1, ret+36(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s new file mode 100644 index 00000000000..d2fe11b8ea1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -0,0 +1,241 @@ +// +build gc +// +build !noasm + +// This implementation assumes that strict alignment checking is turned off. +// The Go compiler makes the same assumption. + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define dstend16 R4 // dstend - 16 +#define srcend R5 +#define srcend16 R6 // srcend - 16 +#define match R7 // Match address. +#define dict R8 +#define dictlen R9 +#define dictend R10 +#define token R11 +#define len R12 // Literal and match lengths. +#define lenRem R13 +#define offset R14 // Match offset. 
+#define tmp1 R15 +#define tmp2 R16 +#define tmp3 R17 +#define tmp4 R19 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 + LDP dst_base+0(FP), (dst, dstend) + ADD dst, dstend + MOVD dst, dstorig + + LDP src_base+24(FP), (src, srcend) + CBZ srcend, shortSrc + ADD src, srcend + + // dstend16 = max(dstend-16, 0) and similarly for srcend16. + SUBS $16, dstend, dstend16 + CSEL LO, ZR, dstend16, dstend16 + SUBS $16, srcend, srcend16 + CSEL LO, ZR, srcend16, srcend16 + + LDP dict_base+48(FP), (dict, dictlen) + ADD dict, dictlen, dictend + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + LSR $4, token, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CBZ len, copyLiteralDone + + // Bounds check dst+len and src+len. + ADDS dst, len, tmp1 + BCS shortSrc + ADDS src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + BHI shortDst + CMP srcend, tmp2 + BHI shortSrc + + // Copy literal. + SUBS $16, len + BLO copyLiteralShort + +copyLiteralLoop: + LDP.P 16(src), (tmp1, tmp2) + STP.P (tmp1, tmp2), 16(dst) + SUBS $16, len + BPL copyLiteralLoop + + // Copy (final part of) literal of length 0-15. + // If we have >=16 bytes left in src and dst, just copy 16 bytes. +copyLiteralShort: + CMP dstend16, dst + CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO). + BHS copyLiteralShortEnd + + AND $15, len + + LDP (src), (tmp1, tmp2) + ADD len, src + STP (tmp1, tmp2), (dst) + ADD len, dst + + B copyLiteralDone + + // Safe but slow copy near the end of src, dst. +copyLiteralShortEnd: + TBZ $3, len, 3(PC) + MOVD.P 8(src), tmp1 + MOVD.P tmp1, 8(dst) + TBZ $2, len, 3(PC) + MOVW.P 4(src), tmp2 + MOVW.P tmp2, 4(dst) + TBZ $1, len, 3(PC) + MOVH.P 2(src), tmp3 + MOVH.P tmp3, 2(dst) + TBZ $0, len, 3(PC) + MOVBU.P 1(src), tmp4 + MOVB.P tmp4, 1(dst) + +copyLiteralDone: + // Initial part of match length. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. + ADDS $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVHU -2(src), offset + CBZ offset, corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + ADD $const_minMatch, len + + // Bounds check dst+len. + ADDS dst, len, tmp2 + BCS shortDst + CMP dstend, tmp2 + BHI shortDst + + SUB offset, dst, match + CMP dstorig, match + BHS copyMatchTry8 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). + SUB dstorig, dst, tmp1 + SUB offset, dictlen, tmp2 + ADDS tmp2, tmp1 + BMI shortDict + ADD dict, tmp1, match + +copyDict: + MOVBU.P 1(match), tmp3 + MOVB.P tmp3, 1(dst) + SUBS $1, len + CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag. + BNE copyDict + + CBZ len, copyMatchDone + + // If the match extends beyond the dictionary, the rest is at dstorig. + // Recompute the offset for the next check. + MOVD dstorig, match + SUB dstorig, dst, offset + +copyMatchTry8: + // Copy doublewords if both len and offset are at least eight. + // A 16-at-a-time loop doesn't provide a further speedup. 
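The assembly decoders above and the portable decoder all resolve matches that reach before the start of the current block against the external dictionary. A hedged sketch of that index computation (dictMatchStart is an illustrative name, not a function in the package); it mirrors dict[len(dict)+di-offset:] in decode_other.go.

package main

import "fmt"

// dictMatchStart reports where a back-reference that reaches before the start
// of the current output block is read from: the tail of the external
// dictionary. di is the number of bytes already produced, offset the match
// offset from the token.
func dictMatchStart(dictLen, di, offset int) (start, fromDict int) {
	if di >= offset {
		return 0, 0 // match lies entirely within the current block
	}
	fromDict = offset - di     // bytes supplied by the dictionary
	start = dictLen - fromDict // == len(dict) + di - offset
	return start, fromDict
}

func main() {
	// 10 bytes written so far, offset 14: the first 4 match bytes come from
	// the last 4 bytes of a 65536-byte dictionary, the rest from dst[0:].
	fmt.Println(dictMatchStart(65536, 10, 14)) // 65532 4
}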
+ CMP $8, len + CCMP HS, offset, $8, $0 + BLO copyMatchTry4 + + AND $7, len, lenRem + SUB $8, len +copyMatchLoop8: + MOVD.P 8(match), tmp1 + MOVD.P tmp1, 8(dst) + SUBS $8, len + BPL copyMatchLoop8 + + MOVD (match)(len), tmp2 // match+len == match+lenRem-8. + ADD lenRem, dst + MOVD $0, len + MOVD tmp2, -8(dst) + B copyMatchDone + +copyMatchTry4: + // Copy words if both len and offset are at least four. + CMP $4, len + CCMP HS, offset, $4, $0 + BLO copyMatchLoop1 + + MOVWU.P 4(match), tmp2 + MOVWU.P tmp2, 4(dst) + SUBS $4, len + BEQ copyMatchDone + +copyMatchLoop1: + // Byte-at-a-time copy for small offsets <= 3. + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + SUBS $1, len + BNE copyMatchLoop1 + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CBNZ len, corrupt + SUB dstorig, dst, tmp1 + MOVD tmp1, ret+72(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. +shortDict: +shortDst: +shortSrc: +corrupt: + MOVD $-1, tmp1 + MOVD tmp1, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go new file mode 100644 index 00000000000..8d9023d1007 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go @@ -0,0 +1,10 @@ +//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm +// +build amd64 arm arm64 +// +build !appengine +// +build gc +// +build !noasm + +package lz4block + +//go:noescape +func decodeBlock(dst, src, dict []byte) int diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go new file mode 100644 index 00000000000..9f568fbb1af --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -0,0 +1,139 @@ +//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm,!arm64 appengine !gc noasm + +package lz4block + +import ( + "encoding/binary" +) + +func decodeBlock(dst, src, dict []byte) (ret int) { + // Restrict capacities so we don't read or write out of bounds. + dst = dst[:len(dst):len(dst)] + src = src[:len(src):len(src)] + + const hasError = -2 + + if len(src) == 0 { + return hasError + } + + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di uint + for si < uint(len(src)) { + // Literals and match lengths (token). + b := uint(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < uint(len(src)): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := u16(src[si:]); mLen <= offset && offset < di { + i := di - offset + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. 
+ if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + } + case lLen == 0xF: + for { + x := uint(src[si]) + if lLen += x; int(lLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + + mLen := b & 0xF + if si == uint(len(src)) && mLen == 0 { + break + } else if si >= uint(len(src)) { + return hasError + } + + offset := u16(src[si:]) + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen += minMatch + if mLen == minMatch+0xF { + for { + x := uint(src[si]) + if mLen += x; int(mLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + } + + // Copy the match. + if di < offset { + // The match is beyond our block, meaning the first part + // is in the dictionary. + fromDict := dict[uint(len(dict))+di-offset:] + n := uint(copy(dst[di:di+mLen], fromDict)) + di += n + if mLen -= n; mLen == 0 { + continue + } + // We copied n = offset-di bytes from the dictionary, + // then set di = di+n = offset, so the following code + // copies from dst[di-offset:] = dst[0:]. + } + + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. + bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += uint(copy(dst[di:di+mLen], expanded[:mLen])) + } + + return int(di) +} + +func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go new file mode 100644 index 00000000000..710ea42812e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go @@ -0,0 +1,19 @@ +package lz4errors + +type Error string + +func (e Error) Error() string { return string(e) } + +const ( + ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short" + ErrInvalidFrame Error = "lz4: bad magic number" + ErrInternalUnhandledState Error = "lz4: unhandled state" + ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum" + ErrInvalidBlockChecksum Error = "lz4: invalid block checksum" + ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum" + ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level" + ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object" + ErrOptionInvalidBlockSize Error = "lz4: invalid block size" + ErrOptionNotApplicable Error = "lz4: option not applicable" + ErrWriterNotClosed Error = "lz4: writer not closed" +) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go new file mode 100644 index 00000000000..e96465460c5 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -0,0 +1,348 @@ +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +type Blocks struct { + Block *FrameDataBlock + Blocks chan chan *FrameDataBlock + mu sync.Mutex + err error +} + +func (b *Blocks) initW(f *Frame, dst io.Writer, num int) { + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return + } + 
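The overlapped-match branch of the portable decoder above repeats the period dst[di-offset:di] by doubling the copied region instead of copying byte by byte. A standalone sketch of that trick (expandOverlap is a made-up name); like the decoder, it assumes dst is capacity-limited so the copies cannot run past len(dst).

package main

import "fmt"

// expandOverlap repeats the overlapping match dst[di-offset:di] forward by
// mLen bytes, using the same doubling copy as the portable decoder above.
func expandOverlap(dst []byte, di, offset, mLen int) int {
	expanded := dst[di-offset:]
	if mLen > offset {
		// Copy whole multiples of the period, doubling the copied region.
		bytesToCopy := offset * (mLen / offset)
		for n := offset; n <= bytesToCopy+offset; n *= 2 {
			copy(expanded[n:], expanded[:n])
		}
		di += bytesToCopy
		mLen -= bytesToCopy
	}
	// Copy the remainder (less than one period).
	return di + copy(dst[di:di+mLen], expanded[:mLen])
}

func main() {
	// Literal "ab" followed by a match with offset=2, length=7
	// decodes to "ababababa".
	dst := make([]byte, 9)
	copy(dst, "ab")
	di := expandOverlap(dst, 2, 2, 7)
	fmt.Println(string(dst[:di])) // ababababa
}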
b.Block = nil + if cap(b.Blocks) != num { + b.Blocks = make(chan chan *FrameDataBlock, num) + } + // goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range b.Blocks { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + block := <-c + if block == nil { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Do not attempt to write the block upon any previous failure. + if b.err == nil { + // Write the block. + if err := block.Write(f, dst); err != nil { + // Keep the first error. + b.err = err + // All pending compression goroutines need to shut down, so we need to keep going. + } + } + close(c) + } + }() +} + +func (b *Blocks) close(f *Frame, num int) error { + if num == 1 { + if b.Block != nil { + b.Block.Close(f) + } + err := b.err + b.err = nil + return err + } + if b.Blocks == nil { + err := b.err + b.err = nil + return err + } + c := make(chan *FrameDataBlock) + b.Blocks <- c + c <- nil + <-c + err := b.err + b.err = nil + return err +} + +// ErrorR returns any error set while uncompressing a stream. +func (b *Blocks) ErrorR() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.err +} + +// initR returns a channel that streams the uncompressed blocks if in concurrent +// mode and no error. When the channel is closed, check for any error with b.ErrorR. +// +// If not in concurrent mode, the uncompressed block is b.Block and the returned error +// needs to be checked. +func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) { + size := f.Descriptor.Flags.BlockSizeIndex() + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return nil, nil + } + b.Block = nil + blocks := make(chan chan []byte, num) + // data receives the uncompressed blocks. + data := make(chan []byte) + // Read blocks from the source sequentially + // and uncompress them concurrently. + + // In legacy mode, accrue the uncompress sizes in cum. + var cum uint32 + go func() { + var cumx uint32 + var err error + for b.ErrorR() == nil { + block := NewFrameDataBlock(f) + cumx, err = block.Read(f, src, 0) + if err != nil { + block.Close(f) + break + } + // Recheck for an error as reading may be slow and uncompressing is expensive. + if b.ErrorR() != nil { + block.Close(f) + break + } + c := make(chan []byte) + blocks <- c + go func() { + defer block.Close(f) + data, err := block.Uncompress(f, size.Get(), nil, false) + if err != nil { + b.closeR(err) + // Close the block channel to indicate an error. + close(c) + } else { + c <- data + } + }() + } + // End the collection loop and the data channel. + c := make(chan []byte) + blocks <- c + c <- nil // signal the collection loop that we are done + <-c // wait for the collect loop to complete + if f.isLegacy() && cum == cumx { + err = io.EOF + } + b.closeR(err) + close(data) + }() + // Collect the uncompressed blocks and make them available + // on the returned channel. + go func(leg bool) { + defer close(blocks) + skipBlocks := false + for c := range blocks { + buf, ok := <-c + if !ok { + // A closed channel indicates an error. + // All remaining channels should be discarded. 
+ skipBlocks = true + continue + } + if buf == nil { + // Signal to end the loop. + close(c) + return + } + if skipBlocks { + // A previous error has occurred, skipping remaining channels. + continue + } + // Perform checksum now as the blocks are received in order. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(buf) + } + if leg { + cum += uint32(len(buf)) + } + data <- buf + close(c) + } + }(f.isLegacy()) + return data, nil +} + +// closeR safely sets the error on b if not already set. +func (b *Blocks) closeR(err error) { + b.mu.Lock() + if b.err == nil { + b.err = err + } + b.mu.Unlock() +} + +func NewFrameDataBlock(f *Frame) *FrameDataBlock { + buf := f.Descriptor.Flags.BlockSizeIndex().Get() + return &FrameDataBlock{Data: buf, data: buf} +} + +type FrameDataBlock struct { + Size DataBlockSize + Data []byte // compressed or uncompressed data (.data or .src) + Checksum uint32 + data []byte // buffer for compressed data + src []byte // uncompressed data + err error // used in concurrent mode +} + +func (b *FrameDataBlock) Close(f *Frame) { + b.Size = 0 + b.Checksum = 0 + b.err = nil + if b.data != nil { + // Block was not already closed. + lz4block.Put(b.data) + b.Data = nil + b.data = nil + b.src = nil + } +} + +// Block compression errors are ignored since the buffer is sized appropriately. +func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { + data := b.data + if f.isLegacy() { + data = data[:cap(data)] + } else { + data = data[:len(src)] // trigger the incompressible flag in CompressBlock + } + var n int + switch level { + case lz4block.Fast: + n, _ = lz4block.CompressBlock(src, data) + default: + n, _ = lz4block.CompressBlockHC(src, data, level) + } + if n == 0 { + b.Size.UncompressedSet(true) + b.Data = src + } else { + b.Size.UncompressedSet(false) + b.Data = data[:n] + } + b.Size.sizeSet(len(b.Data)) + b.src = src // keep track of the source for content checksum + + if f.Descriptor.Flags.BlockChecksum() { + b.Checksum = xxh32.ChecksumZero(src) + } + return b +} + +func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error { + // Write is called in the same order as blocks are compressed, + // so content checksum must be done here. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(b.src) + } + buf := f.buf[:] + binary.LittleEndian.PutUint32(buf, uint32(b.Size)) + if _, err := dst.Write(buf[:4]); err != nil { + return err + } + + if _, err := dst.Write(b.Data); err != nil { + return err + } + + if b.Checksum == 0 { + return nil + } + binary.LittleEndian.PutUint32(buf, b.Checksum) + _, err := dst.Write(buf[:4]) + return err +} + +// Read updates b with the next block data, size and checksum if available. +func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) { + x, err := f.readUint32(src) + if err != nil { + return 0, err + } + if f.isLegacy() { + switch x { + case frameMagicLegacy: + // Concatenated legacy frame. + return b.Read(f, src, cum) + case cum: + // Only works in non concurrent mode, for concurrent mode + // it is handled separately. + // Linux kernel format appends the total uncompressed size at the end. + return 0, io.EOF + } + } else if x == 0 { + // Marker for end of stream. 
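The Blocks pipeline above preserves block order under concurrency by queueing one result channel per block and having a single goroutine drain those channels in submission order. A generic, hedged sketch of the same channel-of-channels pattern (not the vendored code):

package main

import (
	"fmt"
	"strings"
)

func main() {
	queue := make(chan chan string, 4)

	// Single consumer: drains result channels in the order they were queued.
	done := make(chan struct{})
	go func() {
		for c := range queue {
			fmt.Println(<-c)
		}
		close(done)
	}()

	// Producers: one goroutine per item, finishing in any order.
	for _, word := range []string{"first", "second", "third"} {
		c := make(chan string, 1)
		queue <- c // reserve this item's slot in the output order
		go func(w string, c chan string) {
			c <- strings.ToUpper(w) // stand-in for block compression
		}(word, c)
	}
	close(queue)
	<-done
}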
+ return 0, io.EOF + } + b.Size = DataBlockSize(x) + + size := b.Size.size() + if size > cap(b.data) { + return x, lz4errors.ErrOptionInvalidBlockSize + } + b.data = b.data[:size] + if _, err := io.ReadFull(src, b.data); err != nil { + return x, err + } + if f.Descriptor.Flags.BlockChecksum() { + sum, err := f.readUint32(src) + if err != nil { + return 0, err + } + b.Checksum = sum + } + return x, nil +} + +func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) { + if b.Size.Uncompressed() { + n := copy(dst, b.data) + dst = dst[:n] + } else { + n, err := lz4block.UncompressBlock(b.data, dst, dict) + if err != nil { + return nil, err + } + dst = dst[:n] + } + if f.Descriptor.Flags.BlockChecksum() { + if c := xxh32.ChecksumZero(dst); c != b.Checksum { + err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) + return nil, err + } + } + if sum && f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(dst) + } + return dst, nil +} + +func (f *Frame) readUint32(r io.Reader) (x uint32, err error) { + if _, err = io.ReadFull(r, f.buf[:4]); err != nil { + return + } + x = binary.LittleEndian.Uint32(f.buf[:4]) + return +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go new file mode 100644 index 00000000000..18192a9433d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go @@ -0,0 +1,204 @@ +// Package lz4stream provides the types that support reading and writing LZ4 data streams. +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +//go:generate go run gen.go + +const ( + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + frameMagicLegacy uint32 = 0x184C2102 +) + +func NewFrame() *Frame { + return &Frame{} +} + +type Frame struct { + buf [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes + Magic uint32 + Descriptor FrameDescriptor + Blocks Blocks + Checksum uint32 + checksum xxh32.XXHZero +} + +// Reset allows reusing the Frame. +// The Descriptor configuration is not modified. +func (f *Frame) Reset(num int) { + f.Magic = 0 + f.Descriptor.Checksum = 0 + f.Descriptor.ContentSize = 0 + _ = f.Blocks.close(f, num) + f.Checksum = 0 +} + +func (f *Frame) InitW(dst io.Writer, num int, legacy bool) { + if legacy { + f.Magic = frameMagicLegacy + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + } else { + f.Magic = frameMagic + f.Descriptor.initW() + } + f.Blocks.initW(f, dst, num) + f.checksum.Reset() +} + +func (f *Frame) CloseW(dst io.Writer, num int) error { + if err := f.Blocks.close(f, num); err != nil { + return err + } + if f.isLegacy() { + return nil + } + buf := f.buf[:0] + // End mark (data block size of uint32(0)). + buf = append(buf, 0, 0, 0, 0) + if f.Descriptor.Flags.ContentChecksum() { + buf = f.checksum.Sum(buf) + } + _, err := dst.Write(buf) + return err +} + +func (f *Frame) isLegacy() bool { + return f.Magic == frameMagicLegacy +} + +func (f *Frame) ParseHeaders(src io.Reader) error { + if f.Magic > 0 { + // Header already read. 
+ return nil + } + +newFrame: + var err error + if f.Magic, err = f.readUint32(src); err != nil { + return err + } + switch m := f.Magic; { + case m == frameMagic || m == frameMagicLegacy: + // All 16 values of frameSkipMagic are valid. + case m>>8 == frameSkipMagic>>8: + skip, err := f.readUint32(src) + if err != nil { + return err + } + if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil { + return err + } + goto newFrame + default: + return lz4errors.ErrInvalidFrame + } + if err := f.Descriptor.initR(f, src); err != nil { + return err + } + f.checksum.Reset() + return nil +} + +func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) { + return f.Blocks.initR(f, num, src) +} + +func (f *Frame) CloseR(src io.Reader) (err error) { + if f.isLegacy() { + return nil + } + if !f.Descriptor.Flags.ContentChecksum() { + return nil + } + if f.Checksum, err = f.readUint32(src); err != nil { + return err + } + if c := f.checksum.Sum32(); c != f.Checksum { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum) + } + return nil +} + +type FrameDescriptor struct { + Flags DescriptorFlags + ContentSize uint64 + Checksum uint8 +} + +func (fd *FrameDescriptor) initW() { + fd.Flags.VersionSet(1) + fd.Flags.BlockIndependenceSet(true) +} + +func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error { + if fd.Checksum > 0 { + // Header already written. + return nil + } + + buf := f.buf[:4] + // Write the magic number here even though it belongs to the Frame. + binary.LittleEndian.PutUint32(buf, f.Magic) + if !f.isLegacy() { + buf = buf[:4+2] + binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags)) + + if fd.Flags.Size() { + buf = buf[:4+2+8] + binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize) + } + fd.Checksum = descriptorChecksum(buf[4:]) + buf = append(buf, fd.Checksum) + } + + _, err := dst.Write(buf) + return err +} + +func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error { + if f.isLegacy() { + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + return nil + } + // Read the flags and the checksum, hoping that there is not content size. + buf := f.buf[:3] + if _, err := io.ReadFull(src, buf); err != nil { + return err + } + descr := binary.LittleEndian.Uint16(buf) + fd.Flags = DescriptorFlags(descr) + if fd.Flags.Size() { + // Append the 8 missing bytes. + buf = buf[:3+8] + if _, err := io.ReadFull(src, buf[3:]); err != nil { + return err + } + fd.ContentSize = binary.LittleEndian.Uint64(buf[2:]) + } + fd.Checksum = buf[len(buf)-1] // the checksum is the last byte + buf = buf[:len(buf)-1] // all descriptor fields except checksum + if c := descriptorChecksum(buf); fd.Checksum != c { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum) + } + // Validate the elements that can be. + if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() { + return lz4errors.ErrOptionInvalidBlockSize + } + return nil +} + +func descriptorChecksum(buf []byte) byte { + return byte(xxh32.ChecksumZero(buf) >> 8) +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go new file mode 100644 index 00000000000..d33a6be95c3 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go @@ -0,0 +1,103 @@ +// Code generated by `gen.exe`. DO NOT EDIT. 
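ParseHeaders above also accepts skippable frames: a magic in the 0x184D2A5x range followed by a 4-byte little-endian payload size, whose payload bytes are simply discarded. A small sketch of producing such a frame (skippableFrame is an illustrative helper, not part of the package):

package main

import (
	"bytes"
	"encoding/binary"
)

// skippableFrame builds a skippable frame: magic, little-endian payload size,
// then the payload, which readers such as ParseHeaders above discard.
func skippableFrame(userData []byte) []byte {
	var buf bytes.Buffer
	var hdr [8]byte
	binary.LittleEndian.PutUint32(hdr[:4], 0x184D2A50)            // skippable frame magic
	binary.LittleEndian.PutUint32(hdr[4:], uint32(len(userData))) // payload size
	buf.Write(hdr[:])
	buf.Write(userData)
	return buf.Bytes()
}

func main() {
	frame := skippableFrame([]byte("ignored metadata"))
	_ = frame // could be prepended to a regular LZ4 frame stream
}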
+ +package lz4stream + +import "github.com/pierrec/lz4/v4/internal/lz4block" + +// DescriptorFlags is defined as follow: +// field bits +// ----- ---- +// _ 2 +// ContentChecksum 1 +// Size 1 +// BlockChecksum 1 +// BlockIndependence 1 +// Version 2 +// _ 4 +// BlockSizeIndex 3 +// _ 1 +type DescriptorFlags uint16 + +// Getters. +func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 } +func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 } +func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 } +func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 } +func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) } +func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex { + return lz4block.BlockSizeIndex(x >> 12 & 0x7) +} + +// Setters. +func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 2 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags { + const b = 1 << 3 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 4 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { + const b = 1 << 5 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { + *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6) + return x +} +func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags { + *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12) + return x +} + +// Code generated by `gen.exe`. DO NOT EDIT. + +// DataBlockSize is defined as follow: +// field bits +// ----- ---- +// size 31 +// Uncompressed 1 +type DataBlockSize uint32 + +// Getters. +func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) } +func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 } + +// Setters. +func (x *DataBlockSize) sizeSet(v int) *DataBlockSize { + *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF + return x +} +func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize { + const b = 1 << 31 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go similarity index 79% rename from vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go rename to vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go index 7a76a6bce2b..651d10c1047 100644 --- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go @@ -1,5 +1,5 @@ // Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -// (https://github.com/Cyan4973/XXH/) +// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) package xxh32 import ( @@ -20,10 +20,7 @@ const ( // XXHZero represents an xxhash32 object with seed 0. type XXHZero struct { - v1 uint32 - v2 uint32 - v3 uint32 - v4 uint32 + v [4]uint32 totalLen uint64 buf [16]byte bufused int @@ -38,10 +35,10 @@ func (xxh XXHZero) Sum(b []byte) []byte { // Reset resets the Hash to its initial state. 
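The generated setters above pack the frame descriptor's FLG and BD bytes into a single uint16 (FLG in the low byte). A worked sketch of that packing for a typical writer configuration, done with plain shifts rather than the internal package:

package main

import "fmt"

func main() {
	var flags uint16
	flags |= 1 << 6  // Version = 1 (bits 6-7)
	flags |= 1 << 5  // BlockIndependence = true (bit 5)
	flags |= 1 << 2  // ContentChecksum = true (bit 2)
	flags |= 7 << 12 // BlockSizeIndex = 7, i.e. 4 MB blocks (bits 12-14)
	fmt.Printf("%#04x\n", flags) // 0x7064
}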
func (xxh *XXHZero) Reset() { - xxh.v1 = prime1plus2 - xxh.v2 = prime2 - xxh.v3 = 0 - xxh.v4 = prime1minus + xxh.v[0] = prime1plus2 + xxh.v[1] = prime2 + xxh.v[2] = 0 + xxh.v[3] = prime1minus xxh.totalLen = 0 xxh.bufused = 0 } @@ -51,7 +48,7 @@ func (xxh *XXHZero) Size() int { return 4 } -// BlockSize gives the minimum number of bytes accepted by Write(). +// BlockSizeIndex gives the minimum number of bytes accepted by Write(). func (xxh *XXHZero) BlockSize() int { return 1 } @@ -74,44 +71,48 @@ func (xxh *XXHZero) Write(input []byte) (int, error) { return n, nil } - p := 0 - // Causes compiler to work directly from registers instead of stack: - v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 - if m > 0 { + var buf *[16]byte + if m != 0 { // some data left from previous update - copy(xxh.buf[xxh.bufused:], input[:r]) - xxh.bufused += len(input) - r + buf = &xxh.buf + c := copy(buf[m:], input) + n -= c + input = input[c:] + } + update(&xxh.v, buf, input) + xxh.bufused = copy(xxh.buf[:], input[n-n%16:]) - // fast rotl(13) - buf := xxh.buf[:16] // BCE hint. + return n, nil +} + +// Portable version of update. This updates v by processing all of buf +// (if not nil) and all full 16-byte blocks of input. +func updateGo(v *[4]uint32, buf *[16]byte, input []byte) { + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := v[0], v[1], v[2], v[3] + + if buf != nil { v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 - p = r - xxh.bufused = 0 } - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler + for ; len(input) >= 16; input = input[16:] { + sub := input[:16] //BCE hint for compiler v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 } - xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 - - copy(xxh.buf[xxh.bufused:], input[p:]) - xxh.bufused += len(input) - p - - return n, nil + v[0], v[1], v[2], v[3] = v1, v2, v3, v4 } // Sum32 returns the 32 bits Hash value. func (xxh *XXHZero) Sum32() uint32 { h32 := uint32(xxh.totalLen) if h32 >= 16 { - h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3]) } else { h32 += prime5 } @@ -137,8 +138,8 @@ func (xxh *XXHZero) Sum32() uint32 { return h32 } -// ChecksumZero returns the 32bits Hash value. -func ChecksumZero(input []byte) uint32 { +// Portable version of ChecksumZero. +func checksumZeroGo(input []byte) uint32 { n := len(input) h32 := uint32(n) @@ -182,18 +183,6 @@ func ChecksumZero(input []byte) uint32 { return h32 } -// Uint32Zero hashes x with seed 0. 
-func Uint32Zero(x uint32) uint32 { - h := prime5 + 4 + x*prime3 - h = rol17(h) * prime4 - h ^= h >> 15 - h *= prime2 - h ^= h >> 13 - h *= prime3 - h ^= h >> 16 - return h -} - func rol1(u uint32) uint32 { return u<<1 | u>>31 } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go new file mode 100644 index 00000000000..0978b2665bd --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go @@ -0,0 +1,11 @@ +// +build !noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +// +//go:noescape +func ChecksumZero(input []byte) uint32 + +//go:noescape +func update(v *[4]uint32, buf *[16]byte, input []byte) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s new file mode 100644 index 00000000000..c18ffd57438 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s @@ -0,0 +1,251 @@ +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define p R0 +#define n R1 +#define h R2 +#define v1 R2 // Alias for h. +#define v2 R3 +#define v3 R4 +#define v4 R5 +#define x1 R6 +#define x2 R7 +#define x3 R8 +#define x4 R9 + +// We need the primes in registers. The 16-byte loop only uses prime{1,2}. +#define prime1r R11 +#define prime2r R12 +#define prime3r R3 // The rest can alias v{2-4}. +#define prime4r R4 +#define prime5r R5 + +// Update round macros. These read from and increment p. + +#define round16aligned \ + MOVM.IA.W (p), [x1, x2, x3, x4] \ + \ + MULA x1, prime2r, v1, v1 \ + MULA x2, prime2r, v2, v2 \ + MULA x3, prime2r, v3, v3 \ + MULA x4, prime2r, v4, v4 \ + \ + MOVW v1 @> 19, v1 \ + MOVW v2 @> 19, v2 \ + MOVW v3 @> 19, v3 \ + MOVW v4 @> 19, v4 \ + \ + MUL prime1r, v1 \ + MUL prime1r, v2 \ + MUL prime1r, v3 \ + MUL prime1r, v4 \ + +#define round16unaligned \ + MOVBU.P 16(p), x1 \ + MOVBU -15(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -14(p), x3 \ + MOVBU -13(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v1, v1 \ + MOVW v1 @> 19, v1 \ + MUL prime1r, v1 \ + \ + MOVBU -12(p), x1 \ + MOVBU -11(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -10(p), x3 \ + MOVBU -9(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v2, v2 \ + MOVW v2 @> 19, v2 \ + MUL prime1r, v2 \ + \ + MOVBU -8(p), x1 \ + MOVBU -7(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -6(p), x3 \ + MOVBU -5(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v3, v3 \ + MOVW v3 @> 19, v3 \ + MUL prime1r, v3 \ + \ + MOVBU -4(p), x1 \ + MOVBU -3(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -2(p), x3 \ + MOVBU -1(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v4, v4 \ + MOVW v4 @> 19, v4 \ + MUL prime1r, v4 \ + + +// func ChecksumZero([]byte) uint32 +TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16 + MOVW input_base+0(FP), p + MOVW input_len+4(FP), n + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Set up h for n < 16. It's tempting to say {ADD prime5, n, h} + // here, but that's a pseudo-op that generates a load through R11. + MOVW $const_prime5, prime5r + ADD prime5r, n, h + CMP $0, n + BEQ end + + // We let n go negative so we can do comparisons with SUB.S + // instead of separate CMP. 
+ SUB.S $16, n + BMI loop16done + + ADD prime1r, prime2r, v1 + MOVW prime2r, v2 + MOVW $0, v3 + RSB $0, prime1r, v4 + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B loop16finish + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +loop16finish: + MOVW v1 @> 31, h + ADD v2 @> 25, h + ADD v3 @> 20, h + ADD v4 @> 14, h + + // h += len(input) with v2 as temporary. + MOVW input_len+4(FP), v2 + ADD v2, h + +loop16done: + ADD $16, n // Restore number of bytes left. + + SUB.S $4, n + MOVW $const_prime3, prime3r + BMI loop4done + MOVW $const_prime4, prime4r + + TST $3, p + BNE loop4unaligned + +loop4aligned: + SUB.S $4, n + + MOVW.P 4(p), x1 + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4aligned + B loop4done + +loop4unaligned: + SUB.S $4, n + + MOVBU.P 4(p), x1 + MOVBU -3(p), x2 + ORR x2 << 8, x1 + MOVBU -2(p), x3 + ORR x3 << 16, x1 + MOVBU -1(p), x4 + ORR x4 << 24, x1 + + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4unaligned + +loop4done: + ADD.S $4, n // Restore number of bytes left. + BEQ end + + MOVW $const_prime5, prime5r + +loop1: + SUB.S $1, n + + MOVBU.P 1(p), x1 + MULA prime5r, x1, h, h + MOVW h @> 21, h + MUL prime1r, h + + BNE loop1 + +end: + MOVW $const_prime3, prime3r + EOR h >> 15, h + MUL prime2r, h + EOR h >> 13, h + MUL prime3r, h + EOR h >> 16, h + + MOVW h, ret+12(FP) + RET + + +// func update(v *[4]uint64, buf *[16]byte, p []byte) +TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20 + MOVW v+0(FP), p + MOVM.IA (p), [v1, v2, v3, v4] + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Process buf, if not nil. + MOVW buf+4(FP), p + CMP $0, p + BEQ noBuffered + + round16aligned + +noBuffered: + MOVW input_base +8(FP), p + MOVW input_len +12(FP), n + + SUB.S $16, n + BMI end + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B end + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +end: + MOVW v+0(FP), p + MOVM.IA [v1, v2, v3, v4], (p) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go new file mode 100644 index 00000000000..c96b59b8c3f --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go @@ -0,0 +1,10 @@ +// +build !arm noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) } + +func update(v *[4]uint32, buf *[16]byte, input []byte) { + updateGo(v, buf, input) +} diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go new file mode 100644 index 00000000000..a62022e0883 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/lz4.go @@ -0,0 +1,157 @@ +// Package lz4 implements reading and writing lz4 compressed data. +// +// The package supports both the LZ4 stream format, +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// and the LZ4 block format, defined at +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html. +// +// See https://github.com/lz4/lz4 for the reference C implementation. +package lz4 + +import ( + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +func _() { + // Safety checks for duplicated elements. 
+ var x [1]struct{} + _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast] + _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)] + _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)] + _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)] + _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)] +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return lz4block.CompressBlockBound(n) +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, nil) +} + +// UncompressBlockWithDict uncompresses the source buffer into the destination one using a +// dictionary, and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlockWithDict(src, dst, dict []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, dict) +} + +// A Compressor compresses data into the LZ4 block format. +// It uses a fast compression algorithm. +// +// A Compressor is not safe for concurrent use by multiple goroutines. +// +// Use a Writer to compress into the LZ4 stream format. +type Compressor struct{ c lz4block.Compressor } + +// CompressBlock compresses the source buffer src into the destination dst. +// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. +// +// If dst has length at least CompressBlockBound(len(src)), compression always +// succeeds. Otherwise, the first return value is zero. The error return is +// non-nil if the compressed data does not fit in dst, but it might fit in a +// larger buffer that is still smaller than CompressBlockBound(len(src)). The +// return value (0, nil) means the data is likely incompressible and a buffer +// of length CompressBlockBound(len(src)) should be passed in. +func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { + return c.c.CompressBlock(src, dst) +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. + +// CompressBlock is equivalent to Compressor.CompressBlock. +// The final argument is ignored and should be set to nil. +// +// This function is deprecated. Use a Compressor instead. +func CompressBlock(src, dst []byte, _ []int) (int, error) { + return lz4block.CompressBlock(src, dst) +} + +// A CompressorHC compresses data into the LZ4 block format. +// Its compression ratio is potentially better than that of a Compressor, +// but it is also slower and requires more memory. +// +// A Compressor is not safe for concurrent use by multiple goroutines. 
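A usage sketch of the block API declared in this file (Compressor, CompressBlockBound, UncompressBlock); error handling is kept minimal and the sample data is arbitrary:

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 block api example "), 100)

	// Compress with the fast block compressor.
	var c lz4.Compressor
	compressed := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, compressed)
	if err != nil {
		panic(err)
	}
	if n == 0 {
		// Data is likely incompressible; store it uncompressed instead.
		fmt.Println("incompressible")
		return
	}
	compressed = compressed[:n]

	// Decompress; the destination must be large enough for the original data.
	decompressed := make([]byte, len(src))
	m, err := lz4.UncompressBlock(compressed, decompressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(m == len(src), bytes.Equal(decompressed[:m], src)) // true true
}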
+// +// Use a Writer to compress into the LZ4 stream format. +type CompressorHC struct { + // Level is the maximum search depth for compression. + // Values <= 0 mean no maximum. + Level CompressionLevel + c lz4block.CompressorHC +} + +// CompressBlock compresses the source buffer src into the destination dst. +// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. +// +// If dst has length at least CompressBlockBound(len(src)), compression always +// succeeds. Otherwise, the first return value is zero. The error return is +// non-nil if the compressed data does not fit in dst, but it might fit in a +// larger buffer that is still smaller than CompressBlockBound(len(src)). The +// return value (0, nil) means the data is likely incompressible and a buffer +// of length CompressBlockBound(len(src)) should be passed in. +func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) { + return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level)) +} + +// CompressBlockHC is equivalent to CompressorHC.CompressBlock. +// The final two arguments are ignored and should be set to nil. +// +// This function is deprecated. Use a CompressorHC instead. +func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) { + return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth)) +} + +const ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer + // ErrInvalidFrame is returned when reading an invalid LZ4 archive. + ErrInvalidFrame = lz4errors.ErrInvalidFrame + // ErrInternalUnhandledState is an internal error. + ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState + // ErrInvalidHeaderChecksum is returned when reading a frame. + ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum + // ErrInvalidBlockChecksum is returned when reading a frame. + ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum + // ErrInvalidFrameChecksum is returned when reading a frame. + ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum + // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid. + ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel + // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object. + ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError + // ErrOptionInvalidBlockSize is returned when + ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize + // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it. + ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable + // ErrWriterNotClosed is returned when attempting to reset an unclosed writer. 
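The same compression step through the high-compression block compressor declared above; Level9 is the slowest, highest-ratio level defined in options.go:

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("high compression example "), 100)

	hc := lz4.CompressorHC{Level: lz4.Level9}
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := hc.CompressBlock(src, dst)
	if err != nil || n == 0 {
		panic("compression failed or data incompressible")
	}
	fmt.Printf("HC: %d -> %d bytes\n", len(src), n)
}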
+ ErrWriterNotClosed = lz4errors.ErrWriterNotClosed +) diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go new file mode 100644 index 00000000000..57a44e767dc --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -0,0 +1,242 @@ +package lz4 + +import ( + "fmt" + "reflect" + "runtime" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go + +type ( + applier interface { + Apply(...Option) error + private() + } + // Option defines the parameters to setup an LZ4 Writer or Reader. + Option func(applier) error +) + +// String returns a string representation of the option with its parameter(s). +func (o Option) String() string { + return o(nil).Error() +} + +// Default options. +var ( + DefaultBlockSizeOption = BlockSizeOption(Block4Mb) + DefaultChecksumOption = ChecksumOption(true) + DefaultConcurrency = ConcurrencyOption(1) + defaultOnBlockDone = OnBlockDoneOption(nil) +) + +const ( + Block64Kb BlockSize = 1 << (16 + iota*2) + Block256Kb + Block1Mb + Block4Mb +) + +// BlockSizeIndex defines the size of the blocks to be compressed. +type BlockSize uint32 + +// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb). +func BlockSizeOption(size BlockSize) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("BlockSizeOption(%s)", size) + return lz4errors.Error(s) + case *Writer: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil + case *CompressingReader: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// BlockChecksumOption enables or disables block checksum (default=false). +func BlockChecksumOption(flag bool) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("BlockChecksumOption(%v)", flag) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil + case *CompressingReader: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// ChecksumOption enables/disables all blocks or content checksum (default=true). +func ChecksumOption(flag bool) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("ChecksumOption(%v)", flag) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil + case *CompressingReader: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the +// whole uncompressed data stream. 
+func SizeOption(size uint64) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("SizeOption(%d)", size) + return lz4errors.Error(s) + case *Writer: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil + case *CompressingReader: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// ConcurrencyOption sets the number of go routines used for compression. +// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used. +func ConcurrencyOption(n int) Option { + if n <= 0 { + n = runtime.GOMAXPROCS(0) + } + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("ConcurrencyOption(%d)", n) + return lz4errors.Error(s) + case *Writer: + rw.num = n + return nil + case *Reader: + rw.num = n + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// CompressionLevel defines the level of compression to use. The higher the better, but slower, compression. +type CompressionLevel uint32 + +const ( + Fast CompressionLevel = 0 + Level1 CompressionLevel = 1 << (8 + iota) + Level2 + Level3 + Level4 + Level5 + Level6 + Level7 + Level8 + Level9 +) + +// CompressionLevelOption defines the compression level (default=Fast). +func CompressionLevelOption(level CompressionLevel) Option { + return func(a applier) error { + switch w := a.(type) { + case nil: + s := fmt.Sprintf("CompressionLevelOption(%s)", level) + return lz4errors.Error(s) + case *Writer: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil + case *CompressingReader: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +func onBlockDone(int) {} + +// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when is has been compressed, +// for a Reader, it is when it has been uncompressed. +func OnBlockDoneOption(handler func(size int)) Option { + if handler == nil { + handler = onBlockDone + } + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String()) + return lz4errors.Error(s) + case *Writer: + rw.handler = handler + return nil + case *Reader: + rw.handler = handler + return nil + case *CompressingReader: + rw.handler = handler + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} + +// LegacyOption provides support for writing LZ4 frames in the legacy format. +// +// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame. +// +// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where +// the compressed stream is followed by the original (uncompressed) size of +// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf). +// This is also supported as a special case. 
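// A short sketch of configuring a Writer with the options defined in this file
// (v4 import path assumed; the output file name is only illustrative). Apply has to
// be called before the first Write, while the Writer is still in its initial state.
package main

import (
	"log"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	f, err := os.Create("/tmp/data.lz4") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := lz4.NewWriter(f)
	if err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),    // smaller blocks, lower memory use
		lz4.CompressionLevelOption(lz4.Level5), // trade speed for ratio
		lz4.ConcurrencyOption(-1),              // <= 0 selects runtime.GOMAXPROCS(0)
		lz4.OnBlockDoneOption(func(n int) { log.Printf("block written: %d bytes", n) }),
	); err != nil {
		log.Fatal(err)
	}

	if _, err := zw.Write([]byte("some data to compress")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}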
+func LegacyOption(legacy bool) Option { + return func(a applier) error { + switch rw := a.(type) { + case nil: + s := fmt.Sprintf("LegacyOption(%v)", legacy) + return lz4errors.Error(s) + case *Writer: + rw.legacy = legacy + return nil + } + return lz4errors.ErrOptionNotApplicable + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go new file mode 100644 index 00000000000..2de814909ef --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go @@ -0,0 +1,92 @@ +// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Block64Kb-65536] + _ = x[Block256Kb-262144] + _ = x[Block1Mb-1048576] + _ = x[Block4Mb-4194304] +} + +const ( + _BlockSize_name_0 = "Block64Kb" + _BlockSize_name_1 = "Block256Kb" + _BlockSize_name_2 = "Block1Mb" + _BlockSize_name_3 = "Block4Mb" +) + +func (i BlockSize) String() string { + switch { + case i == 65536: + return _BlockSize_name_0 + case i == 262144: + return _BlockSize_name_1 + case i == 1048576: + return _BlockSize_name_2 + case i == 4194304: + return _BlockSize_name_3 + default: + return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Fast-0] + _ = x[Level1-512] + _ = x[Level2-1024] + _ = x[Level3-2048] + _ = x[Level4-4096] + _ = x[Level5-8192] + _ = x[Level6-16384] + _ = x[Level7-32768] + _ = x[Level8-65536] + _ = x[Level9-131072] +} + +const ( + _CompressionLevel_name_0 = "Fast" + _CompressionLevel_name_1 = "Level1" + _CompressionLevel_name_2 = "Level2" + _CompressionLevel_name_3 = "Level3" + _CompressionLevel_name_4 = "Level4" + _CompressionLevel_name_5 = "Level5" + _CompressionLevel_name_6 = "Level6" + _CompressionLevel_name_7 = "Level7" + _CompressionLevel_name_8 = "Level8" + _CompressionLevel_name_9 = "Level9" +) + +func (i CompressionLevel) String() string { + switch { + case i == 0: + return _CompressionLevel_name_0 + case i == 512: + return _CompressionLevel_name_1 + case i == 1024: + return _CompressionLevel_name_2 + case i == 2048: + return _CompressionLevel_name_3 + case i == 4096: + return _CompressionLevel_name_4 + case i == 8192: + return _CompressionLevel_name_5 + case i == 16384: + return _CompressionLevel_name_6 + case i == 32768: + return _CompressionLevel_name_7 + case i == 65536: + return _CompressionLevel_name_8 + case i == 131072: + return _CompressionLevel_name_9 + default: + return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go new file mode 100644 index 00000000000..275daad7cb9 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/reader.go @@ -0,0 +1,275 @@ +package lz4 + +import ( + "bytes" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var readerStates = []aState{ + noState: newState, + errorState: newState, + newState: readState, + readState: closedState, + closedState: newState, +} + +// NewReader returns a new LZ4 frame decoder. 
+func NewReader(r io.Reader) *Reader { + return newReader(r, false) +} + +func newReader(r io.Reader, legacy bool) *Reader { + zr := &Reader{frame: lz4stream.NewFrame()} + zr.state.init(readerStates) + _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone) + zr.Reset(r) + return zr +} + +// Reader allows reading an LZ4 stream. +type Reader struct { + state _State + src io.Reader // source reader + num int // concurrency level + frame *lz4stream.Frame // frame being read + data []byte // block buffer allocated in non concurrent mode + reads chan []byte // pending data + idx int // size of pending data + handler func(int) + cum uint32 + dict []byte +} + +func (*Reader) private() {} + +func (r *Reader) Apply(options ...Option) (err error) { + defer r.state.check(&err) + switch r.state.state { + case newState: + case errorState: + return r.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + for _, o := range options { + if err = o(r); err != nil { + return + } + } + return +} + +// Size returns the size of the underlying uncompressed data, if set in the stream. +func (r *Reader) Size() int { + switch r.state.state { + case readState, closedState: + if r.frame.Descriptor.Flags.Size() { + return int(r.frame.Descriptor.ContentSize) + } + } + return 0 +} + +func (r *Reader) isNotConcurrent() bool { + return r.num == 1 +} + +func (r *Reader) init() error { + err := r.frame.ParseHeaders(r.src) + if err != nil { + return err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + // We can't decompress dependent blocks concurrently. + // Instead of throwing an error to the user, silently drop concurrency + r.num = 1 + } + data, err := r.frame.InitR(r.src, r.num) + if err != nil { + return err + } + r.reads = data + r.idx = 0 + size := r.frame.Descriptor.Flags.BlockSizeIndex() + r.data = size.Get() + r.cum = 0 + return nil +} + +func (r *Reader) Read(buf []byte) (n int, err error) { + defer r.state.check(&err) + switch r.state.state { + case readState: + case closedState, errorState: + return 0, r.state.err + case newState: + // First initialization. + if err = r.init(); r.state.next(err) { + return + } + default: + return 0, r.state.fail() + } + for len(buf) > 0 { + var bn int + if r.idx == 0 { + if r.isNotConcurrent() { + bn, err = r.read(buf) + } else { + lz4block.Put(r.data) + r.data = <-r.reads + if len(r.data) == 0 { + // No uncompressed data: something went wrong or we are done. + err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + if er := r.frame.CloseR(r.src); er != nil { + err = er + } + lz4block.Put(r.data) + r.data = nil + return + default: + return + } + } + if bn == 0 { + // Fill buf with buffered data. + bn = copy(buf, r.data[r.idx:]) + r.idx += bn + if r.idx == len(r.data) { + // All data read, get ready for the next Read. + r.idx = 0 + } + } + buf = buf[bn:] + n += bn + r.handler(bn) + } + return +} + +// read uncompresses the next block as follow: +// - if buf has enough room, the block is uncompressed into it directly +// and the lenght of used space is returned +// - else, the uncompress data is stored in r.data and 0 is returned +func (r *Reader) read(buf []byte) (int, error) { + block := r.frame.Blocks.Block + _, err := block.Read(r.frame, r.src, r.cum) + if err != nil { + return 0, err + } + var direct bool + dst := r.data[:cap(r.data)] + if len(buf) >= len(dst) { + // Uncompress directly into buf. 
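// A minimal decompression sketch for the Reader above (v4 import path assumed,
// input path illustrative): io.Copy streams the frame and can take the Reader's
// WriteTo fast path.
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	f, err := os.Open("/tmp/data.lz4") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}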
+ direct = true + dst = buf + } + dst, err = block.Uncompress(r.frame, dst, r.dict, true) + if err != nil { + return 0, err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + if len(r.dict)+len(dst) > 128*1024 { + preserveSize := 64*1024 - len(dst) + if preserveSize < 0 { + preserveSize = 0 + } + r.dict = r.dict[len(r.dict)-preserveSize:] + } + r.dict = append(r.dict, dst...) + } + r.cum += uint32(len(dst)) + if direct { + return len(dst), nil + } + r.data = dst + return 0, nil +} + +// Reset clears the state of the Reader r such that it is equivalent to its +// initial state from NewReader, but instead reading from reader. +// No access to reader is performed. +func (r *Reader) Reset(reader io.Reader) { + if r.data != nil { + lz4block.Put(r.data) + r.data = nil + } + r.frame.Reset(r.num) + r.state.reset() + r.src = reader + r.reads = nil +} + +// WriteTo efficiently uncompresses the data from the Reader underlying source to w. +func (r *Reader) WriteTo(w io.Writer) (n int64, err error) { + switch r.state.state { + case closedState, errorState: + return 0, r.state.err + case newState: + if err = r.init(); r.state.next(err) { + return + } + default: + return 0, r.state.fail() + } + defer r.state.nextd(&err) + + var data []byte + if r.isNotConcurrent() { + size := r.frame.Descriptor.Flags.BlockSizeIndex() + data = size.Get() + defer lz4block.Put(data) + } + for { + var bn int + var dst []byte + if r.isNotConcurrent() { + bn, err = r.read(data) + dst = data[:bn] + } else { + lz4block.Put(dst) + dst = <-r.reads + bn = len(dst) + if bn == 0 { + // No uncompressed data: something went wrong or we are done. + err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + err = r.frame.CloseR(r.src) + return + default: + return + } + r.handler(bn) + bn, err = w.Write(dst) + n += int64(bn) + if err != nil { + return + } + } +} + +// ValidFrameHeader returns a bool indicating if the given bytes slice matches a LZ4 header. +func ValidFrameHeader(in []byte) (bool, error) { + f := lz4stream.NewFrame() + err := f.ParseHeaders(bytes.NewReader(in)) + if err == nil { + return true, nil + } + if err == lz4errors.ErrInvalidFrame { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go new file mode 100644 index 00000000000..d94f04d05eb --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state.go @@ -0,0 +1,75 @@ +package lz4 + +import ( + "errors" + "fmt" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go + +const ( + noState aState = iota // uninitialized reader + errorState // unrecoverable error encountered + newState // instantiated object + readState // reading data + writeState // writing data + closedState // all done +) + +type ( + aState uint8 + _State struct { + states []aState + state aState + err error + } +) + +func (s *_State) init(states []aState) { + s.states = states + s.state = states[0] +} + +func (s *_State) reset() { + s.state = s.states[0] + s.err = nil +} + +// next sets the state to the next one unless it is passed a non nil error. +// It returns whether or not it is in error. +func (s *_State) next(err error) bool { + if err != nil { + s.err = fmt.Errorf("%s: %w", s.state, err) + s.state = errorState + return true + } + s.state = s.states[s.state] + return false +} + +// nextd is like next but for defers. 
+func (s *_State) nextd(errp *error) bool { + return errp != nil && s.next(*errp) +} + +// check sets s in error if not already in error and if the error is not nil or io.EOF, +func (s *_State) check(errp *error) { + if s.state == errorState || errp == nil { + return + } + if err := *errp; err != nil { + s.err = fmt.Errorf("%w[%s]", err, s.state) + if !errors.Is(err, io.EOF) { + s.state = errorState + } + } +} + +func (s *_State) fail() error { + s.state = errorState + s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state) + return s.err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go new file mode 100644 index 00000000000..75fb8289243 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[noState-0] + _ = x[errorState-1] + _ = x[newState-2] + _ = x[readState-3] + _ = x[writeState-4] + _ = x[closedState-5] +} + +const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState" + +var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55} + +func (i aState) String() string { + if i >= aState(len(_aState_index)-1) { + return "aState(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _aState_name[_aState_index[i]:_aState_index[i+1]] +} diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go new file mode 100644 index 00000000000..4358adee109 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -0,0 +1,242 @@ +package lz4 + +import ( + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var writerStates = []aState{ + noState: newState, + newState: writeState, + writeState: closedState, + closedState: newState, + errorState: newState, +} + +// NewWriter returns a new LZ4 frame encoder. +func NewWriter(w io.Writer) *Writer { + zw := &Writer{frame: lz4stream.NewFrame()} + zw.state.init(writerStates) + _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone) + zw.Reset(w) + return zw +} + +// Writer allows writing an LZ4 stream. +type Writer struct { + state _State + src io.Writer // destination writer + level lz4block.CompressionLevel // how hard to try + num int // concurrency level + frame *lz4stream.Frame // frame being built + data []byte // pending data + idx int // size of pending data + handler func(int) + legacy bool +} + +func (*Writer) private() {} + +func (w *Writer) Apply(options ...Option) (err error) { + defer w.state.check(&err) + switch w.state.state { + case newState: + case errorState: + return w.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + w.Reset(w.src) + for _, o := range options { + if err = o(w); err != nil { + return + } + } + return +} + +func (w *Writer) isNotConcurrent() bool { + return w.num == 1 +} + +// init sets up the Writer when in newState. It does not change the Writer state. 
+func (w *Writer) init() error { + w.frame.InitW(w.src, w.num, w.legacy) + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + w.idx = 0 + return w.frame.Descriptor.Write(w.frame, w.src) +} + +func (w *Writer) Write(buf []byte) (n int, err error) { + defer w.state.check(&err) + switch w.state.state { + case writeState: + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + + zn := len(w.data) + for len(buf) > 0 { + if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err = w.write(buf[:zn], false); err != nil { + return + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(w.data[w.idx:], buf) + n += m + w.idx += m + buf = buf[m:] + + if w.idx < len(w.data) { + // Buffer not filled. + return + } + + // Buffer full. + if err = w.write(w.data, true); err != nil { + return + } + if !w.isNotConcurrent() { + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + } + w.idx = 0 + } + return +} + +func (w *Writer) write(data []byte, safe bool) error { + if w.isNotConcurrent() { + block := w.frame.Blocks.Block + err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src) + w.handler(len(block.Data)) + return err + } + c := make(chan *lz4stream.FrameDataBlock) + w.frame.Blocks.Blocks <- c + go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) { + b := lz4stream.NewFrameDataBlock(w.frame) + c <- b.Compress(w.frame, data, w.level) + <-c + w.handler(len(b.Data)) + b.Close(w.frame) + if safe { + // safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed + lz4block.Put(data) + } + }(c, data, safe) + + return nil +} + +// Flush any buffered data to the underlying writer immediately. +func (w *Writer) Flush() (err error) { + switch w.state.state { + case writeState: + case errorState: + return w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return nil + } + + if w.idx > 0 { + // Flush pending data, disable w.data freeing as it is done later on. + if err = w.write(w.data[:w.idx], false); err != nil { + return err + } + w.idx = 0 + } + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying writer +// without closing it. +func (w *Writer) Close() error { + if err := w.Flush(); err != nil { + return err + } + err := w.frame.CloseW(w.src, w.num) + // It is now safe to free the buffer. + if w.data != nil { + lz4block.Put(w.data) + w.data = nil + } + return err +} + +// Reset clears the state of the Writer w such that it is equivalent to its +// initial state from NewWriter, but instead writing to writer. +// Reset keeps the previous options unless overwritten by the supplied ones. +// No access to writer is performed. +// +// w.Close must be called before Reset or pending data may be dropped. +func (w *Writer) Reset(writer io.Writer) { + w.frame.Reset(w.num) + w.state.reset() + w.src = writer +} + +// ReadFrom efficiently reads from r and compressed into the Writer destination. 
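// A compression sketch exercising the Writer's ReadFrom path described here
// (v4 import path assumed, file names illustrative). Close must follow to flush
// any buffered data and write the end-of-frame marker.
package main

import (
	"log"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	in, err := os.Open("/tmp/data.raw") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("/tmp/data.lz4") // hypothetical output
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out)
	if _, err := zw.ReadFrom(in); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}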
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + switch w.state.state { + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + defer w.state.check(&err) + + size := w.frame.Descriptor.Flags.BlockSizeIndex() + var done bool + var rn int + data := size.Get() + if w.isNotConcurrent() { + // Keep the same buffer for the whole process. + defer lz4block.Put(data) + } + for !done { + rn, err = io.ReadFull(r, data) + switch err { + case nil: + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + done = true + default: + return + } + n += int64(rn) + err = w.write(data[:rn], true) + if err != nil { + return + } + w.handler(rn) + if !done && !w.isNotConcurrent() { + // The buffer will be returned automatically by go routines (safe=true) + // so get a new one fo the next round. + data = size.Get() + } + } + return +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go deleted file mode 100644 index f066d56305e..00000000000 --- a/vendor/github.com/pierrec/lz4/writer.go +++ /dev/null @@ -1,422 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "runtime" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// zResult contains the results of compressing a block. -type zResult struct { - size uint32 // Block header - data []byte // Compressed data - checksum uint32 // Data checksum -} - -// Writer implements the LZ4 frame encoder. -type Writer struct { - Header - // Handler called when a block has been successfully written out. - // It provides the number of bytes written. - OnBlockDone func(size int) - - buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes - dst io.Writer // Destination. - checksum xxh32.XXHZero // Frame checksum. - data []byte // Data to be compressed + buffer for compressed data. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). - - // For concurrency. - c chan chan zResult // Channel for block compression goroutines and writer goroutine. - err error // Any error encountered while writing to the underlying destination. -} - -// NewWriter returns a new LZ4 frame encoder. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriter(dst io.Writer) *Writer { - z := new(Writer) - z.Reset(dst) - return z -} - -// WithConcurrency sets the number of concurrent go routines used for compression. -// A negative value sets the concurrency to GOMAXPROCS. -func (z *Writer) WithConcurrency(n int) *Writer { - switch { - case n == 0 || n == 1: - z.c = nil - return z - case n < 0: - n = runtime.GOMAXPROCS(0) - } - z.c = make(chan chan zResult, n) - // Writer goroutine managing concurrent block compression goroutines. - go func() { - // Process next block compression item. - for c := range z.c { - // Read the next compressed block result. - // Waiting here ensures that the blocks are output in the order they were sent. - // The incoming channel is always closed as it indicates to the caller that - // the block has been processed. - res := <-c - n := len(res.data) - if n == 0 { - // Notify the block compression routine that we are done with its result. - // This is used when a sentinel block is sent to terminate the compression. 
- close(c) - return - } - // Write the block. - if err := z.writeUint32(res.size); err != nil && z.err == nil { - z.err = err - } - if _, err := z.dst.Write(res.data); err != nil && z.err == nil { - z.err = err - } - if z.BlockChecksum { - if err := z.writeUint32(res.checksum); err != nil && z.err == nil { - z.err = err - } - } - // It is now safe to release the buffer as no longer in use by any goroutine. - putBuffer(cap(res.data), res.data) - if h := z.OnBlockDone; h != nil { - h(n) - } - close(c) - } - }() - return z -} - -// newBuffers instantiates new buffers which size matches the one in Header. -// The returned buffers are for decompression and compression respectively. -func (z *Writer) newBuffers() { - bSize := z.Header.BlockMaxSize - buf := getBuffer(bSize) - z.data = buf[:bSize] // Uncompressed buffer is the first half. -} - -// freeBuffers puts the writer's buffers back to the pool. -func (z *Writer) freeBuffers() { - // Put the buffer back into the pool, if any. - putBuffer(z.Header.BlockMaxSize, z.data) - z.data = nil -} - -// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. -func (z *Writer) writeHeader() error { - // Default to 4Mb if BlockMaxSize is not set. - if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = blockSize4M - } - // The only option that needs to be validated. - bSize := z.Header.BlockMaxSize - if !isValidBlockSize(z.Header.BlockMaxSize) { - return fmt.Errorf("lz4: invalid block max size: %d", bSize) - } - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - z.newBuffers() - z.idx = 0 - - // Size is optional. - buf := z.buf[:] - - // Set the fixed size data: magic number, block max size and flags. - binary.LittleEndian.PutUint32(buf[0:], frameMagic) - flg := byte(Version << 6) - flg |= 1 << 5 // No block dependency. - if z.Header.BlockChecksum { - flg |= 1 << 4 - } - if z.Header.Size > 0 { - flg |= 1 << 3 - } - if !z.Header.NoChecksum { - flg |= 1 << 2 - } - buf[4] = flg - buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 - - // Current buffer size: magic(4) + flags(1) + block max size (1). - n := 6 - // Optional items. - if z.Header.Size > 0 { - binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) - n += 8 - } - - // The header checksum includes the flags, block max size and optional Size. - buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) - z.checksum.Reset() - - // Header ready, write it out. - if _, err := z.dst.Write(buf[0 : n+1]); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. -func (z *Writer) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. 
- if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// compressBlock compresses a block. -func (z *Writer) compressBlock(data []byte) error { - if !z.NoChecksum { - _, _ = z.checksum.Write(data) - } - - if z.c != nil { - c := make(chan zResult) - z.c <- c // Send now to guarantee order - - // get a buffer from the pool and copy the data over - block := getBuffer(z.Header.BlockMaxSize)[:len(data)] - copy(block, data) - - go writerCompressBlock(c, z.Header, block) - return nil - } - - zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] - // The compressed block size cannot exceed the input's. - var zn int - - if level := z.Header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - zn, _ = CompressBlock(data, zdata, z.hashtable[:]) - } - - var bLen uint32 - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - if zn > 0 && zn < len(data) { - // Compressible and compressed size smaller than uncompressed: ok! - bLen = uint32(zn) - zdata = zdata[:zn] - } else { - // Uncompressed block. - bLen = uint32(len(data)) | compressedBlockFlag - zdata = data - } - if debugFlag { - debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) - } - - // Write the block. - if err := z.writeUint32(bLen); err != nil { - return err - } - written, err := z.dst.Write(zdata) - if err != nil { - return err - } - if h := z.OnBlockDone; h != nil { - h(written) - } - - if !z.BlockChecksum { - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - return nil - } - checksum := xxh32.ChecksumZero(zdata) - if debugFlag { - debug("block checksum %x", checksum) - defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() - } - return z.writeUint32(checksum) -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *Writer) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])] - copy(data, z.data[:z.idx]) - - z.idx = 0 - if z.c == nil { - return z.compressBlock(data) - } - if !z.NoChecksum { - _, _ = z.checksum.Write(data) - } - c := make(chan zResult) - z.c <- c - writerCompressBlock(c, z.Header, data) - return nil -} - -func (z *Writer) close() error { - if z.c == nil { - return nil - } - // Send a sentinel block (no data to compress) to terminate the writer main goroutine. - c := make(chan zResult) - z.c <- c - c <- zResult{} - // Wait for the main goroutine to complete. - <-c - // At this point the main goroutine has shut down or is about to return. - z.c = nil - return z.err -} - -// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
-func (z *Writer) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - if err := z.Flush(); err != nil { - return err - } - if err := z.close(); err != nil { - return err - } - z.freeBuffers() - - if debugFlag { - debug("writing last empty block") - } - if err := z.writeUint32(0); err != nil { - return err - } - if z.NoChecksum { - return nil - } - checksum := z.checksum.Sum32() - if debugFlag { - debug("stream checksum %x", checksum) - } - return z.writeUint32(checksum) -} - -// Reset clears the state of the Writer z such that it is equivalent to its -// initial state from NewWriter, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *Writer) Reset(w io.Writer) { - n := cap(z.c) - _ = z.close() - z.freeBuffers() - z.Header.Reset() - z.dst = w - z.checksum.Reset() - z.idx = 0 - z.err = nil - // reset hashtable to ensure deterministic output. - for i := range z.hashtable { - z.hashtable[i] = 0 - } - z.WithConcurrency(n) -} - -// writeUint32 writes a uint32 to the underlying writer. -func (z *Writer) writeUint32(x uint32) error { - buf := z.buf[:4] - binary.LittleEndian.PutUint32(buf, x) - _, err := z.dst.Write(buf) - return err -} - -// writerCompressBlock compresses data into a pooled buffer and writes its result -// out to the input channel. -func writerCompressBlock(c chan zResult, header Header, data []byte) { - zdata := getBuffer(header.BlockMaxSize) - // The compressed block size cannot exceed the input's. - var zn int - if level := header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - var hashTable [winSize]int - zn, _ = CompressBlock(data, zdata, hashTable[:]) - } - var res zResult - if zn > 0 && zn < len(data) { - res.size = uint32(zn) - res.data = zdata[:zn] - // release the uncompressed block since it is not used anymore - putBuffer(header.BlockMaxSize, data) - } else { - res.size = uint32(len(data)) | compressedBlockFlag - res.data = data - // release the compressed block since it was not used - putBuffer(header.BlockMaxSize, zdata) - } - if header.BlockChecksum { - res.checksum = xxh32.ChecksumZero(res.data) - } - c <- res -} diff --git a/vendor/github.com/pierrec/lz4/writer_legacy.go b/vendor/github.com/pierrec/lz4/writer_legacy.go deleted file mode 100644 index ca8dc8c7f0c..00000000000 --- a/vendor/github.com/pierrec/lz4/writer_legacy.go +++ /dev/null @@ -1,182 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "io" -) - -// WriterLegacy implements the LZ4Demo frame decoder. -type WriterLegacy struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - dst io.Writer // Destination. - data []byte // Data to be compressed + buffer for compressed data. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). -} - -// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriterLegacy(dst io.Writer) *WriterLegacy { - z := new(WriterLegacy) - z.Reset(dst) - return z -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. 
-func (z *WriterLegacy) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. - if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// writeHeader builds and writes the header to the underlying io.Writer. -func (z *WriterLegacy) writeHeader() error { - // Legacy has fixed 8MB blocksizes - // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame - bSize := 2 * blockSize4M - - buf := make([]byte, 2*bSize, 2*bSize) - z.data = buf[:bSize] // Uncompressed buffer is the first half. - - z.idx = 0 - - // Header consists of one mageic number, write it out. - if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// compressBlock compresses a block. -func (z *WriterLegacy) compressBlock(data []byte) error { - bSize := 2 * blockSize4M - zdata := z.data[bSize:cap(z.data)] - // The compressed block size cannot exceed the input's. - var zn int - - if level := z.Header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - zn, _ = CompressBlock(data, zdata, z.hashtable[:]) - } - - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - zdata = zdata[:zn] - - // Write the block. - if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil { - return err - } - written, err := z.dst.Write(zdata) - if err != nil { - return err - } - if h := z.OnBlockDone; h != nil { - h(written) - } - return nil -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *WriterLegacy) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - data := z.data[:z.idx] - z.idx = 0 - return z.compressBlock(data) -} - -// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. -func (z *WriterLegacy) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - if err := z.Flush(); err != nil { - return err - } - - if debugFlag { - debug("writing last empty block") - } - - return nil -} - -// Reset clears the state of the WriterLegacy z such that it is equivalent to its -// initial state from NewWriterLegacy, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *WriterLegacy) Reset(w io.Writer) { - z.Header.Reset() - z.dst = w - z.idx = 0 - // reset hashtable to ensure deterministic output. 
- for i := range z.hashtable { - z.hashtable[i] = 0 - } -} diff --git a/vendor/github.com/spectrocloud-labs/herd/dag.go b/vendor/github.com/rancher/yip/pkg/dag/dag.go similarity index 57% rename from vendor/github.com/spectrocloud-labs/herd/dag.go rename to vendor/github.com/rancher/yip/pkg/dag/dag.go index 0da0257accb..cb2a7e1152f 100644 --- a/vendor/github.com/spectrocloud-labs/herd/dag.go +++ b/vendor/github.com/rancher/yip/pkg/dag/dag.go @@ -1,13 +1,14 @@ -package herd +package dag import ( "context" "fmt" + "slices" "sync" + "time" "github.com/hashicorp/go-multierror" "github.com/kendru/darwin/go/depgraph" - "github.com/samber/lo" ) // Graph represents a directed graph. @@ -30,6 +31,7 @@ type GraphEntry struct { Name string Dependencies []string WeakDependencies []string + Duration time.Duration } // DAG creates a new instance of a runnable Graph. @@ -126,7 +128,7 @@ func (g *Graph) Run(ctx context.Context) error { if !r.WeakDeps { for k := range g.Graph.Dependencies(r.Name) { - if len(r.WeakDependencies) != 0 && lo.Contains(r.WeakDependencies, k) { + if len(r.WeakDependencies) != 0 && slices.Contains(r.WeakDependencies, k) { continue } @@ -156,6 +158,7 @@ func (g *Graph) Run(ctx context.Context) error { } go func(ctx context.Context, g *Graph, key string, f func(context.Context) error) { + now := time.Now() err := f(ctx) g.ops[key].Lock() if err != nil { @@ -168,6 +171,7 @@ func (g *Graph) Run(ctx context.Context) error { } else if g.collectOrphans { g.orphans.Done() } + g.ops[key].duration = time.Since(now) g.ops[key].Unlock() }(ctx, g, r.Name, fns[i]) } @@ -189,3 +193,113 @@ func (g *Graph) Run(ctx context.Context) error { } return nil } + +type OpState struct { + sync.Mutex + fn []func(context.Context) error + err error + fatal bool + background bool + executed bool + weak bool + weakdeps []string + deps []string + ignore bool + duration time.Duration +} + +func (o *OpState) toGraphEntry(name string) GraphEntry { + return GraphEntry{ + WithCallback: o.fn != nil, + Callback: o.fn, + Error: o.err, + Executed: o.executed, + Background: o.background, + WeakDeps: o.weak, + Dependencies: o.deps, + WeakDependencies: o.weakdeps, + Fatal: o.fatal, + Name: name, + Ignored: o.ignore, + Duration: o.duration, + } +} + +// GraphOption it's the option for the DAG graph. +type GraphOption func(g *Graph) + +// EnableInit enables an Init jobs that takes paternity +// of orphan jobs without dependencies. +var EnableInit GraphOption = func(g *Graph) { + g.init = true +} + +// CollectOrphans enables orphan job collection. +var CollectOrphans GraphOption = func(g *Graph) { + g.collectOrphans = true +} + +// OpOption defines the operation settings. +type OpOption func(string, *OpState, *Graph) error + +var NoOp OpOption = func(s string, os *OpState, g *Graph) error { return nil } + +// FatalOp makes the operation fatal. +// Any error will make the DAG to stop and return the error immediately. +var FatalOp OpOption = func(key string, os *OpState, g *Graph) error { + os.fatal = true + return nil +} + +// Background runs the operation in the background. +var Background OpOption = func(key string, os *OpState, g *Graph) error { + os.background = true + return nil +} + +// WeakDeps sets all the dependencies of the job as "weak". +// Any failure of the jobs which depends on won't impact running the job. +// By default, a failure job will make also fail all the children - this is option +// disables this behavor and make the child start too. 
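// A minimal sketch of driving the dag package introduced above, mirroring how the
// yip executor wires stages together: ops are registered with callbacks and
// dependencies, then the whole graph is walked with Run. Op names here are
// hypothetical; import path as vendored (github.com/rancher/yip/pkg/dag).
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/rancher/yip/pkg/dag"
)

func main() {
	g := dag.DAG(dag.EnableInit)

	g.Add("write-config", dag.WithCallback(func(ctx context.Context) error {
		fmt.Println("writing config")
		return nil
	}))

	// "start-service" only runs once "write-config" has completed.
	g.Add("start-service",
		dag.WithDeps("write-config"),
		dag.WithCallback(func(ctx context.Context) error {
			fmt.Println("starting service")
			return nil
		}),
	)

	if err := g.Run(context.Background()); err != nil {
		log.Fatal(err)
	}
}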
+var WeakDeps OpOption = func(key string, os *OpState, g *Graph) error { + os.weak = true + return nil +} + +// WithWeakDeps defines dependencies that doesn't prevent the op to trigger. +func WithWeakDeps(deps ...string) OpOption { + return func(key string, os *OpState, g *Graph) error { + + err := WithDeps(deps...)(key, os, g) + if err != nil { + return err + } + os.weakdeps = append(os.weakdeps, deps...) + return nil + } +} + +// WithDeps defines an operation dependency. +// Dependencies can be expressed as a string. +// Note: before running the DAG you must define all the operations. +func WithDeps(deps ...string) OpOption { + return func(key string, os *OpState, g *Graph) error { + os.deps = append(os.deps, deps...) + + for _, d := range deps { + if err := g.Graph.DependOn(key, d); err != nil { + return err + } + } + return nil + } +} + +// WithCallback associates a callback to the operation to be executed +// when the DAG is walked-by. +func WithCallback(fn ...func(context.Context) error) OpOption { + return func(s string, os *OpState, g *Graph) error { + os.fn = append(os.fn, fn...) + return nil + } +} diff --git a/vendor/github.com/rancher/yip/pkg/executor/default.go b/vendor/github.com/rancher/yip/pkg/executor/default.go index 17b466cb53a..541feb63be4 100644 --- a/vendor/github.com/rancher/yip/pkg/executor/default.go +++ b/vendor/github.com/rancher/yip/pkg/executor/default.go @@ -22,9 +22,9 @@ import ( "path/filepath" "github.com/hashicorp/go-multierror" - "github.com/spectrocloud-labs/herd" "github.com/twpayne/go-vfs/v4" + "github.com/rancher/yip/pkg/dag" "github.com/rancher/yip/pkg/logger" "github.com/rancher/yip/pkg/plugins" "github.com/rancher/yip/pkg/schema" @@ -56,7 +56,7 @@ type op struct { fn func(context.Context) error deps []string after []string - options []herd.OpOption + options []dag.OpOption name string } @@ -103,14 +103,30 @@ func (e *DefaultExecutor) applyStage(stage schema.Stage, fs vfs.FS, console plug return errs } +func checkDuplicates(stages []schema.Stage) bool { + stageNames := map[string]bool{} + for _, st := range stages { + if _, ok := stageNames[st.Name]; ok { + return true + } + stageNames[st.Name] = true + } + return false +} + func (e *DefaultExecutor) genOpFromSchema(file, stage string, config schema.YipConfig, fs vfs.FS, console plugins.Console) []*op { results := []*op{} - currentStages := config.Stages[stage] + duplicatedNames := checkDuplicates(currentStages) + prev := "" for i, st := range currentStages { name := st.Name + if duplicatedNames { + name = fmt.Sprintf("%s.%d", st.Name, i) + } + if name == "" { name = fmt.Sprint(i) } @@ -132,7 +148,7 @@ func (e *DefaultExecutor) genOpFromSchema(file, stage string, config schema.YipC return e.applyStage(stageLocal, fs, console) }, name: opName, - options: []herd.OpOption{herd.WeakDeps}, + options: []dag.OpOption{dag.WeakDeps}, } for _, d := range st.After { @@ -196,7 +212,7 @@ func (e *DefaultExecutor) dirOps(stage, dir string, fs vfs.FS, console plugins.C return results, err } -func writeDAG(dag [][]herd.GraphEntry) { +func writeDAG(dag [][]dag.GraphEntry) { for i, layer := range dag { fmt.Printf("%d.\n", (i + 1)) for _, op := range layer { @@ -210,7 +226,7 @@ func writeDAG(dag [][]herd.GraphEntry) { return } -func (e *DefaultExecutor) Graph(stage string, fs vfs.FS, console plugins.Console, source string) ([][]herd.GraphEntry, error) { +func (e *DefaultExecutor) Graph(stage string, fs vfs.FS, console plugins.Console, source string) ([][]dag.GraphEntry, error) { g, err := e.prepareDAG(stage, source, 
fs, console) if err != nil { return nil, err @@ -239,10 +255,10 @@ func (e *DefaultExecutor) Analyze(stage string, fs vfs.FS, console plugins.Conso } } -func (e *DefaultExecutor) prepareDAG(stage, uri string, fs vfs.FS, console plugins.Console) (*herd.Graph, error) { +func (e *DefaultExecutor) prepareDAG(stage, uri string, fs vfs.FS, console plugins.Console) (*dag.Graph, error) { f, err := fs.Stat(uri) - g := herd.DAG(herd.EnableInit) + g := dag.DAG(dag.EnableInit) var ops opList switch { case err == nil && f.IsDir(): @@ -276,7 +292,7 @@ func (e *DefaultExecutor) prepareDAG(stage, uri string, fs vfs.FS, console plugi // Ensure all names are unique ops.uniqueNames() for _, o := range ops { - g.Add(o.name, append(o.options, herd.WithCallback(o.fn), herd.WithDeps(append(o.after, o.deps...)...))...) + g.Add(o.name, append(o.options, dag.WithCallback(o.fn), dag.WithDeps(append(o.after, o.deps...)...))...) } return g, nil diff --git a/vendor/github.com/rancher/yip/pkg/executor/executor.go b/vendor/github.com/rancher/yip/pkg/executor/executor.go index 3f1f30edcae..e2e274db611 100644 --- a/vendor/github.com/rancher/yip/pkg/executor/executor.go +++ b/vendor/github.com/rancher/yip/pkg/executor/executor.go @@ -16,12 +16,11 @@ package executor import ( "github.com/sirupsen/logrus" - "github.com/spectrocloud-labs/herd" "github.com/twpayne/go-vfs/v4" + "github.com/rancher/yip/pkg/dag" "github.com/rancher/yip/pkg/logger" "github.com/rancher/yip/pkg/plugins" - "github.com/rancher/yip/pkg/schema" ) @@ -33,7 +32,7 @@ type Executor interface { Conditionals([]Plugin) Modifier(m schema.Modifier) Analyze(string, vfs.FS, plugins.Console, ...string) - Graph(string, vfs.FS, plugins.Console, string) ([][]herd.GraphEntry, error) + Graph(string, vfs.FS, plugins.Console, string) ([][]dag.GraphEntry, error) } type Plugin func(logger.Interface, schema.Stage, vfs.FS, plugins.Console) error @@ -85,7 +84,6 @@ func NewExecutor(opts ...Options) Executor { plugins.Sysctl, plugins.User, plugins.SSH, - plugins.LoadModules, plugins.Timesyncd, plugins.Systemctl, plugins.Environment, diff --git a/vendor/github.com/rancher/yip/pkg/plugins/common.go b/vendor/github.com/rancher/yip/pkg/plugins/common.go index e78f0f5c8c7..487902a161a 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/common.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/common.go @@ -4,13 +4,12 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "os/exec" "strings" "time" - "github.com/pkg/errors" "github.com/zcalusic/sysinfo" "github.com/rancher/yip/pkg/logger" @@ -65,7 +64,7 @@ func download(url string) (string, error) { time.Sleep(time.Second) } if err != nil { - return "", errors.Wrap(err, "failed while getting file") + return "", fmt.Errorf("failed getting file: %s", err.Error()) } if resp.Body != nil { defer resp.Body.Close() @@ -73,7 +72,7 @@ func download(url string) (string, error) { if resp.StatusCode/100 > 2 { return "", fmt.Errorf("%s %s", resp.Proto, resp.Status) } - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) return string(bytes), err } diff --git a/vendor/github.com/rancher/yip/pkg/plugins/network.go b/vendor/github.com/rancher/yip/pkg/plugins/conditional.go similarity index 78% rename from vendor/github.com/rancher/yip/pkg/plugins/network.go rename to vendor/github.com/rancher/yip/pkg/plugins/conditional.go index 22176e23bc8..56ce3bed2b5 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/network.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/conditional.go @@ -1,10 +1,10 @@ 
package plugins import ( + "errors" "fmt" "regexp" - "github.com/pkg/errors" "github.com/twpayne/go-vfs/v4" "github.com/rancher/yip/pkg/logger" @@ -18,7 +18,7 @@ func NodeConditional(l logger.Interface, s schema.Stage, fs vfs.FS, console Cons return fmt.Errorf("Skipping stage (node hostname '%s' doesn't match '%s')", system.Node.Hostname, s.Node) } if err != nil { - return errors.Wrapf(err, "Skipping invalid regex for node hostname '%s', error: %s", s.Node, err.Error()) + return errors.Join(err, fmt.Errorf("Skipping invalid regex for node hostname '%s'", s.Node)) } } return nil diff --git a/vendor/github.com/rancher/yip/pkg/plugins/datasource.go b/vendor/github.com/rancher/yip/pkg/plugins/datasource.go index 7e15a0f8cd9..8b14ccdf13a 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/datasource.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/datasource.go @@ -12,7 +12,6 @@ import ( "strings" "sync" - "github.com/pkg/errors" prv "github.com/rancher-sandbox/linuxkit/providers" "github.com/twpayne/go-vfs/v4" @@ -201,7 +200,7 @@ func processSSHFile(l logger.Interface, fs vfs.FS, console Console) error { var line string usr, err := user.Current() if err != nil { - return errors.Wrap(err, "could not get current user info") + return fmt.Errorf("could not get current user info: %s", err.Error()) } scanner := bufio.NewScanner(strings.NewReader(string(auth_keys))) @@ -287,7 +286,7 @@ func writeToFile(l logger.Interface, filename string, content string, perm uint3 }, }, fs, console) if err != nil { - return errors.Wrap(err, "could not write file") + return fmt.Errorf("could not write file '%s': %s", filename, err.Error()) } return nil } diff --git a/vendor/github.com/rancher/yip/pkg/plugins/dns.go b/vendor/github.com/rancher/yip/pkg/plugins/dns.go index d8bbebaf07d..962445645fc 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/dns.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/dns.go @@ -1,7 +1,10 @@ package plugins import ( - "github.com/moby/moby/libnetwork/resolvconf" + "bytes" + "os" + "strings" + "github.com/twpayne/go-vfs/v4" "github.com/rancher/yip/pkg/logger" @@ -20,6 +23,38 @@ func applyDNS(s schema.Stage) error { if path == "" { path = "/etc/resolv.conf" } - _, err := resolvconf.Build(path, s.Dns.Nameservers, s.Dns.DnsSearch, s.Dns.DnsOptions) - return err + return writeResolvConf(path, s.Dns.Nameservers, s.Dns.DnsSearch, s.Dns.DnsOptions) +} + +// Build generates and writes a configuration file to path containing a nameserver +// entry for every element in nameservers, a "search" entry for every element in +// dnsSearch, and an "options" entry for every element in dnsOptions. It returns +// a File containing the generated content and its (sha256) hash. +// +// Note that the resolv.conf file is written, but the hash file is not. +// Duplicated from https://github.com/moby/moby/blob/9d07820b221db010bf1bdc26ca904468804ca712/libnetwork/resolvconf/resolvconf.go#L131 +// Because of use of internal module. +func writeResolvConf(path string, nameservers, dnsSearch, dnsOptions []string) error { + content := bytes.NewBuffer(nil) + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." 
{ + if _, err := content.WriteString("search " + searchString + "\n"); err != nil { + return err + } + } + } + for _, dns := range nameservers { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return err + } + } + if len(dnsOptions) > 0 { + if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" { + if _, err := content.WriteString("options " + optsString + "\n"); err != nil { + return err + } + } + } + + return os.WriteFile(path, content.Bytes(), 0o644) } diff --git a/vendor/github.com/rancher/yip/pkg/plugins/download.go b/vendor/github.com/rancher/yip/pkg/plugins/download.go index 705d93c9219..34d85db7310 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/download.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/download.go @@ -1,13 +1,13 @@ package plugins import ( + "errors" "net/http" "os" "time" grab "github.com/cavaliergopher/grab/v3" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/twpayne/go-vfs/v4" @@ -88,7 +88,7 @@ Loop: // FIXUP: Doesn't support fs. It reads real /etc/passwd files uid, gid, err := utils.GetUserDataFromString(dl.OwnerString) if err != nil { - return errors.Wrap(err, "Failed getting gid") + return errors.New("Failed getting gid") } return os.Chown(dl.Path, uid, gid) } diff --git a/vendor/github.com/rancher/yip/pkg/plugins/environment.go b/vendor/github.com/rancher/yip/pkg/plugins/environment.go index 370d0577531..efd4cda7998 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/environment.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/environment.go @@ -1,11 +1,11 @@ package plugins import ( + "fmt" "os" "path/filepath" "github.com/joho/godotenv" - "github.com/pkg/errors" "github.com/twpayne/go-vfs/v4" "github.com/rancher/yip/pkg/logger" @@ -47,7 +47,7 @@ func Environment(l logger.Interface, s schema.Stage, fs vfs.FS, console Console) } if err := utils.Touch(environment, os.ModePerm, fs); err != nil { - return errors.Wrap(err, "failed touching environment file") + return fmt.Errorf("failed touching environment file: %s", err.Error()) } content, err := fs.ReadFile(environment) diff --git a/vendor/github.com/rancher/yip/pkg/plugins/files.go b/vendor/github.com/rancher/yip/pkg/plugins/files.go index 82dfe6e8da9..057afc28ac0 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/files.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/files.go @@ -1,11 +1,12 @@ package plugins import ( + "errors" + "fmt" "os" "path/filepath" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "github.com/twpayne/go-vfs/v4" "github.com/rancher/yip/pkg/logger" @@ -59,7 +60,7 @@ func writeFile(l logger.Interface, file schema.File, fs vfs.FS, console Console) d := newDecoder(file.Encoding) c, err := d.Decode(file.Content) if err != nil { - return errors.Wrapf(err, "failed decoding content with encoding %s", file.Encoding) + return fmt.Errorf("failed decoding content with encoding %s: %s", file.Encoding, err.Error()) } _, err = fsfile.WriteString(templateSysData(l, string(c))) @@ -77,7 +78,7 @@ func writeFile(l logger.Interface, file schema.File, fs vfs.FS, console Console) // FIXUP: Doesn't support fs. 
It reads real /etc/passwd files uid, gid, err := utils.GetUserDataFromString(file.OwnerString) if err != nil { - return errors.Wrap(err, "Failed getting gid") + return errors.New("Failed getting gid") } return fs.Chown(file.Path, uid, gid) } diff --git a/vendor/github.com/rancher/yip/pkg/plugins/files_base64_decoder.go b/vendor/github.com/rancher/yip/pkg/plugins/files_base64_decoder.go index 4f2123a2ce1..c0f6a85e55d 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/files_base64_decoder.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/files_base64_decoder.go @@ -3,8 +3,6 @@ package plugins import ( "encoding/base64" "fmt" - - "github.com/pkg/errors" ) type base64Decoder struct{} @@ -23,7 +21,7 @@ func (base64GZip) Decode(content string) ([]byte, error) { b := base64Decoder{} c, err := b.Decode(content) if err != nil { - return []byte{}, errors.Wrap(err, "while reading base64 data") + return []byte{}, fmt.Errorf("Error while reading base64 data: %s", err.Error()) } g := gzipDecoder{} diff --git a/vendor/github.com/rancher/yip/pkg/plugins/git.go b/vendor/github.com/rancher/yip/pkg/plugins/git.go index cd1062a3f33..c53ebc7e811 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/git.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/git.go @@ -15,6 +15,7 @@ package plugins import ( + "fmt" "path/filepath" git "github.com/go-git/go-git/v5" @@ -22,7 +23,6 @@ import ( "github.com/go-git/go-git/v5/plumbing/transport" gith "github.com/go-git/go-git/v5/plumbing/transport/http" ssh2 "github.com/go-git/go-git/v5/plumbing/transport/ssh" - "github.com/pkg/errors" "github.com/twpayne/go-vfs/v4" "golang.org/x/crypto/ssh" @@ -93,7 +93,7 @@ func Git(l logger.Interface, s schema.Stage, fs vfs.FS, console Console) error { _, err = git.PlainClone(path, false, opts) if err != nil { - return errors.Wrap(err, "failed cloning repo") + return fmt.Errorf("Failed cloning repo: %s", err.Error()) } return nil } diff --git a/vendor/github.com/rancher/yip/pkg/plugins/layout.go b/vendor/github.com/rancher/yip/pkg/plugins/layout.go index 41856e9f4d5..bb82d8f3ace 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/layout.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/layout.go @@ -810,7 +810,7 @@ func (mkfs MkfsCall) buildOptions() ([]string, error) { opts = append(opts, mkfs.dev) case fatFS: if mkfs.part.FSLabel != "" { - opts = append(opts, "-i") + opts = append(opts, "-n") opts = append(opts, mkfs.part.FSLabel) } if len(mkfs.customOpts) > 0 { diff --git a/vendor/github.com/rancher/yip/pkg/plugins/modules.go b/vendor/github.com/rancher/yip/pkg/plugins/modules.go deleted file mode 100644 index 6a418a2cdea..00000000000 --- a/vendor/github.com/rancher/yip/pkg/plugins/modules.go +++ /dev/null @@ -1,60 +0,0 @@ -package plugins - -import ( - "bufio" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/twpayne/go-vfs/v4" - "pault.ag/go/modprobe" - - "github.com/rancher/yip/pkg/logger" - "github.com/rancher/yip/pkg/schema" -) - -const ( - modules = "/proc/modules" -) - -func loadedModules(l logger.Interface, fs vfs.FS) map[string]interface{} { - loaded := map[string]interface{}{} - f, err := fs.Open(modules) - if err != nil { - l.Warnf("Cannot open %s: %s", modules, err.Error()) - return loaded - } - defer f.Close() - sc := bufio.NewScanner(f) - for sc.Scan() { - mod := strings.SplitN(sc.Text(), " ", 2) - if len(mod) == 0 { - continue - } - loaded[mod[0]] = nil - } - return loaded -} - -func LoadModules(l logger.Interface, s schema.Stage, fs vfs.FS, console Console) error { - var errs error - - if 
len(s.Modules) == 0 { - return nil - } - - loaded := loadedModules(l, fs) - - for _, m := range s.Modules { - if _, ok := loaded[m]; ok { - continue - } - params := strings.SplitN(m, " ", -1) - l.Debugf("loading module %s with parameters [%s]", m, params) - if err := modprobe.Load(params[0], strings.Join(params[1:], " ")); err != nil { - errs = multierror.Append(errs, err) - continue - } - l.Debugf("module %s loaded", m) - } - return errs -} diff --git a/vendor/github.com/rancher/yip/pkg/plugins/ssh.go b/vendor/github.com/rancher/yip/pkg/plugins/ssh.go index 7ee2042a1c6..29e4a1a4bc6 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/ssh.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/ssh.go @@ -5,13 +5,11 @@ import ( "net/url" "os" "path" - "strconv" "strings" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" + "github.com/mauromorales/xpasswd/pkg/users" "github.com/twpayne/go-vfs/v4" - passwd "github.com/willdonnelly/passwd" "github.com/rancher/yip/pkg/logger" "github.com/rancher/yip/pkg/schema" @@ -53,7 +51,7 @@ func getRemotePubKey(key string) (string, error) { out, err := download(key) if err != nil { - return "", errors.Wrap(err, "failed while downloading key") + return "", fmt.Errorf("failed downloading key: %s", err.Error()) } return out, err } @@ -73,7 +71,7 @@ func ensure(file string, fs vfs.FS) (os.FileInfo, error) { } info, err = fs.Stat(file) if err != nil { - return info, errors.Wrapf(err, "cannot stat %s", file) + return info, fmt.Errorf("cannot stat '%s': %s", file, err.Error()) } } else if err != nil { return info, err @@ -88,13 +86,13 @@ func authorizeSSHKey(key, file string, uid, gid int, fs vfs.FS) error { if utils.IsUrl(key) { key, err = getRemotePubKey(key) if err != nil { - return errors.Wrap(err, "failed fetching ssh key") + return fmt.Errorf("failed fetching ssh key: %s", err.Error()) } } info, err := ensure(file, fs) if err != nil { - return errors.Wrapf(err, "while ensuring %s exists", file) + return fmt.Errorf("failed ensuring %s exists: %s", file, err.Error()) } bytes, err := fs.ReadFile(file) @@ -122,27 +120,29 @@ func ensureKeys(user string, keys []string, fs vfs.FS) error { var errs error f, err := fs.RawPath(passwdFile) - current, err := passwd.ParseFile(f) + list := users.NewUserList() + list.SetPath(f) + err = list.Load() if err != nil { - return errors.Wrap(err, "Failed parsing passwd") + return fmt.Errorf("Failed parsing passwd: %s", err.Error()) } - data, ok := current[user] - if !ok { + data := list.Get(user) + if data == nil { return fmt.Errorf("user %s not found", user) } - uid, err := strconv.Atoi(data.Uid) + uid, err := data.UID() if err != nil { - return errors.Wrap(err, "Failed getting uid") + return fmt.Errorf("Failed getting uid: %s", err.Error()) } - gid, err := strconv.Atoi(data.Gid) + gid, err := data.GID() if err != nil { - return errors.Wrap(err, "Failed getting gid") + return fmt.Errorf("Failed getting gid: %s", err.Error()) } - homeDir := data.Home + homeDir := data.HomeDir() userSSHDir := path.Join(homeDir, sshDir) if _, err := fs.Stat(userSSHDir); os.IsNotExist(err) { diff --git a/vendor/github.com/rancher/yip/pkg/plugins/user.go b/vendor/github.com/rancher/yip/pkg/plugins/user.go index 29846d81087..759c5845a7e 100644 --- a/vendor/github.com/rancher/yip/pkg/plugins/user.go +++ b/vendor/github.com/rancher/yip/pkg/plugins/user.go @@ -7,13 +7,12 @@ import ( "sort" "strconv" - "github.com/pkg/errors" + "github.com/mauromorales/xpasswd/pkg/users" "github.com/hashicorp/go-multierror" "github.com/joho/godotenv" entities 
"github.com/mudler/entities/pkg/entities" "github.com/twpayne/go-vfs/v4" - passwd "github.com/willdonnelly/passwd" "github.com/rancher/yip/pkg/logger" "github.com/rancher/yip/pkg/schema" @@ -33,22 +32,22 @@ func createUser(fs vfs.FS, u schema.User, console Console) error { etcgroup, err := fs.RawPath("/etc/group") if err != nil { - return errors.Wrap(err, "getting rawpath for /etc/group") + return fmt.Errorf("error getting rawpath for /etc/group: %s", err.Error()) } etcshadow, err := fs.RawPath("/etc/shadow") if err != nil { - return errors.Wrap(err, "getting rawpath for /etc/shadow") + return fmt.Errorf("error getting rawpath for /etc/shadow: %s", err.Error()) } etcpasswd, err := fs.RawPath("/etc/passwd") if err != nil { - return errors.Wrap(err, "getting rawpath for /etc/passwd") + return fmt.Errorf("error getting rawpath for /etc/passwd: %s", err.Error()) } useradd, err := fs.RawPath("/etc/default/useradd") if err != nil { - return errors.Wrap(err, "getting rawpath for /etc/default/useradd") + return fmt.Errorf("error getting rawpath for /etc/default/useradd: %s", err.Error()) } usrDefaults := map[string]string{} @@ -57,7 +56,7 @@ func createUser(fs vfs.FS, u schema.User, console Console) error { if _, err = os.Stat(useradd); err == nil { usrDefaults, err = godotenv.Read(useradd) if err != nil { - return errors.Wrapf(err, "could not parse '%s'", useradd) + return fmt.Errorf("failed parsing '%s'", useradd) } } @@ -70,31 +69,15 @@ func createUser(fs vfs.FS, u schema.User, console Console) error { } primaryGroup := u.Name - gid := 1000 + gid := -1 // -1 instructs entities to find the next free id and assign it if u.PrimaryGroup != "" { gr, err := osuser.LookupGroup(u.PrimaryGroup) if err != nil { - return errors.Wrap(err, "could not resolve primary group of user") + return fmt.Errorf("could not resolve primary group of user: %s", err.Error()) } gid, _ = strconv.Atoi(gr.Gid) primaryGroup = u.PrimaryGroup - } else { - // Create a new group after the user name - all, _ := entities.ParseGroup(etcgroup) - if len(all) != 0 { - usedGids := []int{} - for _, entry := range all { - usedGids = append(usedGids, *entry.Gid) - } - sort.Ints(usedGids) - if len(usedGids) == 0 { - return errors.New("no new guid found") - } - gid = usedGids[len(usedGids)-1] - gid++ - } - } updateGroup := entities.Group{ @@ -103,32 +86,51 @@ func createUser(fs vfs.FS, u schema.User, console Console) error { Gid: &gid, Users: u.Name, } - updateGroup.Apply(etcgroup, false) + err = updateGroup.Apply(etcgroup, false) + if err != nil { + return fmt.Errorf("creating the user's group: %v", err) + } + + // reload the group to get the generated GID + groups, _ := entities.ParseGroup(etcgroup) + for name, group := range groups { + if name == updateGroup.Name { + updateGroup = group + gid = *group.Gid + break + } + } - uid := 1000 + uid := -1 if u.UID != "" { // User defined-uid uid, err = strconv.Atoi(u.UID) if err != nil { - return errors.Wrap(err, "invalid uid defined") + return fmt.Errorf("failed parsing uid: %s", err.Error()) } } else { - // find an available uid if there are others already - all, _ := passwd.ParseFile(etcpasswd) - if len(all) != 0 { - usedUids := []int{} - for _, entry := range all { - uid, _ := strconv.Atoi(entry.Uid) - usedUids = append(usedUids, uid) + list := users.NewUserList() + list.SetPath(etcpasswd) + list.Load() + + user := list.Get(u.Name) + + if user != nil { + uid, err = user.UID() + if err != nil { + return fmt.Errorf("could not get user id: %v", err) } - sort.Ints(usedUids) - if len(usedUids) == 0 
{ - return errors.New("no new UID found") + } else { + // https://systemd.io/UIDS-GIDS/#special-distribution-uid-ranges + uid, err = list.GenerateUIDInRange(entities.HumanIDMin, entities.HumanIDMax) + if err != nil { + return fmt.Errorf("no available uid: %v", err) } - uid = usedUids[len(usedUids)-1] - uid++ } } + if uid == -1 { + return fmt.Errorf("could not set uid for user") + } if u.Homedir == "" { u.Homedir = fmt.Sprintf("%s/%s", usrDefaults["HOME"], u.Name) @@ -159,13 +161,13 @@ func createUser(fs vfs.FS, u schema.User, console Console) error { if !u.NoCreateHome { homedir, err := fs.RawPath(u.Homedir) if err != nil { - return errors.Wrap(err, "getting rawpath for homedir") + return fmt.Errorf("error getting rawpath for homedir: %s", err.Error()) } os.MkdirAll(homedir, 0755) os.Chown(homedir, uid, gid) } - groups, _ := entities.ParseGroup(etcgroup) + groups, _ = entities.ParseGroup(etcgroup) for name, group := range groups { for _, w := range u.Groups { if w == name { @@ -181,7 +183,7 @@ func createUser(fs vfs.FS, u schema.User, console Console) error { func setUserPass(fs vfs.FS, username, password string) error { etcshadow, err := fs.RawPath("/etc/shadow") if err != nil { - return errors.Wrap(err, "getting rawpath for /etc/shadow") + return fmt.Errorf("error getting rawpath for /etc/shadow: %s", err.Error()) } userShadow := &entities.Shadow{ Username: username, @@ -208,11 +210,11 @@ func User(l logger.Interface, s schema.Stage, fs vfs.FS, console Console) error for _, k := range users { r := s.Users[k] r.Name = k - if !s.Users[k].Exists() { + if !r.Exists() { if err := createUser(fs, r, console); err != nil { errs = multierror.Append(errs, err) } - } else if s.Users[k].PasswordHash != "" { + } else if r.PasswordHash != "" { if err := setUserPass(fs, r.Name, r.PasswordHash); err != nil { return err } diff --git a/vendor/github.com/rancher/yip/pkg/schema/schema.go b/vendor/github.com/rancher/yip/pkg/schema/schema.go index 68335001fca..d67faa4f292 100644 --- a/vendor/github.com/rancher/yip/pkg/schema/schema.go +++ b/vendor/github.com/rancher/yip/pkg/schema/schema.go @@ -16,6 +16,7 @@ package schema import ( "bytes" + "errors" "fmt" "io" "net/http" @@ -24,8 +25,6 @@ import ( "github.com/google/shlex" - "github.com/pkg/errors" - config "github.com/rancher/yip/pkg/schema/cloudinit" "github.com/hashicorp/go-multierror" @@ -149,7 +148,6 @@ type Stage struct { SSHKeys map[string][]string `yaml:"authorized_keys,omitempty"` Node string `yaml:"node,omitempty"` Users map[string]User `yaml:"users,omitempty"` - Modules []string `yaml:"modules,omitempty"` Systemctl Systemctl `yaml:"systemctl,omitempty"` Environment map[string]string `yaml:"environment,omitempty"` EnvironmentFile string `yaml:"environment_file,omitempty"` @@ -200,12 +198,12 @@ func Load(s string, fs vfs.FS, l Loader, m Modifier) (*YipConfig, error) { } data, err := l(s, fs, m) if err != nil { - return nil, errors.Wrap(err, "while loading yipconfig") + return nil, fmt.Errorf("error while loading yipconfig: %s", err.Error()) } loader, err := detect(data) if err != nil { - return nil, errors.Wrap(err, "invalid file type") + return nil, fmt.Errorf("invalid file type: %s", err.Error()) } return loader.Load(data, fs) } @@ -270,7 +268,7 @@ func jq(command string, data map[string]interface{}) (map[string]interface{}, er v, ok := iter.Next() if !ok { - return nil, errors.New("failed getting rsult from gojq") + return nil, errors.New("failed getting result from gojq") } if err, ok := v.(error); ok { return nil, err diff --git 
a/vendor/github.com/rancher/yip/pkg/utils/user.go b/vendor/github.com/rancher/yip/pkg/utils/user.go index 7c5f8dd32b9..d87bd33c2de 100644 --- a/vendor/github.com/rancher/yip/pkg/utils/user.go +++ b/vendor/github.com/rancher/yip/pkg/utils/user.go @@ -1,11 +1,11 @@ package utils import ( + "errors" + "fmt" osuser "os/user" "strconv" "strings" - - "github.com/pkg/errors" ) // GetUserDataFromString retrieves uid and gid from a string. The string @@ -17,19 +17,19 @@ func GetUserDataFromString(s string) (int, int, error) { dat := strings.Split(user, ":") us, err := osuser.Lookup(dat[0]) if err != nil { - return 0, 0, errors.Wrapf(err, "while looking up user %s", dat[0]) + return 0, 0, fmt.Errorf("failed looking up user %s", dat[0]) } u = us.Uid group, err := osuser.LookupGroup(dat[1]) if err != nil { - return 0, 0, errors.Wrapf(err, "while looking up group %s", dat[1]) + return 0, 0, fmt.Errorf("failed looking up group %s", dat[1]) } g = group.Gid } else { us, err := osuser.Lookup(s) if err != nil { - return 0, 0, errors.Wrapf(err, "while looking up user %s", s) + return 0, 0, fmt.Errorf("failed looking up user %s", s) } u = us.Uid g = us.Gid @@ -37,12 +37,12 @@ func GetUserDataFromString(s string) (int, int, error) { uid, err := strconv.Atoi(u) if err != nil { - return 0, 0, errors.Wrap(err, "failed converting uid to int") + return 0, 0, errors.New("failed converting uid to int") } gid, err := strconv.Atoi(g) if err != nil { - return 0, 0, errors.Wrap(err, "failed converting gid to int") + return 0, 0, errors.New("failed converting gid to int") } return uid, gid, nil } diff --git a/vendor/github.com/samber/lo/.travis.yml b/vendor/github.com/samber/lo/.travis.yml deleted file mode 100644 index f0de7f51c46..00000000000 --- a/vendor/github.com/samber/lo/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -before_install: - - go mod download - - make tools -go: - - "1.18" -script: make test diff --git a/vendor/github.com/samber/lo/CHANGELOG.md b/vendor/github.com/samber/lo/CHANGELOG.md deleted file mode 100644 index ea3ef7a536e..00000000000 --- a/vendor/github.com/samber/lo/CHANGELOG.md +++ /dev/null @@ -1,411 +0,0 @@ -# Changelog - -@samber: I sometimes forget to update this file. Ping me on [Twitter](https://twitter.com/samuelberthe) or open an issue in case of error. We need to keep a clear changelog for easier lib upgrade. 
- -## 1.37.0 (2022-12-15) - -Adding: -- lo.PartialX -- lo.Transaction - -Improvement: -- lo.Associate / lo.SliceToMap: faster memory allocation - -Chore: -- Remove *_test.go files from releases, in order to cleanup dev dependencies - -## 1.36.0 (2022-11-28) - -Adding: -- lo.AttemptWhile -- lo.AttemptWhileWithDelay - -## 1.35.0 (2022-11-15) - -Adding: -- lo.RandomString -- lo.BufferWithTimeout (alias to lo.BatchWithTimeout) -- lo.Buffer (alias to lo.Batch) - -Change: -- lo.Slice: avoid panic caused by out-of-bounds - -Deprecation: -- lo.BatchWithTimeout -- lo.Batch - -## 1.34.0 (2022-11-12) - -Improving: -- lo.Union: faster and can receive more than 2 lists - -Adding: -- lo.FanIn (alias to lo.ChannelMerge) -- lo.FanOut - -Deprecation: -- lo.ChannelMerge - -## 1.33.0 (2022-10-14) - -Adding: -- lo.ChannelMerge - -Improving: -- helpers with callbacks/predicates/iteratee now have named arguments, for easier autocompletion - -## 1.32.0 (2022-10-10) - -Adding: - -- lo.ChannelToSlice -- lo.CountValues -- lo.CountValuesBy -- lo.MapEntries -- lo.Sum -- lo.Interleave -- TupleX.Unpack() - -## 1.31.0 (2022-10-06) - -Adding: - -- lo.SliceToChannel -- lo.Generator -- lo.Batch -- lo.BatchWithTimeout - -## 1.30.1 (2022-10-06) - -Fix: - -- lo.Try1: remove generic type -- lo.Validate: format error properly - -## 1.30.0 (2022-10-04) - -Adding: - -- lo.TernaryF -- lo.Validate - -## 1.29.0 (2022-10-02) - -Adding: - -- lo.ErrorAs -- lo.TryOr -- lo.TryOrX - -## 1.28.0 (2022-09-05) - -Adding: - -- lo.ChannelDispatcher with 6 dispatching strategies: - - lo.DispatchingStrategyRoundRobin - - lo.DispatchingStrategyRandom - - lo.DispatchingStrategyWeightedRandom - - lo.DispatchingStrategyFirst - - lo.DispatchingStrategyLeast - - lo.DispatchingStrategyMost - -## 1.27.1 (2022-08-15) - -Bugfix: - -- Removed comparable constraint for lo.FindKeyBy - -## 1.27.0 (2022-07-29) - -Breaking: - -- Change of MapToSlice prototype: `MapToSlice[K comparable, V any, R any](in map[K]V, iteratee func(V, K) R) []R` -> `MapToSlice[K comparable, V any, R any](in map[K]V, iteratee func(K, V) R) []R` - -Added: - -- lo.ChunkString -- lo.SliceToMap (alias to lo.Associate) - -## 1.26.0 (2022-07-24) - -Adding: - -- lo.Associate -- lo.ReduceRight -- lo.FromPtrOr -- lo.MapToSlice -- lo.IsSorted -- lo.IsSortedByKey - -## 1.25.0 (2022-07-04) - -Adding: - -- lo.FindUniques -- lo.FindUniquesBy -- lo.FindDuplicates -- lo.FindDuplicatesBy -- lo.IsNotEmpty - -## 1.24.0 (2022-07-04) - -Adding: - -- lo.Without -- lo.WithoutEmpty - -## 1.23.0 (2022-07-04) - -Adding: - -- lo.FindKey -- lo.FindKeyBy - -## 1.22.0 (2022-07-04) - -Adding: - -- lo.Slice -- lo.FromPtr -- lo.IsEmpty -- lo.Compact -- lo.ToPairs: alias to lo.Entries -- lo.FromPairs: alias to lo.FromEntries -- lo.Partial - -Change: - -- lo.Must + lo.MustX: add context to panic message - -Fix: - -- lo.Nth: out of bound exception (#137) - -## 1.21.0 (2022-05-10) - -Adding: - -- lo.ToAnySlice -- lo.FromAnySlice - -## 1.20.0 (2022-05-02) - -Adding: - -- lo.Synchronize -- lo.SumBy - -Change: -- Removed generic type definition for lo.Try0: `lo.Try0[T]()` -> `lo.Try0()` - -## 1.19.0 (2022-04-30) - -Adding: - -- lo.RepeatBy -- lo.Subset -- lo.Replace -- lo.ReplaceAll -- lo.Substring -- lo.RuneLength - -## 1.18.0 (2022-04-28) - -Adding: - -- lo.SomeBy -- lo.EveryBy -- lo.None -- lo.NoneBy - -## 1.17.0 (2022-04-27) - -Adding: - -- lo.Unpack2 -> lo.Unpack3 -- lo.Async0 -> lo.Async6 - -## 1.16.0 (2022-04-26) - -Adding: - -- lo.AttemptWithDelay - -## 1.15.0 (2022-04-22) - -Improvement: - -- lo.Must: error or 
boolean value - -## 1.14.0 (2022-04-21) - -Adding: - -- lo.Coalesce - -## 1.13.0 (2022-04-14) - -Adding: - -- PickBy -- PickByKeys -- PickByValues -- OmitBy -- OmitByKeys -- OmitByValues -- Clamp -- MapKeys -- Invert -- IfF + ElseIfF + ElseF -- T0() + T1() + T2() + T3() + ... - -## 1.12.0 (2022-04-12) - -Adding: - -- Must -- Must{0-6} -- FindOrElse -- Async -- MinBy -- MaxBy -- Count -- CountBy -- FindIndexOf -- FindLastIndexOf -- FilterMap - -## 1.11.0 (2022-03-11) - -Adding: - -- Try -- Try{0-6} -- TryWitchValue -- TryCatch -- TryCatchWitchValue -- Debounce -- Reject - -## 1.10.0 (2022-03-11) - -Adding: - -- Range -- RangeFrom -- RangeWithSteps - -## 1.9.0 (2022-03-10) - -Added - -- Drop -- DropRight -- DropWhile -- DropRightWhile - -## 1.8.0 (2022-03-10) - -Adding Union. - -## 1.7.0 (2022-03-09) - -Adding ContainBy - -Adding MapValues - -Adding FlatMap - -## 1.6.0 (2022-03-07) - -Fixed PartitionBy. - -Adding Sample - -Adding Samples - -## 1.5.0 (2022-03-07) - -Adding Times - -Adding Attempt - -Adding Repeat - -## 1.4.0 (2022-03-07) - -- adding tuple types (2->9) -- adding Zip + Unzip -- adding lo.PartitionBy + lop.PartitionBy -- adding lop.GroupBy -- fixing Nth - -## 1.3.0 (2022-03-03) - -Last and Nth return errors - -## 1.2.0 (2022-03-03) - -Adding `lop.Map` and `lop.ForEach`. - -## 1.1.0 (2022-03-03) - -Adding `i int` param to `lo.Map()`, `lo.Filter()`, `lo.ForEach()` and `lo.Reduce()` predicates. - -## 1.0.0 (2022-03-02) - -*Initial release* - -Supported helpers for slices: - -- Filter -- Map -- Reduce -- ForEach -- Uniq -- UniqBy -- GroupBy -- Chunk -- Flatten -- Shuffle -- Reverse -- Fill -- ToMap - -Supported helpers for maps: - -- Keys -- Values -- Entries -- FromEntries -- Assign (maps merge) - -Supported intersection helpers: - -- Contains -- Every -- Some -- Intersect -- Difference - -Supported search helpers: - -- IndexOf -- LastIndexOf -- Find -- Min -- Max -- Last -- Nth - -Other functional programming helpers: - -- Ternary (1 line if/else statement) -- If / ElseIf / Else -- Switch / Case / Default -- ToPtr -- ToSlicePtr - -Constraints: - -- Clonable diff --git a/vendor/github.com/samber/lo/Dockerfile b/vendor/github.com/samber/lo/Dockerfile deleted file mode 100644 index bd01bbbb45b..00000000000 --- a/vendor/github.com/samber/lo/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ - -FROM golang:1.18 - -WORKDIR /go/src/github.com/samber/lo - -COPY Makefile go.* ./ - -RUN make tools diff --git a/vendor/github.com/samber/lo/LICENSE b/vendor/github.com/samber/lo/LICENSE deleted file mode 100644 index c3dc72d9ab8..00000000000 --- a/vendor/github.com/samber/lo/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 Samuel Berthe - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/samber/lo/Makefile b/vendor/github.com/samber/lo/Makefile deleted file mode 100644 index 57bb49159f5..00000000000 --- a/vendor/github.com/samber/lo/Makefile +++ /dev/null @@ -1,44 +0,0 @@ - -BIN=go - -build: - ${BIN} build -v ./... - -test: - go test -race -v ./... -watch-test: - reflex -t 50ms -s -- sh -c 'gotest -race -v ./...' - -bench: - go test -benchmem -count 3 -bench ./... -watch-bench: - reflex -t 50ms -s -- sh -c 'go test -benchmem -count 3 -bench ./...' - -coverage: - ${BIN} test -v -coverprofile=cover.out -covermode=atomic . - ${BIN} tool cover -html=cover.out -o cover.html - -# tools -tools: - ${BIN} install github.com/cespare/reflex@latest - ${BIN} install github.com/rakyll/gotest@latest - ${BIN} install github.com/psampaz/go-mod-outdated@latest - ${BIN} install github.com/jondot/goweight@latest - ${BIN} install github.com/golangci/golangci-lint/cmd/golangci-lint@latest - ${BIN} get -t -u golang.org/x/tools/cmd/cover - ${BIN} install github.com/sonatype-nexus-community/nancy@latest - go mod tidy - -lint: - golangci-lint run --timeout 60s --max-same-issues 50 ./... -lint-fix: - golangci-lint run --timeout 60s --max-same-issues 50 --fix ./... - -audit: tools - ${BIN} list -json -m all | nancy sleuth - -outdated: tools - ${BIN} list -u -m -json all | go-mod-outdated -update -direct - -weight: tools - goweight diff --git a/vendor/github.com/samber/lo/README.md b/vendor/github.com/samber/lo/README.md deleted file mode 100644 index ffb7c43895f..00000000000 --- a/vendor/github.com/samber/lo/README.md +++ /dev/null @@ -1,2851 +0,0 @@ -# lo - -[![tag](https://img.shields.io/github/tag/samber/lo.svg)](https://github.com/samber/lo/releases) -![Go Version](https://img.shields.io/badge/Go-%3E%3D%201.18-%23007d9c) -[![GoDoc](https://godoc.org/github.com/samber/lo?status.svg)](https://pkg.go.dev/github.com/samber/lo) -![Build Status](https://github.com/samber/lo/actions/workflows/go.yml/badge.svg) -[![Go report](https://goreportcard.com/badge/github.com/samber/lo)](https://goreportcard.com/report/github.com/samber/lo) -[![Coverage](https://img.shields.io/codecov/c/github/samber/lo)](https://codecov.io/gh/samber/lo) -[![Contributors](https://img.shields.io/github/contributors/samber/lo)](https://github.com/samber/lo/graphs/contributors) -[![License](https://img.shields.io/github/license/samber/lo)](./LICENSE) - -✨ **`samber/lo` is a Lodash-style Go library based on Go 1.18+ Generics.** - -This project started as an experiment with the new generics implementation. It may look like [Lodash](https://github.com/lodash/lodash) in some aspects. I used to code with the fantastic ["go-funk"](https://github.com/thoas/go-funk) package, but "go-funk" uses reflection and therefore is not typesafe. - -As expected, benchmarks demonstrate that generics are much faster than implementations based on the "reflect" package. Benchmarks also show similar performance gains compared to pure `for` loops. [See below](#-benchmark). - -In the future, 5 to 10 helpers will overlap with those coming into the Go standard library (under package names `slices` and `maps`). I feel this library is legitimate and offers many more valuable abstractions. 
- -**See also:** - -- [samber/do](https://github.com/samber/do): A dependency injection toolkit based on Go 1.18+ Generics -- [samber/mo](https://github.com/samber/mo): Monads based on Go 1.18+ Generics (Option, Result, Either...) - -**Why this name?** - -I wanted a **short name**, similar to "Lodash" and no Go package currently uses this name. - -![](img/logo-full.png) - -## 🚀 Install - -```sh -go get github.com/samber/lo@v1 -``` - -This library is v1 and follows SemVer strictly. - -No breaking changes will be made to exported APIs before v2.0.0. - -## 💡 Usage - -You can import `lo` using: - -```go -import ( - "github.com/samber/lo" - lop "github.com/samber/lo/parallel" -) -``` - -Then use one of the helpers below: - -```go -names := lo.Uniq[string]([]string{"Samuel", "John", "Samuel"}) -// []string{"Samuel", "John"} -``` - -Most of the time, the compiler will be able to infer the type so that you can call: `lo.Uniq([]string{...})`. - -### Tips for lazy developers - -I cannot recommend it, but in case you are too lazy for repeating `lo.` everywhere, you can import the entire library into the namespace. - -```go -import ( - . "github.com/samber/lo" -) -``` - -I take no responsibility on this junk. 😁 💩 - -## 🤠 Spec - -GoDoc: [https://godoc.org/github.com/samber/lo](https://godoc.org/github.com/samber/lo) - -Supported helpers for slices: - -- [Filter](#filter) -- [Map](#map) -- [FilterMap](#filtermap) -- [FlatMap](#flatmap) -- [Reduce](#reduce) -- [ReduceRight](#reduceright) -- [ForEach](#foreach) -- [Times](#times) -- [Uniq](#uniq) -- [UniqBy](#uniqby) -- [GroupBy](#groupby) -- [Chunk](#chunk) -- [PartitionBy](#partitionby) -- [Flatten](#flatten) -- [Interleave](#interleave) -- [Shuffle](#shuffle) -- [Reverse](#reverse) -- [Fill](#fill) -- [Repeat](#repeat) -- [RepeatBy](#repeatby) -- [KeyBy](#keyby) -- [Associate / SliceToMap](#associate-alias-slicetomap) -- [Drop](#drop) -- [DropRight](#dropright) -- [DropWhile](#dropwhile) -- [DropRightWhile](#droprightwhile) -- [Reject](#reject) -- [Count](#count) -- [CountBy](#countby) -- [CountValues](#countvalues) -- [CountValuesBy](#countvaluesby) -- [Subset](#subset) -- [Slice](#slice) -- [Replace](#replace) -- [ReplaceAll](#replaceall) -- [Compact](#compact) -- [IsSorted](#issorted) -- [IsSortedByKey](#issortedbykey) - -Supported helpers for maps: - -- [Keys](#keys) -- [Values](#values) -- [PickBy](#pickby) -- [PickByKeys](#pickbykeys) -- [PickByValues](#pickbyvalues) -- [OmitBy](#omitby) -- [OmitByKeys](#omitbykeys) -- [OmitByValues](#omitbyvalues) -- [Entries / ToPairs](#entries-alias-topairs) -- [FromEntries / FromPairs](#fromentries-alias-frompairs) -- [Invert](#invert) -- [Assign (merge of maps)](#assign) -- [MapKeys](#mapkeys) -- [MapValues](#mapvalues) -- [MapEntries](#mapentries) -- [MapToSlice](#maptoslice) - -Supported math helpers: - -- [Range / RangeFrom / RangeWithSteps](#range--rangefrom--rangewithsteps) -- [Clamp](#clamp) -- [Sum](#sum) -- [SumBy](#sumby) - -Supported helpers for strings: - -- [RandomString](#randomstring) -- [Substring](#substring) -- [ChunkString](#chunkstring) -- [RuneLength](#runelength) - -Supported helpers for tuples: - -- [T2 -> T9](#t2---t9) -- [Unpack2 -> Unpack9](#unpack2---unpack9) -- [Zip2 -> Zip9](#zip2---zip9) -- [Unzip2 -> Unzip9](#unzip2---unzip9) - -Supported helpers for channels: - -- [ChannelDispatcher](#channeldispatcher) -- [SliceToChannel](#slicetochannel) -- [Generator](#generator) -- [Buffer](#buffer) -- [BufferWithTimeout](#bufferwithtimeout) -- [FanIn](#fanin) -- [FanOut](#fanout) - 
-Supported intersection helpers: - -- [Contains](#contains) -- [ContainsBy](#containsby) -- [Every](#every) -- [EveryBy](#everyby) -- [Some](#some) -- [SomeBy](#someby) -- [None](#none) -- [NoneBy](#noneby) -- [Intersect](#intersect) -- [Difference](#difference) -- [Union](#union) -- [Without](#without) -- [WithoutEmpty](#withoutempty) - -Supported search helpers: - -- [IndexOf](#indexof) -- [LastIndexOf](#lastindexof) -- [Find](#find) -- [FindIndexOf](#findindexof) -- [FindLastIndexOf](#findlastindexof) -- [FindKey](#findkey) -- [FindKeyBy](#findkeyby) -- [FindUniques](#finduniques) -- [FindUniquesBy](#finduniquesby) -- [FindDuplicates](#findduplicates) -- [FindDuplicatesBy](#findduplicatesby) -- [Min](#min) -- [MinBy](#minby) -- [Max](#max) -- [MaxBy](#maxby) -- [Last](#last) -- [Nth](#nth) -- [Sample](#sample) -- [Samples](#samples) - -Conditional helpers: - -- [Ternary](#ternary) -- [TernaryF](#ternaryf) -- [If / ElseIf / Else](#if--elseif--else) -- [Switch / Case / Default](#switch--case--default) - -Type manipulation helpers: - -- [ToPtr](#toptr) -- [FromPtr](#fromptr) -- [FromPtrOr](#fromptror) -- [ToSlicePtr](#tosliceptr) -- [ToAnySlice](#toanyslice) -- [FromAnySlice](#fromanyslice) -- [Empty](#empty) -- [IsEmpty](#isempty) -- [IsNotEmpty](#isnotempty) -- [Coalesce](#coalesce) - -Function helpers: - -- [Partial](#partial) -- [Partial2 -> Partial5](#partial2---partial5) - -Concurrency helpers: - -- [Attempt](#attempt) -- [AttemptWhile](#attemptwhile) -- [AttemptWithDelay](#attemptwithdelay) -- [AttemptWhileWithDelay](#attemptwhilewithdelay) -- [Debounce](#debounce) -- [Synchronize](#synchronize) -- [Async](#async) -- [Transaction](#transaction) - -Error handling: - -- [Validate](#validate) -- [Must](#must) -- [Try](#try) -- [Try1 -> Try6](#try0-6) -- [TryOr](#tryor) -- [TryOr1 -> TryOr6](#tryor0-6) -- [TryCatch](#trycatch) -- [TryWithErrorValue](#trywitherrorvalue) -- [TryCatchWithErrorValue](#trycatchwitherrorvalue) -- [ErrorsAs](#errorsas) - -Constraints: - -- Clonable - -### Filter - -Iterates over a collection and returns an array of all the elements the predicate function returns `true` for. - -```go -even := lo.Filter[int]([]int{1, 2, 3, 4}, func(x int, index int) bool { - return x%2 == 0 -}) -// []int{2, 4} -``` - -[[play](https://go.dev/play/p/Apjg3WeSi7K)] - -### Map - -Manipulates a slice of one type and transforms it into a slice of another type: - -```go -import "github.com/samber/lo" - -lo.Map[int64, string]([]int64{1, 2, 3, 4}, func(x int64, index int) string { - return strconv.FormatInt(x, 10) -}) -// []string{"1", "2", "3", "4"} -``` - -[[play](https://go.dev/play/p/OkPcYAhBo0D)] - -Parallel processing: like `lo.Map()`, but the mapper function is called in a goroutine. Results are returned in the same order. - -```go -import lop "github.com/samber/lo/parallel" - -lop.Map[int64, string]([]int64{1, 2, 3, 4}, func(x int64, _ int) string { - return strconv.FormatInt(x, 10) -}) -// []string{"1", "2", "3", "4"} -``` - -### FilterMap - -Returns a slice which obtained after both filtering and mapping using the given callback function. - -The callback function should return two values: the result of the mapping operation and whether the result element should be included or not. 
- -```go -matching := lo.FilterMap[string, string]([]string{"cpu", "gpu", "mouse", "keyboard"}, func(x string, _ int) (string, bool) { - if strings.HasSuffix(x, "pu") { - return "xpu", true - } - return "", false -}) -// []string{"xpu", "xpu"} -``` - -[[play](https://go.dev/play/p/-AuYXfy7opz)] - -### FlatMap - -Manipulates a slice and transforms and flattens it to a slice of another type. - -```go -lo.FlatMap[int, string]([]int{0, 1, 2}, func(x int, _ int) []string { - return []string{ - strconv.FormatInt(x, 10), - strconv.FormatInt(x, 10), - } -}) -// []string{"0", "0", "1", "1", "2", "2"} -``` - -[[play](https://go.dev/play/p/YSoYmQTA8-U)] - -### Reduce - -Reduces a collection to a single value. The value is calculated by accumulating the result of running each element in the collection through an accumulator function. Each successive invocation is supplied with the return value returned by the previous call. - -```go -sum := lo.Reduce[int, int]([]int{1, 2, 3, 4}, func(agg int, item int, _ int) int { - return agg + item -}, 0) -// 10 -``` - -[[play](https://go.dev/play/p/R4UHXZNaaUG)] - -### ReduceRight - -Like `lo.Reduce` except that it iterates over elements of collection from right to left. - -```go -result := lo.ReduceRight[[]int, []int]([][]int{{0, 1}, {2, 3}, {4, 5}}, func(agg []int, item []int, _ int) []int { - return append(agg, item...) -}, []int{}) -// []int{4, 5, 2, 3, 0, 1} -``` - -[[play](https://go.dev/play/p/Fq3W70l7wXF)] - -### ForEach - -Iterates over elements of a collection and invokes the function over each element. - -```go -import "github.com/samber/lo" - -lo.ForEach[string]([]string{"hello", "world"}, func(x string, _ int) { - println(x) -}) -// prints "hello\nworld\n" -``` - -[[play](https://go.dev/play/p/oofyiUPRf8t)] - -Parallel processing: like `lo.ForEach()`, but the callback is called as a goroutine. - -```go -import lop "github.com/samber/lo/parallel" - -lop.ForEach[string]([]string{"hello", "world"}, func(x string, _ int) { - println(x) -}) -// prints "hello\nworld\n" or "world\nhello\n" -``` - -### Times - -Times invokes the iteratee n times, returning an array of the results of each invocation. The iteratee is invoked with index as argument. - -```go -import "github.com/samber/lo" - -lo.Times[string](3, func(i int) string { - return strconv.FormatInt(int64(i), 10) -}) -// []string{"0", "1", "2"} -``` - -[[play](https://go.dev/play/p/vgQj3Glr6lT)] - -Parallel processing: like `lo.Times()`, but callback is called in goroutine. - -```go -import lop "github.com/samber/lo/parallel" - -lop.Times[string](3, func(i int) string { - return strconv.FormatInt(int64(i), 10) -}) -// []string{"0", "1", "2"} -``` - -### Uniq - -Returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. The order of result values is determined by the order they occur in the array. - -```go -uniqValues := lo.Uniq[int]([]int{1, 2, 2, 1}) -// []int{1, 2} -``` - -[[play](https://go.dev/play/p/DTzbeXZ6iEN)] - -### UniqBy - -Returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is invoked for each element in array to generate the criterion by which uniqueness is computed. 
- -```go -uniqValues := lo.UniqBy[int, int]([]int{0, 1, 2, 3, 4, 5}, func(i int) int { - return i%3 -}) -// []int{0, 1, 2} -``` - -[[play](https://go.dev/play/p/g42Z3QSb53u)] - -### GroupBy - -Returns an object composed of keys generated from the results of running each element of collection through iteratee. - -```go -import lo "github.com/samber/lo" - -groups := lo.GroupBy[int, int]([]int{0, 1, 2, 3, 4, 5}, func(i int) int { - return i%3 -}) -// map[int][]int{0: []int{0, 3}, 1: []int{1, 4}, 2: []int{2, 5}} -``` - -[[play](https://go.dev/play/p/XnQBd_v6brd)] - -Parallel processing: like `lo.GroupBy()`, but callback is called in goroutine. - -```go -import lop "github.com/samber/lo/parallel" - -lop.GroupBy[int, int]([]int{0, 1, 2, 3, 4, 5}, func(i int) int { - return i%3 -}) -// map[int][]int{0: []int{0, 3}, 1: []int{1, 4}, 2: []int{2, 5}} -``` - -### Chunk - -Returns an array of elements split into groups the length of size. If array can't be split evenly, the final chunk will be the remaining elements. - -```go -lo.Chunk[int]([]int{0, 1, 2, 3, 4, 5}, 2) -// [][]int{{0, 1}, {2, 3}, {4, 5}} - -lo.Chunk[int]([]int{0, 1, 2, 3, 4, 5, 6}, 2) -// [][]int{{0, 1}, {2, 3}, {4, 5}, {6}} - -lo.Chunk[int]([]int{}, 2) -// [][]int{} - -lo.Chunk[int]([]int{0}, 2) -// [][]int{{0}} -``` - -[[play](https://go.dev/play/p/EeKl0AuTehH)] - -### PartitionBy - -Returns an array of elements split into groups. The order of grouped values is determined by the order they occur in collection. The grouping is generated from the results of running each element of collection through iteratee. - -```go -import lo "github.com/samber/lo" - -partitions := lo.PartitionBy[int, string]([]int{-2, -1, 0, 1, 2, 3, 4, 5}, func(x int) string { - if x < 0 { - return "negative" - } else if x%2 == 0 { - return "even" - } - return "odd" -}) -// [][]int{{-2, -1}, {0, 2, 4}, {1, 3, 5}} -``` - -[[play](https://go.dev/play/p/NfQ_nGjkgXW)] - -Parallel processing: like `lo.PartitionBy()`, but callback is called in goroutine. Results are returned in the same order. - -```go -import lop "github.com/samber/lo/parallel" - -partitions := lop.PartitionBy[int, string]([]int{-2, -1, 0, 1, 2, 3, 4, 5}, func(x int) string { - if x < 0 { - return "negative" - } else if x%2 == 0 { - return "even" - } - return "odd" -}) -// [][]int{{-2, -1}, {0, 2, 4}, {1, 3, 5}} -``` - -### Flatten - -Returns an array a single level deep. - -```go -flat := lo.Flatten[int]([][]int{{0, 1}, {2, 3, 4, 5}}) -// []int{0, 1, 2, 3, 4, 5} -``` - -[[play](https://go.dev/play/p/rbp9ORaMpjw)] - -### Interleave - -Round-robin alternating input slices and sequentially appending value at index into result. - -```go -interleaved := lo.Interleave[int]([]int{1, 4, 7}, []int{2, 5, 8}, []int{3, 6, 9}) -// []int{1, 2, 3, 4, 5, 6, 7, 8, 9} - -interleaved := lo.Interleave[int]([]int{1}, []int{2, 5, 8}, []int{3, 6}, []int{4, 7, 9, 10}) -// []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} -``` - -[[play](https://go.dev/play/p/DDhlwrShbwe)] - -### Shuffle - -Returns an array of shuffled values. Uses the Fisher-Yates shuffle algorithm. - -```go -randomOrder := lo.Shuffle[int]([]int{0, 1, 2, 3, 4, 5}) -// []int{1, 4, 0, 3, 5, 2} -``` - -[[play](https://go.dev/play/p/Qp73bnTDnc7)] - -### Reverse - -Reverses array so that the first element becomes the last, the second element becomes the second to last, and so on. 
- -```go -reverseOrder := lo.Reverse[int]([]int{0, 1, 2, 3, 4, 5}) -// []int{5, 4, 3, 2, 1, 0} -``` - -[[play](https://go.dev/play/p/fhUMLvZ7vS6)] - -### Fill - -Fills elements of array with `initial` value. - -```go -type foo struct { - bar string -} - -func (f foo) Clone() foo { - return foo{f.bar} -} - -initializedSlice := lo.Fill[foo]([]foo{foo{"a"}, foo{"a"}}, foo{"b"}) -// []foo{foo{"b"}, foo{"b"}} -``` - -[[play](https://go.dev/play/p/VwR34GzqEub)] - -### Repeat - -Builds a slice with N copies of initial value. - -```go -type foo struct { - bar string -} - -func (f foo) Clone() foo { - return foo{f.bar} -} - -slice := lo.Repeat[foo](2, foo{"a"}) -// []foo{foo{"a"}, foo{"a"}} -``` - -[[play](https://go.dev/play/p/g3uHXbmc3b6)] - -### RepeatBy - -Builds a slice with values returned by N calls of callback. - -```go -slice := lo.RepeatBy[string](0, func (i int) string { - return strconv.FormatInt(int64(math.Pow(float64(i), 2)), 10) -}) -// []string{} - -slice := lo.RepeatBy[string](5, func(i int) string { - return strconv.FormatInt(int64(math.Pow(float64(i), 2)), 10) -}) -// []string{"0", "1", "4", "9", "16"} -``` - -[[play](https://go.dev/play/p/ozZLCtX_hNU)] - -### KeyBy - -Transforms a slice or an array of structs to a map based on a pivot callback. - -```go -m := lo.KeyBy[int, string]([]string{"a", "aa", "aaa"}, func(str string) int { - return len(str) -}) -// map[int]string{1: "a", 2: "aa", 3: "aaa"} - -type Character struct { - dir string - code int -} -characters := []Character{ - {dir: "left", code: 97}, - {dir: "right", code: 100}, -} -result := lo.KeyBy[string, Character](characters, func(char Character) string { - return string(rune(char.code)) -}) -//map[a:{dir:left code:97} d:{dir:right code:100}] -``` - -[[play](https://go.dev/play/p/mdaClUAT-zZ)] - -### Associate (alias: SliceToMap) - -Returns a map containing key-value pairs provided by transform function applied to elements of the given slice. -If any of two pairs would have the same key the last one gets added to the map. - -The order of keys in returned map is not specified and is not guaranteed to be the same from the original array. - -```go -in := []*foo{{baz: "apple", bar: 1}, {baz: "banana", bar: 2}} - -aMap := lo.Associate[*foo, string, int](in, func (f *foo) (string, int) { - return f.baz, f.bar -}) -// map[string][int]{ "apple":1, "banana":2 } -``` - -[[play](https://go.dev/play/p/WHa2CfMO3Lr)] - -### Drop - -Drops n elements from the beginning of a slice or array. - -```go -l := lo.Drop[int]([]int{0, 1, 2, 3, 4, 5}, 2) -// []int{2, 3, 4, 5} -``` - -[[play](https://go.dev/play/p/JswS7vXRJP2)] - -### DropRight - -Drops n elements from the end of a slice or array. - -```go -l := lo.DropRight[int]([]int{0, 1, 2, 3, 4, 5}, 2) -// []int{0, 1, 2, 3} -``` - -[[play](https://go.dev/play/p/GG0nXkSJJa3)] - -### DropWhile - -Drop elements from the beginning of a slice or array while the predicate returns true. - -```go -l := lo.DropWhile[string]([]string{"a", "aa", "aaa", "aa", "aa"}, func(val string) bool { - return len(val) <= 2 -}) -// []string{"aaa", "aa", "aa"} -``` - -[[play](https://go.dev/play/p/7gBPYw2IK16)] - -### DropRightWhile - -Drop elements from the end of a slice or array while the predicate returns true. 
- -```go -l := lo.DropRightWhile[string]([]string{"a", "aa", "aaa", "aa", "aa"}, func(val string) bool { - return len(val) <= 2 -}) -// []string{"a", "aa", "aaa"} -``` - -[[play](https://go.dev/play/p/3-n71oEC0Hz)] - -### Reject - -The opposite of Filter, this method returns the elements of collection that predicate does not return truthy for. - -```go -odd := lo.Reject[int]([]int{1, 2, 3, 4}, func(x int, _ int) bool { - return x%2 == 0 -}) -// []int{1, 3} -``` - -[[play](https://go.dev/play/p/YkLMODy1WEL)] - -### Count - -Counts the number of elements in the collection that compare equal to value. - -```go -count := lo.Count[int]([]int{1, 5, 1}, 1) -// 2 -``` - -[[play](https://go.dev/play/p/Y3FlK54yveC)] - -### CountBy - -Counts the number of elements in the collection for which predicate is true. - -```go -count := lo.CountBy[int]([]int{1, 5, 1}, func(i int) bool { - return i < 4 -}) -// 2 -``` - -[[play](https://go.dev/play/p/ByQbNYQQi4X)] - -### CountValues - -Counts the number of each element in the collection. - -```go -lo.CountValues([]int{}) -// map[int]int{} - -lo.CountValues([]int{1, 2}) -// map[int]int{1: 1, 2: 1} - -lo.CountValues([]int{1, 2, 2}) -// map[int]int{1: 1, 2: 2} - -lo.CountValues([]string{"foo", "bar", ""}) -// map[string]int{"": 1, "foo": 1, "bar": 1} - -lo.CountValues([]string{"foo", "bar", "bar"}) -// map[string]int{"foo": 1, "bar": 2} -``` - -[[play](https://go.dev/play/p/-p-PyLT4dfy)] - -### CountValuesBy - -Counts the number of each element in the collection. It ss equivalent to chaining lo.Map and lo.CountValues. - -```go -isEven := func(v int) bool { - return v%2==0 -} - -lo.CountValuesBy([]int{}, isEven) -// map[bool]int{} - -lo.CountValuesBy([]int{1, 2}, isEven) -// map[bool]int{false: 1, true: 1} - -lo.CountValuesBy([]int{1, 2, 2}, isEven) -// map[bool]int{false: 1, true: 2} - -length := func(v string) int { - return len(v) -} - -lo.CountValuesBy([]string{"foo", "bar", ""}, length) -// map[int]int{0: 1, 3: 2} - -lo.CountValuesBy([]string{"foo", "bar", "bar"}, length) -// map[int]int{3: 3} -``` - -[[play](https://go.dev/play/p/2U0dG1SnOmS)] - -### Subset - -Returns a copy of a slice from `offset` up to `length` elements. Like `slice[start:start+length]`, but does not panic on overflow. - -```go -in := []int{0, 1, 2, 3, 4} - -sub := lo.Subset(in, 2, 3) -// []int{2, 3, 4} - -sub := lo.Subset(in, -4, 3) -// []int{1, 2, 3} - -sub := lo.Subset(in, -2, math.MaxUint) -// []int{3, 4} -``` - -[[play](https://go.dev/play/p/tOQu1GhFcog)] - -### Slice - -Returns a copy of a slice from `start` up to, but not including `end`. Like `slice[start:end]`, but does not panic on overflow. - -```go -in := []int{0, 1, 2, 3, 4} - -slice := lo.Slice(in, 0, 5) -// []int{0, 1, 2, 3, 4} - -slice := lo.Slice(in, 2, 3) -// []int{2} - -slice := lo.Slice(in, 2, 6) -// []int{2, 3, 4} - -slice := lo.Slice(in, 4, 3) -// []int{} -``` - -[[play](https://go.dev/play/p/8XWYhfMMA1h)] - -### Replace - -Returns a copy of the slice with the first n non-overlapping instances of old replaced by new. - -```go -in := []int{0, 1, 0, 1, 2, 3, 0} - -slice := lo.Replace(in, 0, 42, 1) -// []int{42, 1, 0, 1, 2, 3, 0} - -slice := lo.Replace(in, -1, 42, 1) -// []int{0, 1, 0, 1, 2, 3, 0} - -slice := lo.Replace(in, 0, 42, 2) -// []int{42, 1, 42, 1, 2, 3, 0} - -slice := lo.Replace(in, 0, 42, -1) -// []int{42, 1, 42, 1, 2, 3, 42} -``` - -[[play](https://go.dev/play/p/XfPzmf9gql6)] - -### ReplaceAll - -Returns a copy of the slice with all non-overlapping instances of old replaced by new. 
- -```go -in := []int{0, 1, 0, 1, 2, 3, 0} - -slice := lo.ReplaceAll(in, 0, 42) -// []int{42, 1, 42, 1, 2, 3, 42} - -slice := lo.ReplaceAll(in, -1, 42) -// []int{0, 1, 0, 1, 2, 3, 0} -``` - -[[play](https://go.dev/play/p/a9xZFUHfYcV)] - -### Compact - -Returns a slice of all non-zero elements. - -```go -in := []string{"", "foo", "", "bar", ""} - -slice := lo.Compact[string](in) -// []string{"foo", "bar"} -``` - -[[play](https://go.dev/play/p/tXiy-iK6PAc)] - -### IsSorted - -Checks if a slice is sorted. - -```go -slice := lo.IsSorted([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) -// true -``` - -[[play](https://go.dev/play/p/mc3qR-t4mcx)] - -### IsSortedByKey - -Checks if a slice is sorted by iteratee. - -```go -slice := lo.IsSortedByKey([]string{"a", "bb", "ccc"}, func(s string) int { - return len(s) -}) -// true -``` - -[[play](https://go.dev/play/p/wiG6XyBBu49)] - -### Keys - -Creates an array of the map keys. - -```go -keys := lo.Keys[string, int](map[string]int{"foo": 1, "bar": 2}) -// []string{"foo", "bar"} -``` - -[[play](https://go.dev/play/p/Uu11fHASqrU)] - -### Values - -Creates an array of the map values. - -```go -values := lo.Values[string, int](map[string]int{"foo": 1, "bar": 2}) -// []int{1, 2} -``` - -[[play](https://go.dev/play/p/nnRTQkzQfF6)] - -### PickBy - -Returns same map type filtered by given predicate. - -```go -m := lo.PickBy[string, int](map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(key string, value int) bool { - return value%2 == 1 -}) -// map[string]int{"foo": 1, "baz": 3} -``` - -[[play](https://go.dev/play/p/kdg8GR_QMmf)] - -### PickByKeys - -Returns same map type filtered by given keys. - -```go -m := lo.PickByKeys[string, int](map[string]int{"foo": 1, "bar": 2, "baz": 3}, []string{"foo", "baz"}) -// map[string]int{"foo": 1, "baz": 3} -``` - -[[play](https://go.dev/play/p/R1imbuci9qU)] - -### PickByValues - -Returns same map type filtered by given values. - -```go -m := lo.PickByValues[string, int](map[string]int{"foo": 1, "bar": 2, "baz": 3}, []int{1, 3}) -// map[string]int{"foo": 1, "baz": 3} -``` - -[[play](https://go.dev/play/p/1zdzSvbfsJc)] - -### OmitBy - -Returns same map type filtered by given predicate. - -```go -m := lo.OmitBy[string, int](map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(key string, value int) bool { - return value%2 == 1 -}) -// map[string]int{"bar": 2} -``` - -[[play](https://go.dev/play/p/EtBsR43bdsd)] - -### OmitByKeys - -Returns same map type filtered by given keys. - -```go -m := lo.OmitByKeys[string, int](map[string]int{"foo": 1, "bar": 2, "baz": 3}, []string{"foo", "baz"}) -// map[string]int{"bar": 2} -``` - -[[play](https://go.dev/play/p/t1QjCrs-ysk)] - -### OmitByValues - -Returns same map type filtered by given values. - -```go -m := lo.OmitByValues[string, int](map[string]int{"foo": 1, "bar": 2, "baz": 3}, []int{1, 3}) -// map[string]int{"bar": 2} -``` - -[[play](https://go.dev/play/p/9UYZi-hrs8j)] - -### Entries (alias: ToPairs) - -Transforms a map into array of key/value pairs. - -```go -entries := lo.Entries[string, int](map[string]int{"foo": 1, "bar": 2}) -// []lo.Entry[string, int]{ -// { -// Key: "foo", -// Value: 1, -// }, -// { -// Key: "bar", -// Value: 2, -// }, -// } -``` - -[[play](https://go.dev/play/p/3Dhgx46gawJ)] - -### FromEntries (alias: FromPairs) - -Transforms an array of key/value pairs into a map. 
- -```go -m := lo.FromEntries[string, int]([]lo.Entry[string, int]{ - { - Key: "foo", - Value: 1, - }, - { - Key: "bar", - Value: 2, - }, -}) -// map[string]int{"foo": 1, "bar": 2} -``` - -[[play](https://go.dev/play/p/oIr5KHFGCEN)] - -### Invert - -Creates a map composed of the inverted keys and values. If map contains duplicate values, subsequent values overwrite property assignments of previous values. - -```go -m1 := lo.Invert[string, int](map[string]int{"a": 1, "b": 2}) -// map[int]string{1: "a", 2: "b"} - -m2 := lo.Invert[string, int](map[string]int{"a": 1, "b": 2, "c": 1}) -// map[int]string{1: "c", 2: "b"} -``` - -[[play](https://go.dev/play/p/rFQ4rak6iA1)] - -### Assign - -Merges multiple maps from left to right. - -```go -mergedMaps := lo.Assign[string, int]( - map[string]int{"a": 1, "b": 2}, - map[string]int{"b": 3, "c": 4}, -) -// map[string]int{"a": 1, "b": 3, "c": 4} -``` - -[[play](https://go.dev/play/p/VhwfJOyxf5o)] - -### MapKeys - -Manipulates a map keys and transforms it to a map of another type. - -```go -m2 := lo.MapKeys[int, int, string](map[int]int{1: 1, 2: 2, 3: 3, 4: 4}, func(_ int, v int) string { - return strconv.FormatInt(int64(v), 10) -}) -// map[string]int{"1": 1, "2": 2, "3": 3, "4": 4} -``` - -[[play](https://go.dev/play/p/9_4WPIqOetJ)] - -### MapValues - -Manipulates a map values and transforms it to a map of another type. - -```go -m1 := map[int]int64{1: 1, 2: 2, 3: 3} - -m2 := lo.MapValues[int, int64, string](m1, func(x int64, _ int) string { - return strconv.FormatInt(x, 10) -}) -// map[int]string{1: "1", 2: "2", 3: "3"} -``` - -[[play](https://go.dev/play/p/T_8xAfvcf0W)] - -### MapEntries - -Manipulates a map entries and transforms it to a map of another type. - -```go -in := map[string]int{"foo": 1, "bar": 2} - -out := lo.MapEntries(in, func(k string, v int) (int, string) { - return v,k -}) -// map[int]string{1: "foo", 2: "bar"} -``` - -[[play](https://go.dev/play/p/VuvNQzxKimT)] - -### MapToSlice - -Transforms a map into a slice based on specific iteratee. - -```go -m := map[int]int64{1: 4, 2: 5, 3: 6} - -s := lo.MapToSlice(m, func(k int, v int64) string { - return fmt.Sprintf("%d_%d", k, v) -}) -// []string{"1_4", "2_5", "3_6"} -``` - -[[play](https://go.dev/play/p/ZuiCZpDt6LD)] - -### Range / RangeFrom / RangeWithSteps - -Creates an array of numbers (positive and/or negative) progressing from start up to, but not including end. - -```go -result := lo.Range(4) -// [0, 1, 2, 3] - -result := lo.Range(-4) -// [0, -1, -2, -3] - -result := lo.RangeFrom(1, 5) -// [1, 2, 3, 4, 5] - -result := lo.RangeFrom[float64](1.0, 5) -// [1.0, 2.0, 3.0, 4.0, 5.0] - -result := lo.RangeWithSteps(0, 20, 5) -// [0, 5, 10, 15] - -result := lo.RangeWithSteps[float32](-1.0, -4.0, -1.0) -// [-1.0, -2.0, -3.0] - -result := lo.RangeWithSteps(1, 4, -1) -// [] - -result := lo.Range(0) -// [] -``` - -[[play](https://go.dev/play/p/0r6VimXAi9H)] - -### Clamp - -Clamps number within the inclusive lower and upper bounds. - -```go -r1 := lo.Clamp(0, -10, 10) -// 0 - -r2 := lo.Clamp(-42, -10, 10) -// -10 - -r3 := lo.Clamp(42, -10, 10) -// 10 -``` - -[[play](https://go.dev/play/p/RU4lJNC2hlI)] - -### Sum - -Sums the values in a collection. - -If collection is empty 0 is returned. - -```go -list := []int{1, 2, 3, 4, 5} -sum := lo.Sum(list) -// 15 -``` - -[[play](https://go.dev/play/p/upfeJVqs4Bt)] - -### SumBy - -Summarizes the values in a collection using the given return value from the iteration function. - -If collection is empty 0 is returned. 
- -```go -strings := []string{"foo", "bar"} -sum := lo.SumBy(strings, func(item string) int { - return len(item) -}) -// 6 -``` - -[[play](https://go.dev/play/p/Dz_a_7jN_ca)] - -### RandomString - -Returns a random string of the specified length and made of the specified charset. - -```go -str := lo.RandomString(5, lo.LettersCharset) -// example: "eIGbt" -``` - -[[play](https://go.dev/play/p/rRseOQVVum4)] - -### Substring - -Return part of a string. - -```go -sub := lo.Substring("hello", 2, 3) -// "llo" - -sub := lo.Substring("hello", -4, 3) -// "ell" - -sub := lo.Substring("hello", -2, math.MaxUint) -// "lo" -``` - -[[play](https://go.dev/play/p/TQlxQi82Lu1)] - -### ChunkString - -Returns an array of strings split into groups the length of size. If array can't be split evenly, the final chunk will be the remaining elements. - -```go -lo.ChunkString("123456", 2) -// []string{"12", "34", "56"} - -lo.ChunkString("1234567", 2) -// []string{"12", "34", "56", "7"} - -lo.ChunkString("", 2) -// []string{""} - -lo.ChunkString("1", 2) -// []string{"1"} -``` - -[[play](https://go.dev/play/p/__FLTuJVz54)] - -### RuneLength - -An alias to utf8.RuneCountInString which returns the number of runes in string. - -```go -sub := lo.RuneLength("hellô") -// 5 - -sub := len("hellô") -// 6 -``` - -[[play](https://go.dev/play/p/tuhgW_lWY8l)] - -### T2 -> T9 - -Creates a tuple from a list of values. - -```go -tuple1 := lo.T2("x", 1) -// Tuple2[string, int]{A: "x", B: 1} - -func example() (string, int) { return "y", 2 } -tuple2 := lo.T2(example()) -// Tuple2[string, int]{A: "y", B: 2} -``` - -[[play](https://go.dev/play/p/IllL3ZO4BQm)] - -### Unpack2 -> Unpack9 - -Returns values contained in tuple. - -```go -r1, r2 := lo.Unpack2[string, int](lo.Tuple2[string, int]{"a", 1}) -// "a", 1 -``` - -Unpack is also available as a method of TupleX. - -```go -tuple2 := lo.T2("a", 1) -a, b := tuple2.Unpack() -// "a" 1 -``` - -[[play](https://go.dev/play/p/xVP_k0kJ96W)] - -### Zip2 -> Zip9 - -Zip creates a slice of grouped elements, the first of which contains the first elements of the given arrays, the second of which contains the second elements of the given arrays, and so on. - -When collections have different size, the Tuple attributes are filled with zero value. - -```go -tuples := lo.Zip2[string, int]([]string{"a", "b"}, []int{1, 2}) -// []Tuple2[string, int]{{A: "a", B: 1}, {A: "b", B: 2}} -``` - -[[play](https://go.dev/play/p/jujaA6GaJTp)] - -### Unzip2 -> Unzip9 - -Unzip accepts an array of grouped elements and creates an array regrouping the elements to their pre-zip configuration. - -```go -a, b := lo.Unzip2[string, int]([]Tuple2[string, int]{{A: "a", B: 1}, {A: "b", B: 2}}) -// []string{"a", "b"} -// []int{1, 2} -``` - -[[play](https://go.dev/play/p/ciHugugvaAW)] - -### ChannelDispatcher - -Distributes messages from input channels into N child channels. Close events are propagated to children. - -Underlying channels can have a fixed buffer capacity or be unbuffered when cap is 0. - -```go -ch := make(chan int, 42) -for i := 0; i <= 10; i++ { - ch <- i -} - -children := lo.ChannelDispatcher(ch, 5, 10, DispatchingStrategyRoundRobin[int]) -// []<-chan int{...} - -consumer := func(c <-chan int) { - for { - msg, ok := <-c - if !ok { - println("closed") - break - } - - println(msg) - } -} - -for i := range children { - go consumer(children[i]) -} -``` - -Many distributions strategies are available: - -- [lo.DispatchingStrategyRoundRobin](./channel.go): Distributes messages in a rotating sequential manner. 
-- [lo.DispatchingStrategyRandom](./channel.go): Distributes messages in a random manner. -- [lo.DispatchingStrategyWeightedRandom](./channel.go): Distributes messages in a weighted manner. -- [lo.DispatchingStrategyFirst](./channel.go): Distributes messages in the first non-full channel. -- [lo.DispatchingStrategyLeast](./channel.go): Distributes messages in the emptiest channel. -- [lo.DispatchingStrategyMost](./channel.go): Distributes to the fullest channel. - -Some strategies bring fallback, in order to favor non-blocking behaviors. See implementations. - -For custom strategies, just implement the `lo.DispatchingStrategy` prototype: - -```go -type DispatchingStrategy[T any] func(message T, messageIndex uint64, channels []<-chan T) int -``` - -Eg: - -```go -type Message struct { - TenantID uuid.UUID -} - -func hash(id uuid.UUID) int { - h := fnv.New32a() - h.Write([]byte(id.String())) - return int(h.Sum32()) -} - -// Routes messages per TenantID. -customStrategy := func(message pubsub.AMQPSubMessage, messageIndex uint64, channels []<-chan pubsub.AMQPSubMessage) int { - destination := hash(message.TenantID) % len(channels) - - // check if channel is full - if len(channels[destination]) < cap(channels[destination]) { - return destination - } - - // fallback when child channel is full - return utils.DispatchingStrategyRoundRobin(message, uint64(destination), channels) -} - -children := lo.ChannelDispatcher(ch, 5, 10, customStrategy) -... -``` - -### SliceToChannel - -Returns a read-only channels of collection elements. Channel is closed after last element. Channel capacity can be customized. - -```go -list := []int{1, 2, 3, 4, 5} - -for v := range lo.SliceToChannel(2, list) { - println(v) -} -// prints 1, then 2, then 3, then 4, then 5 -``` - -### ChannelToSlice - -Returns a slice built from channels items. Blocks until channel closes. - -```go -list := []int{1, 2, 3, 4, 5} -ch := lo.SliceToChannel(2, list) - -items := ChannelToSlice(ch) -// []int{1, 2, 3, 4, 5} -``` - -### Generator - -Implements the generator design pattern. Channel is closed after last element. Channel capacity can be customized. - -```go -generator := func(yield func(int)) { - yield(1) - yield(2) - yield(3) -} - -for v := range lo.Generator(2, generator) { - println(v) -} -// prints 1, then 2, then 3 -``` - -### Buffer - -Creates a slice of n elements from a channel. Returns the slice, the slice length, the read time and the channel status (opened/closed). - -```go -ch := lo.SliceToChannel(2, []int{1, 2, 3, 4, 5}) - -items1, length1, duration1, ok1 := lo.Buffer(ch, 3) -// []int{1, 2, 3}, 3, 0s, true -items2, length2, duration2, ok2 := lo.Buffer(ch, 3) -// []int{4, 5}, 2, 0s, false -``` - -Example: RabbitMQ consumer 👇 - -```go -ch := readFromQueue() - -for { - // read 1k items - items, length, _, ok := lo.Buffer(ch, 1000) - - // do batching stuff - - if !ok { - break - } -} -``` - -### BufferWithTimeout - -Creates a slice of n elements from a channel, with timeout. Returns the slice, the slice length, the read time and the channel status (opened/closed). 
- -```go -generator := func(yield func(int)) { - for i := 0; i < 5; i++ { - yield(i) - time.Sleep(35*time.Millisecond) - } -} - -ch := lo.Generator(0, generator) - -items1, length1, duration1, ok1 := lo.BufferWithTimeout(ch, 3, 100*time.Millisecond) -// []int{1, 2}, 2, 100ms, true -items2, length2, duration2, ok2 := lo.BufferWithTimeout(ch, 3, 100*time.Millisecond) -// []int{3, 4, 5}, 3, 75ms, true -items3, length3, duration2, ok3 := lo.BufferWithTimeout(ch, 3, 100*time.Millisecond) -// []int{}, 0, 10ms, false -``` - -Example: RabbitMQ consumer 👇 - -```go -ch := readFromQueue() - -for { - // read 1k items - // wait up to 1 second - items, length, _, ok := lo.BufferWithTimeout(ch, 1000, 1*time.Second) - - // do batching stuff - - if !ok { - break - } -} -``` - -Example: Multithreaded RabbitMQ consumer 👇 - -```go -ch := readFromQueue() - -// 5 workers -// prefetch 1k messages per worker -children := lo.ChannelDispatcher(ch, 5, 1000, DispatchingStrategyFirst[int]) - -consumer := func(c <-chan int) { - for { - // read 1k items - // wait up to 1 second - items, length, _, ok := lo.BufferWithTimeout(ch, 1000, 1*time.Second) - - // do batching stuff - - if !ok { - break - } - } -} - -for i := range children { - go consumer(children[i]) -} -``` - -### FanIn - -Merge messages from multiple input channels into a single buffered channel. Output messages has no priority. When all upstream channels reach EOF, downstream channel closes. - -```go -stream1 := make(chan int, 42) -stream2 := make(chan int, 42) -stream3 := make(chan int, 42) - -all := lo.FanIn(100, stream1, stream2, stream3) -// <-chan int -``` - -### FanOut - -Broadcasts all the upstream messages to multiple downstream channels. When upstream channel reach EOF, downstream channels close. If any downstream channels is full, broadcasting is paused. - -```go -stream := make(chan int, 42) - -all := lo.FanOut(5, 100, stream) -// [5]<-chan int -``` - -### Contains - -Returns true if an element is present in a collection. - -```go -present := lo.Contains[int]([]int{0, 1, 2, 3, 4, 5}, 5) -// true -``` - -### ContainsBy - -Returns true if the predicate function returns `true`. - -```go -present := lo.ContainsBy[int]([]int{0, 1, 2, 3, 4, 5}, func(x int) bool { - return x == 3 -}) -// true -``` - -### Every - -Returns true if all elements of a subset are contained into a collection or if the subset is empty. - -```go -ok := lo.Every[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) -// true - -ok := lo.Every[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 6}) -// false -``` - -### EveryBy - -Returns true if the predicate returns true for all of the elements in the collection or if the collection is empty. - -```go -b := EveryBy[int]([]int{1, 2, 3, 4}, func(x int) bool { - return x < 5 -}) -// true -``` - -### Some - -Returns true if at least 1 element of a subset is contained into a collection. -If the subset is empty Some returns false. - -```go -ok := lo.Some[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) -// true - -ok := lo.Some[int]([]int{0, 1, 2, 3, 4, 5}, []int{-1, 6}) -// false -``` - -### SomeBy - -Returns true if the predicate returns true for any of the elements in the collection. -If the collection is empty SomeBy returns false. - -```go -b := SomeBy[int]([]int{1, 2, 3, 4}, func(x int) bool { - return x < 3 -}) -// true -``` - -### None - -Returns true if no element of a subset are contained into a collection or if the subset is empty. 
- -```go -b := None[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) -// false -b := None[int]([]int{0, 1, 2, 3, 4, 5}, []int{-1, 6}) -// true -``` - -### NoneBy - -Returns true if the predicate returns true for none of the elements in the collection or if the collection is empty. - -```go -b := NoneBy[int]([]int{1, 2, 3, 4}, func(x int) bool { - return x < 0 -}) -// true -``` - -### Intersect - -Returns the intersection between two collections. - -```go -result1 := lo.Intersect[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) -// []int{0, 2} - -result2 := lo.Intersect[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 6} -// []int{0} - -result3 := lo.Intersect[int]([]int{0, 1, 2, 3, 4, 5}, []int{-1, 6}) -// []int{} -``` - -### Difference - -Returns the difference between two collections. - -- The first value is the collection of element absent of list2. -- The second value is the collection of element absent of list1. - -```go -left, right := lo.Difference[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 2, 6}) -// []int{1, 3, 4, 5}, []int{6} - -left, right := lo.Difference[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 1, 2, 3, 4, 5}) -// []int{}, []int{} -``` - -### Union - -Returns all distinct elements from given collections. Result will not change the order of elements relatively. - -```go -union := lo.Union[int]([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}, []int{0, 10}) -// []int{0, 1, 2, 3, 4, 5, 10} -``` - -### Without - -Returns slice excluding all given values. - -```go -subset := lo.Without[int]([]int{0, 2, 10}, 2) -// []int{0, 10} - -subset := lo.Without[int]([]int{0, 2, 10}, 0, 1, 2, 3, 4, 5) -// []int{10} -``` - -### WithoutEmpty - -Returns slice excluding empty values. - -```go -subset := lo.WithoutEmpty[int]([]int{0, 2, 10}) -// []int{2, 10} -``` - -### IndexOf - -Returns the index at which the first occurrence of a value is found in an array or return -1 if the value cannot be found. - -```go -found := lo.IndexOf[int]([]int{0, 1, 2, 1, 2, 3}, 2) -// 2 - -notFound := lo.IndexOf[int]([]int{0, 1, 2, 1, 2, 3}, 6) -// -1 -``` - -### LastIndexOf - -Returns the index at which the last occurrence of a value is found in an array or return -1 if the value cannot be found. - -```go -found := lo.LastIndexOf[int]([]int{0, 1, 2, 1, 2, 3}, 2) -// 4 - -notFound := lo.LastIndexOf[int]([]int{0, 1, 2, 1, 2, 3}, 6) -// -1 -``` - -### Find - -Search an element in a slice based on a predicate. It returns element and true if element was found. - -```go -str, ok := lo.Find[string]([]string{"a", "b", "c", "d"}, func(i string) bool { - return i == "b" -}) -// "b", true - -str, ok := lo.Find[string]([]string{"foobar"}, func(i string) bool { - return i == "b" -}) -// "", false -``` - -### FindIndexOf - -FindIndexOf searches an element in a slice based on a predicate and returns the index and true. It returns -1 and false if the element is not found. - -```go -str, index, ok := lo.FindIndexOf[string]([]string{"a", "b", "a", "b"}, func(i string) bool { - return i == "b" -}) -// "b", 1, true - -str, index, ok := lo.FindIndexOf[string]([]string{"foobar"}, func(i string) bool { - return i == "b" -}) -// "", -1, false -``` - -### FindLastIndexOf - -FindLastIndexOf searches an element in a slice based on a predicate and returns the index and true. It returns -1 and false if the element is not found. 
- -```go -str, index, ok := lo.FindLastIndexOf[string]([]string{"a", "b", "a", "b"}, func(i string) bool { - return i == "b" -}) -// "b", 4, true - -str, index, ok := lo.FindLastIndexOf[string]([]string{"foobar"}, func(i string) bool { - return i == "b" -}) -// "", -1, false -``` - -### FindKey - -Returns the key of the first value matching. - -```go -result1, ok1 := lo.FindKey(map[string]int{"foo": 1, "bar": 2, "baz": 3}, 2) -// "bar", true - -result2, ok2 := lo.FindKey(map[string]int{"foo": 1, "bar": 2, "baz": 3}, 42) -// "", false - -type test struct { - foobar string -} -result3, ok3 := lo.FindKey(map[string]test{"foo": test{"foo"}, "bar": test{"bar"}, "baz": test{"baz"}}, test{"foo"}) -// "foo", true -``` - -### FindKeyBy - -Returns the key of the first element predicate returns truthy for. - -```go -result1, ok1 := lo.FindKeyBy(map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(k string, v int) bool { - return k == "foo" -}) -// "foo", true - -result2, ok2 := lo.FindKeyBy(map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(k string, v int) bool { - return false -}) -// "", false -``` - -### FindUniques - -Returns a slice with all the unique elements of the collection. The order of result values is determined by the order they occur in the array. - -```go -uniqueValues := lo.FindUniques[int]([]int{1, 2, 2, 1, 2, 3}) -// []int{3} -``` - -### FindUniquesBy - -Returns a slice with all the unique elements of the collection. The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is invoked for each element in array to generate the criterion by which uniqueness is computed. - -```go -uniqueValues := lo.FindUniquesBy[int, int]([]int{3, 4, 5, 6, 7}, func(i int) int { - return i%3 -}) -// []int{5} -``` - -### FindDuplicates - -Returns a slice with the first occurrence of each duplicated elements of the collection. The order of result values is determined by the order they occur in the array. - -```go -duplicatedValues := lo.FindDuplicates[int]([]int{1, 2, 2, 1, 2, 3}) -// []int{1, 2} -``` - -### FindDuplicatesBy - -Returns a slice with the first occurrence of each duplicated elements of the collection. The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is invoked for each element in array to generate the criterion by which uniqueness is computed. - -```go -duplicatedValues := lo.FindDuplicatesBy[int, int]([]int{3, 4, 5, 6, 7}, func(i int) int { - return i%3 -}) -// []int{3, 4} -``` - -### Min - -Search the minimum value of a collection. - -Returns zero value when collection is empty. - -```go -min := lo.Min([]int{1, 2, 3}) -// 1 - -min := lo.Min([]int{}) -// 0 -``` - -### MinBy - -Search the minimum value of a collection using the given comparison function. - -If several values of the collection are equal to the smallest value, returns the first such value. - -Returns zero value when collection is empty. - -```go -min := lo.MinBy([]string{"s1", "string2", "s3"}, func(item string, min string) bool { - return len(item) < len(min) -}) -// "s1" - -min := lo.MinBy([]string{}, func(item string, min string) bool { - return len(item) < len(min) -}) -// "" -``` - -### Max - -Search the maximum value of a collection. - -Returns zero value when collection is empty. - -```go -max := lo.Max([]int{1, 2, 3}) -// 3 - -max := lo.Max([]int{}) -// 0 -``` - -### MaxBy - -Search the maximum value of a collection using the given comparison function. 
- -If several values of the collection are equal to the greatest value, returns the first such value. - -Returns zero value when collection is empty. - -```go -max := lo.MaxBy([]string{"string1", "s2", "string3"}, func(item string, max string) bool { - return len(item) > len(max) -}) -// "string1" - -max := lo.MaxBy([]string{}, func(item string, max string) bool { - return len(item) > len(max) -}) -// "" -``` - -### Last - -Returns the last element of a collection or error if empty. - -```go -last, err := lo.Last[int]([]int{1, 2, 3}) -// 3 -``` - -### Nth - -Returns the element at index `nth` of collection. If `nth` is negative, the nth element from the end is returned. An error is returned when nth is out of slice bounds. - -```go -nth, err := lo.Nth[int]([]int{0, 1, 2, 3}, 2) -// 2 - -nth, err := lo.Nth[int]([]int{0, 1, 2, 3}, -2) -// 2 -``` - -### Sample - -Returns a random item from collection. - -```go -lo.Sample[string]([]string{"a", "b", "c"}) -// a random string from []string{"a", "b", "c"} - -lo.Sample[string]([]string{}) -// "" -``` - -### Samples - -Returns N random unique items from collection. - -```go -lo.Samples[string]([]string{"a", "b", "c"}, 3) -// []string{"a", "b", "c"} in random order -``` - -### Ternary - -A 1 line if/else statement. - -```go -result := lo.Ternary[string](true, "a", "b") -// "a" - -result := lo.Ternary[string](false, "a", "b") -// "b" -``` - -[[play](https://go.dev/play/p/t-D7WBL44h2)] - -### TernaryF - -A 1 line if/else statement whose options are functions. - -```go -result := lo.TernaryF[string](true, func() string { return "a" }, func() string { return "b" }) -// "a" - -result := lo.TernaryF[string](false, func() string { return "a" }, func() string { return "b" }) -// "b" -``` - -Useful to avoid nil-pointer dereferencing in intializations, or avoid running unnecessary code - -```go -var s *string - -someStr := TernaryF[string](s == nil, func() string { return uuid.New().String() }, func() string { return *s }) -// ef782193-c30c-4e2e-a7ae-f8ab5e125e02 -``` - -[[play](https://go.dev/play/p/AO4VW20JoqM)] - -### If / ElseIf / Else - -```go -result := lo.If[int](true, 1). - ElseIf(false, 2). - Else(3) -// 1 - -result := lo.If[int](false, 1). - ElseIf(true, 2). - Else(3) -// 2 - -result := lo.If[int](false, 1). - ElseIf(false, 2). - Else(3) -// 3 -``` - -Using callbacks: - -```go -result := lo.IfF[int](true, func () int { - return 1 - }). - ElseIfF(false, func () int { - return 2 - }). - ElseF(func () int { - return 3 - }) -// 1 -``` - -Mixed: - -```go -result := lo.IfF[int](true, func () int { - return 1 - }). - Else(42) -// 1 -``` - -[[play](https://go.dev/play/p/WSw3ApMxhyW)] - -### Switch / Case / Default - -```go -result := lo.Switch[int, string](1). - Case(1, "1"). - Case(2, "2"). - Default("3") -// "1" - -result := lo.Switch[int, string](2). - Case(1, "1"). - Case(2, "2"). - Default("3") -// "2" - -result := lo.Switch[int, string](42). - Case(1, "1"). - Case(2, "2"). - Default("3") -// "3" -``` - -Using callbacks: - -```go -result := lo.Switch[int, string](1). - CaseF(1, func() string { - return "1" - }). - CaseF(2, func() string { - return "2" - }). - DefaultF(func() string { - return "3" - }) -// "1" -``` - -Mixed: - -```go -result := lo.Switch[int, string](1). - CaseF(1, func() string { - return "1" - }). - Default("42") -// "1" -``` - -[[play](https://go.dev/play/p/TGbKUMAeRUd)] - -### ToPtr - -Returns a pointer copy of value. 
- -```go -ptr := lo.ToPtr[string]("hello world") -// *string{"hello world"} -``` - -### FromPtr - -Returns the pointer value or empty. - -```go -str := "hello world" -value := lo.FromPtr[string](&str) -// "hello world" - -value := lo.FromPtr[string](nil) -// "" -``` - -### FromPtrOr - -Returns the pointer value or the fallback value. - -```go -str := "hello world" -value := lo.FromPtrOr[string](&str, "empty") -// "hello world" - -value := lo.FromPtrOr[string](nil, "empty") -// "empty" -``` - -### ToSlicePtr - -Returns a slice of pointer copy of value. - -```go -ptr := lo.ToSlicePtr[string]([]string{"hello", "world"}) -// []*string{"hello", "world"} -``` - -### ToAnySlice - -Returns a slice with all elements mapped to `any` type. - -```go -elements := lo.ToAnySlice[int]([]int{1, 5, 1}) -// []any{1, 5, 1} -``` - -### FromAnySlice - -Returns an `any` slice with all elements mapped to a type. Returns false in case of type conversion failure. - -```go -elements, ok := lo.FromAnySlice[string]([]any{"foobar", 42}) -// []string{}, false - -elements, ok := lo.FromAnySlice[string]([]any{"foobar", "42"}) -// []string{"foobar", "42"}, true -``` - -### Empty - -Returns an empty value. - -```go -lo.Empty[int]() -// 0 -lo.Empty[string]() -// "" -lo.Empty[bool]() -// false -``` - -### IsEmpty - -Returns true if argument is a zero value. - -```go -lo.IsEmpty[int](0) -// true -lo.IsEmpty[int](42) -// false - -lo.IsEmpty[string]("") -// true -lo.IsEmpty[bool]("foobar") -// false - -type test struct { - foobar string -} - -lo.IsEmpty[test](test{foobar: ""}) -// true -lo.IsEmpty[test](test{foobar: "foobar"}) -// false -``` - -### IsNotEmpty - -Returns true if argument is a zero value. - -```go -lo.IsNotEmpty[int](0) -// false -lo.IsNotEmpty[int](42) -// true - -lo.IsNotEmpty[string]("") -// false -lo.IsNotEmpty[bool]("foobar") -// true - -type test struct { - foobar string -} - -lo.IsNotEmpty[test](test{foobar: ""}) -// false -lo.IsNotEmpty[test](test{foobar: "foobar"}) -// true -``` - -### Coalesce - -Returns the first non-empty arguments. Arguments must be comparable. - -```go -result, ok := lo.Coalesce(0, 1, 2, 3) -// 1 true - -result, ok := lo.Coalesce("") -// "" false - -var nilStr *string -str := "foobar" -result, ok := lo.Coalesce[*string](nil, nilStr, &str) -// &"foobar" true -``` - -### Partial - -Returns new function that, when called, has its first argument set to the provided value. - -```go -add := func(x, y int) int { return x + y } -f := lo.Partial(add, 5) - -f(10) -// 15 - -f(42) -// 47 -``` - -### Partial2 -> Partial5 - -Returns new function that, when called, has its first argument set to the provided value. - -```go -add := func(x, y, z int) int { return x + y + z } -f := lo.Partial2(add, 42) - -f(10, 5) -// 57 - -f(42, -4) -// 80 -``` - -### Attempt - -Invokes a function N times until it returns valid output. Returning either the caught error or nil. When first argument is less than `1`, the function runs until a successful response is returned. 
- -```go -iter, err := lo.Attempt(42, func(i int) error { - if i == 5 { - return nil - } - - return fmt.Errorf("failed") -}) -// 6 -// nil - -iter, err := lo.Attempt(2, func(i int) error { - if i == 5 { - return nil - } - - return fmt.Errorf("failed") -}) -// 2 -// error "failed" - -iter, err := lo.Attempt(0, func(i int) error { - if i < 42 { - return fmt.Errorf("failed") - } - - return nil -}) -// 43 -// nil -``` - -For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). - -[[play](https://go.dev/play/p/3ggJZ2ZKcMj)] - -### AttemptWithDelay - -Invokes a function N times until it returns valid output, with a pause between each call. Returning either the caught error or nil. - -When first argument is less than `1`, the function runs until a successful response is returned. - -```go -iter, duration, err := lo.AttemptWithDelay(5, 2*time.Second, func(i int, duration time.Duration) error { - if i == 2 { - return nil - } - - return fmt.Errorf("failed") -}) -// 3 -// ~ 4 seconds -// nil -``` - -For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). - -[[play](https://go.dev/play/p/tVs6CygC7m1)] - -### AttemptWhile - -Invokes a function N times until it returns valid output. Returning either the caught error or nil, and along with a bool value to identifying whether it needs invoke function continuously. It will terminate the invoke immediately if second bool value is returned with falsy value. - -When first argument is less than `1`, the function runs until a successful response is returned. - -```go -count1, err1 := lo.AttemptWhile(5, func(i int) (error, bool) { - err := doMockedHTTPRequest(i) - if err != nil { - if errors.Is(err, ErrBadRequest) { // lets assume ErrBadRequest is a critical error that needs to terminate the invoke - return err, false // flag the second return value as false to terminate the invoke - } - - return err, true - } - - return nil, false -}) -``` - -For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). - -[[play](https://go.dev/play/p/M2wVq24PaZM)] - -### AttemptWhileWithDelay - -Invokes a function N times until it returns valid output, with a pause between each call. Returning either the caught error or nil, and along with a bool value to identifying whether it needs to invoke function continuously. It will terminate the invoke immediately if second bool value is returned with falsy value. - -When first argument is less than `1`, the function runs until a successful response is returned. - -```go -count1, time1, err1 := lo.AttemptWhileWithDelay(5, time.Millisecond, func(i int, d time.Duration) (error, bool) { - err := doMockedHTTPRequest(i) - if err != nil { - if errors.Is(err, ErrBadRequest) { // lets assume ErrBadRequest is a critical error that needs to terminate the invoke - return err, false // flag the second return value as false to terminate the invoke - } - - return err, true - } - - return nil, false -}) -``` - -For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). - -[[play](https://go.dev/play/p/cfcmhvLO-nv)] - -### Debounce - -`NewDebounce` creates a debounced instance that delays invoking functions given until after wait milliseconds have elapsed, until `cancel` is called. 
- -```go -f := func() { - println("Called once after 100ms when debounce stopped invoking!") -} - -debounce, cancel := lo.NewDebounce(100 * time.Millisecond, f) -for j := 0; j < 10; j++ { - debounce() -} - -time.Sleep(1 * time.Second) -cancel() -``` - -[[play](https://go.dev/play/p/mz32VMK2nqe)] - -### Synchronize - -Wraps the underlying callback in a mutex. It receives an optional mutex. - -```go -s := lo.Synchronize() - -for i := 0; i < 10; i++ { - go s.Do(func () { - println("will be called sequentially") - }) -} -``` - -It is equivalent to: - -```go -mu := sync.Mutex{} - -func foobar() { - mu.Lock() - defer mu.Unlock() - - // ... -} -``` - -### Async - -Executes a function in a goroutine and returns the result in a channel. - -```go -ch := lo.Async(func() error { time.Sleep(10 * time.Second); return nil }) -// chan error (nil) -``` - -### Async{0->6} - -Executes a function in a goroutine and returns the result in a channel. -For function with multiple return values, the results will be returned as a tuple inside the channel. -For function without return, struct{} will be returned in the channel. - -```go -ch := lo.Async0(func() { time.Sleep(10 * time.Second) }) -// chan struct{} - -ch := lo.Async1(func() int { - time.Sleep(10 * time.Second); - return 42 -}) -// chan int (42) - -ch := lo.Async2(func() (int, string) { - time.Sleep(10 * time.Second); - return 42, "Hello" -}) -// chan lo.Tuple2[int, string] ({42, "Hello"}) -``` - -### Transaction - -Implements a Saga pattern. - -```go -transaction := NewTransaction[int](). - Then( - func(state int) (int, error) { - fmt.Println("step 1") - return state + 10, nil - }, - func(state int) int { - fmt.Println("rollback 1") - return state - 10 - }, - ). - Then( - func(state int) (int, error) { - fmt.Println("step 2") - return state + 15, nil - }, - func(state int) int { - fmt.Println("rollback 2") - return state - 15 - }, - ). - Then( - func(state int) (int, error) { - fmt.Println("step 3") - - if true { - return state, fmt.Errorf("error") - } - - return state + 42, nil - }, - func(state int) int { - fmt.Println("rollback 3") - return state - 42 - }, - ) - -_, _ = transaction.Process(-5) - -// Output: -// step 1 -// step 2 -// step 3 -// rollback 2 -// rollback 1 -``` - -### Validate - -Helper function that creates an error when a condition is not met. - -```go -slice := []string{"a"} -val := lo.Validate(len(slice) == 0, "Slice should be empty but contains %v", slice) -// error("Slice should be empty but contains [a]") - -slice := []string{} -val := lo.Validate(len(slice) == 0, "Slice should be empty but contains %v", slice) -// nil -``` - -[[play](https://go.dev/play/p/vPyh51XpCBt)] - -### Must - -Wraps a function call to panics if second argument is `error` or `false`, returns the value otherwise. - -```go -val := lo.Must(time.Parse("2006-01-02", "2022-01-15")) -// 2022-01-15 - -val := lo.Must(time.Parse("2006-01-02", "bad-value")) -// panics -``` - -[[play](https://go.dev/play/p/TMoWrRp3DyC)] - -### Must{0->6} - -Must\* has the same behavior as Must, but returns multiple values. 
- -```go -func example0() (error) -func example1() (int, error) -func example2() (int, string, error) -func example3() (int, string, time.Date, error) -func example4() (int, string, time.Date, bool, error) -func example5() (int, string, time.Date, bool, float64, error) -func example6() (int, string, time.Date, bool, float64, byte, error) - -lo.Must0(example0()) -val1 := lo.Must1(example1()) // alias to Must -val1, val2 := lo.Must2(example2()) -val1, val2, val3 := lo.Must3(example3()) -val1, val2, val3, val4 := lo.Must4(example4()) -val1, val2, val3, val4, val5 := lo.Must5(example5()) -val1, val2, val3, val4, val5, val6 := lo.Must6(example6()) -``` - -You can wrap functions like `func (...) (..., ok bool)`. - -```go -// math.Signbit(float64) bool -lo.Must0(math.Signbit(v)) - -// bytes.Cut([]byte,[]byte) ([]byte, []byte, bool) -before, after := lo.Must2(bytes.Cut(s, sep)) -``` - -You can give context to the panic message by adding some printf-like arguments. - -```go -val, ok := lo.Find(myString, func(i string) bool { - return i == requiredChar -}) -lo.Must0(ok, "'%s' must always contain '%s'", myString, requiredChar) - -list := []int{0, 1, 2} -item := 5 -lo.Must0(lo.Contains[int](list, item), "'%s' must always contain '%s'", list, item) -... -``` - -[[play](https://go.dev/play/p/TMoWrRp3DyC)] - -### Try - -Calls the function and return false in case of error and on panic. - -```go -ok := lo.Try(func() error { - panic("error") - return nil -}) -// false - -ok := lo.Try(func() error { - return nil -}) -// true - -ok := lo.Try(func() error { - return fmt.Errorf("error") -}) -// false -``` - -[[play](https://go.dev/play/p/mTyyWUvn9u4)] - -### Try{0->6} - -The same behavior than `Try`, but callback returns 2 variables. - -```go -ok := lo.Try2(func() (string, error) { - panic("error") - return "", nil -}) -// false -``` - -[[play](https://go.dev/play/p/mTyyWUvn9u4)] - -### TryOr - -Calls the function and return a default value in case of error and on panic. - -```go -str, ok := lo.TryOr(func() (string, error) { - panic("error") - return "hello", nil -}, "world") -// world -// false - -ok := lo.TryOr(func() error { - return "hello", nil -}, "world") -// hello -// true - -ok := lo.TryOr(func() error { - return "hello", fmt.Errorf("error") -}, "world") -// world -// false -``` - -[[play](https://go.dev/play/p/B4F7Wg2Zh9X)] - -### TryOr{0->6} - -The same behavior than `TryOr`, but callback returns 2 variables. - -```go -str, nbr, ok := lo.TryOr2(func() (string, int, error) { - panic("error") - return "hello", 42, nil -}, "world", 21) -// world -// 21 -// false -``` - -[[play](https://go.dev/play/p/B4F7Wg2Zh9X)] - -### TryWithErrorValue - -The same behavior than `Try`, but also returns value passed to panic. - -```go -err, ok := lo.TryWithErrorValue(func() error { - panic("error") - return nil -}) -// "error", false -``` - -[[play](https://go.dev/play/p/Kc7afQIT2Fs)] - -### TryCatch - -The same behavior than `Try`, but calls the catch function in case of error. - -```go -caught := false - -ok := lo.TryCatch(func() error { - panic("error") - return nil -}, func() { - caught = true -}) -// false -// caught == true -``` - -[[play](https://go.dev/play/p/PnOON-EqBiU)] - -### TryCatchWithErrorValue - -The same behavior than `TryWithErrorValue`, but calls the catch function in case of error. 
- -```go -caught := false - -ok := lo.TryCatchWithErrorValue(func() error { - panic("error") - return nil -}, func(val any) { - caught = val == "error" -}) -// false -// caught == true -``` - -[[play](https://go.dev/play/p/8Pc9gwX_GZO)] - -### ErrorsAs - -A shortcut for: - -```go -err := doSomething() - -var rateLimitErr *RateLimitError -if ok := errors.As(err, &rateLimitErr); ok { - // retry later -} -``` - -1 line `lo` helper: - -```go -err := doSomething() - -if rateLimitErr, ok := lo.ErrorsAs[*RateLimitError](err); ok { - // retry later -} -``` - -[[play](https://go.dev/play/p/8wk5rH8UfrE)] - -## 🛩 Benchmark - -We executed a simple benchmark with the a dead-simple `lo.Map` loop: - -See the full implementation [here](./benchmark_test.go). - -```go -_ = lo.Map[int64](arr, func(x int64, i int) string { - return strconv.FormatInt(x, 10) -}) -``` - -**Result:** - -Here is a comparison between `lo.Map`, `lop.Map`, `go-funk` library and a simple Go `for` loop. - -``` -$ go test -benchmem -bench ./... -goos: linux -goarch: amd64 -pkg: github.com/samber/lo -cpu: Intel(R) Core(TM) i5-7267U CPU @ 3.10GHz -cpu: Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz -BenchmarkMap/lo.Map-8 8 132728237 ns/op 39998945 B/op 1000002 allocs/op -BenchmarkMap/lop.Map-8 2 503947830 ns/op 119999956 B/op 3000007 allocs/op -BenchmarkMap/reflect-8 2 826400560 ns/op 170326512 B/op 4000042 allocs/op -BenchmarkMap/for-8 9 126252954 ns/op 39998674 B/op 1000001 allocs/op -PASS -ok github.com/samber/lo 6.657s -``` - -- `lo.Map` is way faster (x7) than `go-funk`, a reflection-based Map implementation. -- `lo.Map` have the same allocation profile than `for`. -- `lo.Map` is 4% slower than `for`. -- `lop.Map` is slower than `lo.Map` because it implies more memory allocation and locks. `lop.Map` will be useful for long-running callbacks, such as i/o bound processing. -- `for` beats other implementations for memory and CPU. - -## 🤝 Contributing - -- Ping me on twitter [@samuelberthe](https://twitter.com/samuelberthe) (DMs, mentions, whatever :)) -- Fork the [project](https://github.com/samber/lo) -- Fix [open issues](https://github.com/samber/lo/issues) or request new features - -Don't hesitate ;) - -### With Docker - -```bash -docker-compose run --rm dev -``` - -### Without Docker - -```bash -# Install some dev dependencies -make tools - -# Run tests -make test -# or -make watch-test -``` - -## 👤 Contributors - -![Contributors](https://contrib.rocks/image?repo=samber/lo) - -## 💫 Show your support - -Give a ⭐️ if this project helped you! - -[![support us](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/samber) - -## 📝 License - -Copyright © 2022 [Samuel Berthe](https://github.com/samber). - -This project is [MIT](./LICENSE) licensed. diff --git a/vendor/github.com/samber/lo/channel.go b/vendor/github.com/samber/lo/channel.go deleted file mode 100644 index e5af2504322..00000000000 --- a/vendor/github.com/samber/lo/channel.go +++ /dev/null @@ -1,306 +0,0 @@ -package lo - -import ( - "math/rand" - "sync" - "time" -) - -type DispatchingStrategy[T any] func(msg T, index uint64, channels []<-chan T) int - -// ChannelDispatcher distributes messages from input channels into N child channels. -// Close events are propagated to children. -// Underlying channels can have a fixed buffer capacity or be unbuffered when cap is 0. 
-func ChannelDispatcher[T any](stream <-chan T, count int, channelBufferCap int, strategy DispatchingStrategy[T]) []<-chan T { - children := createChannels[T](count, channelBufferCap) - - roChildren := channelsToReadOnly(children) - - go func() { - // propagate channel closing to children - defer closeChannels(children) - - var i uint64 = 0 - - for { - msg, ok := <-stream - if !ok { - return - } - - destination := strategy(msg, i, roChildren) % count - children[destination] <- msg - - i++ - } - }() - - return roChildren -} - -func createChannels[T any](count int, channelBufferCap int) []chan T { - children := make([]chan T, 0, count) - - for i := 0; i < count; i++ { - children = append(children, make(chan T, channelBufferCap)) - } - - return children -} - -func channelsToReadOnly[T any](children []chan T) []<-chan T { - roChildren := make([]<-chan T, 0, len(children)) - - for i := range children { - roChildren = append(roChildren, children[i]) - } - - return roChildren -} - -func closeChannels[T any](children []chan T) { - for i := 0; i < len(children); i++ { - close(children[i]) - } -} - -func channelIsNotFull[T any](ch <-chan T) bool { - return cap(ch) == 0 || len(ch) < cap(ch) -} - -// DispatchingStrategyRoundRobin distributes messages in a rotating sequential manner. -// If the channel capacity is exceeded, the next channel will be selected and so on. -func DispatchingStrategyRoundRobin[T any](msg T, index uint64, channels []<-chan T) int { - for { - i := int(index % uint64(len(channels))) - if channelIsNotFull(channels[i]) { - return i - } - - index++ - time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 - } -} - -// DispatchingStrategyRandom distributes messages in a random manner. -// If the channel capacity is exceeded, another random channel will be selected and so on. -func DispatchingStrategyRandom[T any](msg T, index uint64, channels []<-chan T) int { - for { - i := rand.Intn(len(channels)) - if channelIsNotFull(channels[i]) { - return i - } - - time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 - } -} - -// DispatchingStrategyWeightedRandom distributes messages in a weighted manner. -// If the channel capacity is exceeded, another random channel will be selected and so on. -func DispatchingStrategyWeightedRandom[T any](weights []int) DispatchingStrategy[T] { - seq := []int{} - - for i := 0; i < len(weights); i++ { - for j := 0; j < weights[i]; j++ { - seq = append(seq, i) - } - } - - return func(msg T, index uint64, channels []<-chan T) int { - for { - i := seq[rand.Intn(len(seq))] - if channelIsNotFull(channels[i]) { - return i - } - - time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 - } - } -} - -// DispatchingStrategyFirst distributes messages in the first non-full channel. -// If the capacity of the first channel is exceeded, the second channel will be selected and so on. -func DispatchingStrategyFirst[T any](msg T, index uint64, channels []<-chan T) int { - for { - for i := range channels { - if channelIsNotFull(channels[i]) { - return i - } - } - - time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 - } -} - -// DispatchingStrategyLeast distributes messages in the emptiest channel. -func DispatchingStrategyLeast[T any](msg T, index uint64, channels []<-chan T) int { - seq := Range(len(channels)) - - return MinBy(seq, func(item int, min int) bool { - return len(channels[item]) < len(channels[min]) - }) -} - -// DispatchingStrategyMost distributes messages in the fullest channel. 
-// If the channel capacity is exceeded, the next channel will be selected and so on. -func DispatchingStrategyMost[T any](msg T, index uint64, channels []<-chan T) int { - seq := Range(len(channels)) - - return MaxBy(seq, func(item int, max int) bool { - return len(channels[item]) > len(channels[max]) && channelIsNotFull(channels[item]) - }) -} - -// SliceToChannel returns a read-only channels of collection elements. -func SliceToChannel[T any](bufferSize int, collection []T) <-chan T { - ch := make(chan T, bufferSize) - - go func() { - for _, item := range collection { - ch <- item - } - - close(ch) - }() - - return ch -} - -// ChannelToSlice returns a slice built from channels items. Blocks until channel closes. -func ChannelToSlice[T any](ch <-chan T) []T { - collection := []T{} - - for item := range ch { - collection = append(collection, item) - } - - return collection -} - -// Generator implements the generator design pattern. -func Generator[T any](bufferSize int, generator func(yield func(T))) <-chan T { - ch := make(chan T, bufferSize) - - go func() { - // WARNING: infinite loop - generator(func(t T) { - ch <- t - }) - - close(ch) - }() - - return ch -} - -// Buffer creates a slice of n elements from a channel. Returns the slice and the slice length. -// @TODO: we should probably provide an helper that reuse the same buffer. -func Buffer[T any](ch <-chan T, size int) (collection []T, length int, readTime time.Duration, ok bool) { - buffer := make([]T, 0, size) - index := 0 - now := time.Now() - - for ; index < size; index++ { - item, ok := <-ch - if !ok { - return buffer, index, time.Since(now), false - } - - buffer = append(buffer, item) - } - - return buffer, index, time.Since(now), true -} - -// Buffer creates a slice of n elements from a channel. Returns the slice and the slice length. -// Deprecated: Use lo.Buffer instead. -func Batch[T any](ch <-chan T, size int) (collection []T, length int, readTime time.Duration, ok bool) { - return Buffer(ch, size) -} - -// BufferWithTimeout creates a slice of n elements from a channel, with timeout. Returns the slice and the slice length. -// @TODO: we should probably provide an helper that reuse the same buffer. -func BufferWithTimeout[T any](ch <-chan T, size int, timeout time.Duration) (collection []T, length int, readTime time.Duration, ok bool) { - expire := time.NewTimer(timeout) - defer expire.Stop() - - buffer := make([]T, 0, size) - index := 0 - now := time.Now() - - for ; index < size; index++ { - select { - case item, ok := <-ch: - if !ok { - return buffer, index, time.Since(now), false - } - - buffer = append(buffer, item) - - case <-expire.C: - return buffer, index, time.Since(now), true - } - } - - return buffer, index, time.Since(now), true -} - -// BufferWithTimeout creates a slice of n elements from a channel, with timeout. Returns the slice and the slice length. -// Deprecated: Use lo.BufferWithTimeout instead. -func BatchWithTimeout[T any](ch <-chan T, size int, timeout time.Duration) (collection []T, length int, readTime time.Duration, ok bool) { - return BufferWithTimeout(ch, size, timeout) -} - -// FanIn collects messages from multiple input channels into a single buffered channel. -// Output messages has no priority. When all upstream channels reach EOF, downstream channel closes. -func FanIn[T any](channelBufferCap int, upstreams ...<-chan T) <-chan T { - out := make(chan T, channelBufferCap) - var wg sync.WaitGroup - - // Start an output goroutine for each input channel in upstreams. 
- wg.Add(len(upstreams)) - for _, c := range upstreams { - go func(c <-chan T) { - for n := range c { - out <- n - } - wg.Done() - }(c) - } - - // Start a goroutine to close out once all the output goroutines are done. - go func() { - wg.Wait() - close(out) - }() - return out -} - -// ChannelMerge collects messages from multiple input channels into a single buffered channel. -// Output messages has no priority. When all upstream channels reach EOF, downstream channel closes. -// Deprecated: Use lo.FanIn instead. -func ChannelMerge[T any](channelBufferCap int, upstreams ...<-chan T) <-chan T { - return FanIn(channelBufferCap, upstreams...) -} - -// FanOut broadcasts all the upstream messages to multiple downstream channels. -// When upstream channel reach EOF, downstream channels close. If any downstream -// channels is full, broadcasting is paused. -func FanOut[T any](count int, channelsBufferCap int, upstream <-chan T) []<-chan T { - downstreams := createChannels[T](count, channelsBufferCap) - - go func() { - for msg := range upstream { - for i := range downstreams { - downstreams[i] <- msg - } - } - - // Close out once all the output goroutines are done. - for i := range downstreams { - close(downstreams[i]) - } - }() - - return channelsToReadOnly(downstreams) -} diff --git a/vendor/github.com/samber/lo/concurrency.go b/vendor/github.com/samber/lo/concurrency.go deleted file mode 100644 index 2d6fd871a4b..00000000000 --- a/vendor/github.com/samber/lo/concurrency.go +++ /dev/null @@ -1,95 +0,0 @@ -package lo - -import "sync" - -type synchronize struct { - locker sync.Locker -} - -func (s *synchronize) Do(cb func()) { - s.locker.Lock() - Try0(cb) - s.locker.Unlock() -} - -// Synchronize wraps the underlying callback in a mutex. It receives an optional mutex. -func Synchronize(opt ...sync.Locker) *synchronize { - if len(opt) > 1 { - panic("unexpected arguments") - } else if len(opt) == 0 { - opt = append(opt, &sync.Mutex{}) - } - - return &synchronize{ - locker: opt[0], - } -} - -// Async executes a function in a goroutine and returns the result in a channel. -func Async[A any](f func() A) chan A { - ch := make(chan A) - go func() { - ch <- f() - }() - return ch -} - -// Async0 executes a function in a goroutine and returns a channel set once the function finishes. -func Async0(f func()) chan struct{} { - ch := make(chan struct{}) - go func() { - f() - ch <- struct{}{} - }() - return ch -} - -// Async1 is an alias to Async. -func Async1[A any](f func() A) chan A { - return Async(f) -} - -// Async2 has the same behavior as Async, but returns the 2 results as a tuple inside the channel. -func Async2[A any, B any](f func() (A, B)) chan Tuple2[A, B] { - ch := make(chan Tuple2[A, B]) - go func() { - ch <- T2(f()) - }() - return ch -} - -// Async3 has the same behavior as Async, but returns the 3 results as a tuple inside the channel. -func Async3[A any, B any, C any](f func() (A, B, C)) chan Tuple3[A, B, C] { - ch := make(chan Tuple3[A, B, C]) - go func() { - ch <- T3(f()) - }() - return ch -} - -// Async4 has the same behavior as Async, but returns the 4 results as a tuple inside the channel. -func Async4[A any, B any, C any, D any](f func() (A, B, C, D)) chan Tuple4[A, B, C, D] { - ch := make(chan Tuple4[A, B, C, D]) - go func() { - ch <- T4(f()) - }() - return ch -} - -// Async5 has the same behavior as Async, but returns the 5 results as a tuple inside the channel. 
-func Async5[A any, B any, C any, D any, E any](f func() (A, B, C, D, E)) chan Tuple5[A, B, C, D, E] { - ch := make(chan Tuple5[A, B, C, D, E]) - go func() { - ch <- T5(f()) - }() - return ch -} - -// Async6 has the same behavior as Async, but returns the 6 results as a tuple inside the channel. -func Async6[A any, B any, C any, D any, E any, F any](f func() (A, B, C, D, E, F)) chan Tuple6[A, B, C, D, E, F] { - ch := make(chan Tuple6[A, B, C, D, E, F]) - go func() { - ch <- T6(f()) - }() - return ch -} diff --git a/vendor/github.com/samber/lo/condition.go b/vendor/github.com/samber/lo/condition.go deleted file mode 100644 index 1d4e75d25ed..00000000000 --- a/vendor/github.com/samber/lo/condition.go +++ /dev/null @@ -1,150 +0,0 @@ -package lo - -// Ternary is a 1 line if/else statement. -// Play: https://go.dev/play/p/t-D7WBL44h2 -func Ternary[T any](condition bool, ifOutput T, elseOutput T) T { - if condition { - return ifOutput - } - - return elseOutput -} - -// TernaryF is a 1 line if/else statement whose options are functions -// Play: https://go.dev/play/p/AO4VW20JoqM -func TernaryF[T any](condition bool, ifFunc func() T, elseFunc func() T) T { - if condition { - return ifFunc() - } - - return elseFunc() -} - -type ifElse[T any] struct { - result T - done bool -} - -// If. -// Play: https://go.dev/play/p/WSw3ApMxhyW -func If[T any](condition bool, result T) *ifElse[T] { - if condition { - return &ifElse[T]{result, true} - } - - var t T - return &ifElse[T]{t, false} -} - -// IfF. -// Play: https://go.dev/play/p/WSw3ApMxhyW -func IfF[T any](condition bool, resultF func() T) *ifElse[T] { - if condition { - return &ifElse[T]{resultF(), true} - } - - var t T - return &ifElse[T]{t, false} -} - -// ElseIf. -// Play: https://go.dev/play/p/WSw3ApMxhyW -func (i *ifElse[T]) ElseIf(condition bool, result T) *ifElse[T] { - if !i.done && condition { - i.result = result - i.done = true - } - - return i -} - -// ElseIfF. -// Play: https://go.dev/play/p/WSw3ApMxhyW -func (i *ifElse[T]) ElseIfF(condition bool, resultF func() T) *ifElse[T] { - if !i.done && condition { - i.result = resultF() - i.done = true - } - - return i -} - -// Else. -// Play: https://go.dev/play/p/WSw3ApMxhyW -func (i *ifElse[T]) Else(result T) T { - if i.done { - return i.result - } - - return result -} - -// ElseF. -// Play: https://go.dev/play/p/WSw3ApMxhyW -func (i *ifElse[T]) ElseF(resultF func() T) T { - if i.done { - return i.result - } - - return resultF() -} - -type switchCase[T comparable, R any] struct { - predicate T - result R - done bool -} - -// Switch is a pure functional switch/case/default statement. -// Play: https://go.dev/play/p/TGbKUMAeRUd -func Switch[T comparable, R any](predicate T) *switchCase[T, R] { - var result R - - return &switchCase[T, R]{ - predicate, - result, - false, - } -} - -// Case. -// Play: https://go.dev/play/p/TGbKUMAeRUd -func (s *switchCase[T, R]) Case(val T, result R) *switchCase[T, R] { - if !s.done && s.predicate == val { - s.result = result - s.done = true - } - - return s -} - -// CaseF. -// Play: https://go.dev/play/p/TGbKUMAeRUd -func (s *switchCase[T, R]) CaseF(val T, cb func() R) *switchCase[T, R] { - if !s.done && s.predicate == val { - s.result = cb() - s.done = true - } - - return s -} - -// Default. -// Play: https://go.dev/play/p/TGbKUMAeRUd -func (s *switchCase[T, R]) Default(result R) R { - if !s.done { - s.result = result - } - - return s.result -} - -// DefaultF. 
-// Play: https://go.dev/play/p/TGbKUMAeRUd -func (s *switchCase[T, R]) DefaultF(cb func() R) R { - if !s.done { - s.result = cb() - } - - return s.result -} diff --git a/vendor/github.com/samber/lo/constraints.go b/vendor/github.com/samber/lo/constraints.go deleted file mode 100644 index c1f3529685e..00000000000 --- a/vendor/github.com/samber/lo/constraints.go +++ /dev/null @@ -1,6 +0,0 @@ -package lo - -// Clonable defines a constraint of types having Clone() T method. -type Clonable[T any] interface { - Clone() T -} diff --git a/vendor/github.com/samber/lo/errors.go b/vendor/github.com/samber/lo/errors.go deleted file mode 100644 index a99013d950b..00000000000 --- a/vendor/github.com/samber/lo/errors.go +++ /dev/null @@ -1,354 +0,0 @@ -package lo - -import ( - "errors" - "fmt" - "reflect" -) - -// Validate is a helper that creates an error when a condition is not met. -// Play: https://go.dev/play/p/vPyh51XpCBt -func Validate(ok bool, format string, args ...any) error { - if !ok { - return fmt.Errorf(fmt.Sprintf(format, args...)) - } - return nil -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 1 { - if msgAsStr, ok := msgAndArgs[0].(string); ok { - return msgAsStr - } - return fmt.Sprintf("%+v", msgAndArgs[0]) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// must panics if err is error or false. -func must(err any, messageArgs ...interface{}) { - if err == nil { - return - } - - switch e := err.(type) { - case bool: - if !e { - message := messageFromMsgAndArgs(messageArgs...) - if message == "" { - message = "not ok" - } - - panic(message) - } - - case error: - message := messageFromMsgAndArgs(messageArgs...) - if message != "" { - panic(message + ": " + e.Error()) - } else { - panic(e.Error()) - } - - default: - panic("must: invalid err type '" + reflect.TypeOf(err).Name() + "', should either be a bool or an error") - } -} - -// Must is a helper that wraps a call to a function returning a value and an error -// and panics if err is error or false. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must[T any](val T, err any, messageArgs ...interface{}) T { - must(err, messageArgs...) - return val -} - -// Must0 has the same behavior as Must, but callback returns no variable. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must0(err any, messageArgs ...interface{}) { - must(err, messageArgs...) -} - -// Must1 is an alias to Must -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must1[T any](val T, err any, messageArgs ...interface{}) T { - return Must(val, err, messageArgs...) -} - -// Must2 has the same behavior as Must, but callback returns 2 variables. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must2[T1 any, T2 any](val1 T1, val2 T2, err any, messageArgs ...interface{}) (T1, T2) { - must(err, messageArgs...) - return val1, val2 -} - -// Must3 has the same behavior as Must, but callback returns 3 variables. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must3[T1 any, T2 any, T3 any](val1 T1, val2 T2, val3 T3, err any, messageArgs ...interface{}) (T1, T2, T3) { - must(err, messageArgs...) - return val1, val2, val3 -} - -// Must4 has the same behavior as Must, but callback returns 4 variables. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must4[T1 any, T2 any, T3 any, T4 any](val1 T1, val2 T2, val3 T3, val4 T4, err any, messageArgs ...interface{}) (T1, T2, T3, T4) { - must(err, messageArgs...) 
- return val1, val2, val3, val4 -} - -// Must5 has the same behavior as Must, but callback returns 5 variables. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must5[T1 any, T2 any, T3 any, T4 any, T5 any](val1 T1, val2 T2, val3 T3, val4 T4, val5 T5, err any, messageArgs ...interface{}) (T1, T2, T3, T4, T5) { - must(err, messageArgs...) - return val1, val2, val3, val4, val5 -} - -// Must6 has the same behavior as Must, but callback returns 6 variables. -// Play: https://go.dev/play/p/TMoWrRp3DyC -func Must6[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any](val1 T1, val2 T2, val3 T3, val4 T4, val5 T5, val6 T6, err any, messageArgs ...interface{}) (T1, T2, T3, T4, T5, T6) { - must(err, messageArgs...) - return val1, val2, val3, val4, val5, val6 -} - -// Try calls the function and return false in case of error. -func Try(callback func() error) (ok bool) { - ok = true - - defer func() { - if r := recover(); r != nil { - ok = false - } - }() - - err := callback() - if err != nil { - ok = false - } - - return -} - -// Try0 has the same behavior as Try, but callback returns no variable. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try0(callback func()) bool { - return Try(func() error { - callback() - return nil - }) -} - -// Try1 is an alias to Try. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try1(callback func() error) bool { - return Try(callback) -} - -// Try2 has the same behavior as Try, but callback returns 2 variables. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try2[T any](callback func() (T, error)) bool { - return Try(func() error { - _, err := callback() - return err - }) -} - -// Try3 has the same behavior as Try, but callback returns 3 variables. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try3[T, R any](callback func() (T, R, error)) bool { - return Try(func() error { - _, _, err := callback() - return err - }) -} - -// Try4 has the same behavior as Try, but callback returns 4 variables. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try4[T, R, S any](callback func() (T, R, S, error)) bool { - return Try(func() error { - _, _, _, err := callback() - return err - }) -} - -// Try5 has the same behavior as Try, but callback returns 5 variables. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try5[T, R, S, Q any](callback func() (T, R, S, Q, error)) bool { - return Try(func() error { - _, _, _, _, err := callback() - return err - }) -} - -// Try6 has the same behavior as Try, but callback returns 6 variables. -// Play: https://go.dev/play/p/mTyyWUvn9u4 -func Try6[T, R, S, Q, U any](callback func() (T, R, S, Q, U, error)) bool { - return Try(func() error { - _, _, _, _, _, err := callback() - return err - }) -} - -// TryOr has the same behavior as Must, but returns a default value in case of error. -// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr[A any](callback func() (A, error), fallbackA A) (A, bool) { - return TryOr1(callback, fallbackA) -} - -// TryOr1 has the same behavior as Must, but returns a default value in case of error. -// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr1[A any](callback func() (A, error), fallbackA A) (A, bool) { - ok := false - - Try0(func() { - a, err := callback() - if err == nil { - fallbackA = a - ok = true - } - }) - - return fallbackA, ok -} - -// TryOr2 has the same behavior as Must, but returns a default value in case of error. 
-// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr2[A any, B any](callback func() (A, B, error), fallbackA A, fallbackB B) (A, B, bool) { - ok := false - - Try0(func() { - a, b, err := callback() - if err == nil { - fallbackA = a - fallbackB = b - ok = true - } - }) - - return fallbackA, fallbackB, ok -} - -// TryOr3 has the same behavior as Must, but returns a default value in case of error. -// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr3[A any, B any, C any](callback func() (A, B, C, error), fallbackA A, fallbackB B, fallbackC C) (A, B, C, bool) { - ok := false - - Try0(func() { - a, b, c, err := callback() - if err == nil { - fallbackA = a - fallbackB = b - fallbackC = c - ok = true - } - }) - - return fallbackA, fallbackB, fallbackC, ok -} - -// TryOr4 has the same behavior as Must, but returns a default value in case of error. -// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr4[A any, B any, C any, D any](callback func() (A, B, C, D, error), fallbackA A, fallbackB B, fallbackC C, fallbackD D) (A, B, C, D, bool) { - ok := false - - Try0(func() { - a, b, c, d, err := callback() - if err == nil { - fallbackA = a - fallbackB = b - fallbackC = c - fallbackD = d - ok = true - } - }) - - return fallbackA, fallbackB, fallbackC, fallbackD, ok -} - -// TryOr5 has the same behavior as Must, but returns a default value in case of error. -// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr5[A any, B any, C any, D any, E any](callback func() (A, B, C, D, E, error), fallbackA A, fallbackB B, fallbackC C, fallbackD D, fallbackE E) (A, B, C, D, E, bool) { - ok := false - - Try0(func() { - a, b, c, d, e, err := callback() - if err == nil { - fallbackA = a - fallbackB = b - fallbackC = c - fallbackD = d - fallbackE = e - ok = true - } - }) - - return fallbackA, fallbackB, fallbackC, fallbackD, fallbackE, ok -} - -// TryOr6 has the same behavior as Must, but returns a default value in case of error. -// Play: https://go.dev/play/p/B4F7Wg2Zh9X -func TryOr6[A any, B any, C any, D any, E any, F any](callback func() (A, B, C, D, E, F, error), fallbackA A, fallbackB B, fallbackC C, fallbackD D, fallbackE E, fallbackF F) (A, B, C, D, E, F, bool) { - ok := false - - Try0(func() { - a, b, c, d, e, f, err := callback() - if err == nil { - fallbackA = a - fallbackB = b - fallbackC = c - fallbackD = d - fallbackE = e - fallbackF = f - ok = true - } - }) - - return fallbackA, fallbackB, fallbackC, fallbackD, fallbackE, fallbackF, ok -} - -// TryWithErrorValue has the same behavior as Try, but also returns value passed to panic. -// Play: https://go.dev/play/p/Kc7afQIT2Fs -func TryWithErrorValue(callback func() error) (errorValue any, ok bool) { - ok = true - - defer func() { - if r := recover(); r != nil { - ok = false - errorValue = r - } - }() - - err := callback() - if err != nil { - ok = false - errorValue = err - } - - return -} - -// TryCatch has the same behavior as Try, but calls the catch function in case of error. -// Play: https://go.dev/play/p/PnOON-EqBiU -func TryCatch(callback func() error, catch func()) { - if !Try(callback) { - catch() - } -} - -// TryCatchWithErrorValue has the same behavior as TryWithErrorValue, but calls the catch function in case of error. -// Play: https://go.dev/play/p/8Pc9gwX_GZO -func TryCatchWithErrorValue(callback func() error, catch func(any)) { - if err, ok := TryWithErrorValue(callback); !ok { - catch(err) - } -} - -// ErrorsAs is a shortcut for errors.As(err, &&T). 
-// Play: https://go.dev/play/p/8wk5rH8UfrE -func ErrorsAs[T error](err error) (T, bool) { - var t T - ok := errors.As(err, &t) - return t, ok -} diff --git a/vendor/github.com/samber/lo/find.go b/vendor/github.com/samber/lo/find.go deleted file mode 100644 index f8caeb89598..00000000000 --- a/vendor/github.com/samber/lo/find.go +++ /dev/null @@ -1,372 +0,0 @@ -package lo - -import ( - "fmt" - "math/rand" - - "golang.org/x/exp/constraints" -) - -// import "golang.org/x/exp/constraints" - -// IndexOf returns the index at which the first occurrence of a value is found in an array or return -1 -// if the value cannot be found. -func IndexOf[T comparable](collection []T, element T) int { - for i, item := range collection { - if item == element { - return i - } - } - - return -1 -} - -// LastIndexOf returns the index at which the last occurrence of a value is found in an array or return -1 -// if the value cannot be found. -func LastIndexOf[T comparable](collection []T, element T) int { - length := len(collection) - - for i := length - 1; i >= 0; i-- { - if collection[i] == element { - return i - } - } - - return -1 -} - -// Find search an element in a slice based on a predicate. It returns element and true if element was found. -func Find[T any](collection []T, predicate func(item T) bool) (T, bool) { - for _, item := range collection { - if predicate(item) { - return item, true - } - } - - var result T - return result, false -} - -// FindIndexOf searches an element in a slice based on a predicate and returns the index and true. -// It returns -1 and false if the element is not found. -func FindIndexOf[T any](collection []T, predicate func(item T) bool) (T, int, bool) { - for i, item := range collection { - if predicate(item) { - return item, i, true - } - } - - var result T - return result, -1, false -} - -// FindLastIndexOf searches last element in a slice based on a predicate and returns the index and true. -// It returns -1 and false if the element is not found. -func FindLastIndexOf[T any](collection []T, predicate func(item T) bool) (T, int, bool) { - length := len(collection) - - for i := length - 1; i >= 0; i-- { - if predicate(collection[i]) { - return collection[i], i, true - } - } - - var result T - return result, -1, false -} - -// FindOrElse search an element in a slice based on a predicate. It returns the element if found or a given fallback value otherwise. -func FindOrElse[T any](collection []T, fallback T, predicate func(item T) bool) T { - for _, item := range collection { - if predicate(item) { - return item - } - } - - return fallback -} - -// FindKey returns the key of the first value matching. -func FindKey[K comparable, V comparable](object map[K]V, value V) (K, bool) { - for k, v := range object { - if v == value { - return k, true - } - } - - return Empty[K](), false -} - -// FindKeyBy returns the key of the first element predicate returns truthy for. -func FindKeyBy[K comparable, V any](object map[K]V, predicate func(key K, value V) bool) (K, bool) { - for k, v := range object { - if predicate(k, v) { - return k, true - } - } - - return Empty[K](), false -} - -// FindUniques returns a slice with all the unique elements of the collection. -// The order of result values is determined by the order they occur in the collection. 
-func FindUniques[T comparable](collection []T) []T { - isDupl := make(map[T]bool, len(collection)) - - for _, item := range collection { - duplicated, ok := isDupl[item] - if !ok { - isDupl[item] = false - } else if !duplicated { - isDupl[item] = true - } - } - - result := make([]T, 0, len(collection)-len(isDupl)) - - for _, item := range collection { - if duplicated := isDupl[item]; !duplicated { - result = append(result, item) - } - } - - return result -} - -// FindUniquesBy returns a slice with all the unique elements of the collection. -// The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is -// invoked for each element in array to generate the criterion by which uniqueness is computed. -func FindUniquesBy[T any, U comparable](collection []T, iteratee func(item T) U) []T { - isDupl := make(map[U]bool, len(collection)) - - for _, item := range collection { - key := iteratee(item) - - duplicated, ok := isDupl[key] - if !ok { - isDupl[key] = false - } else if !duplicated { - isDupl[key] = true - } - } - - result := make([]T, 0, len(collection)-len(isDupl)) - - for _, item := range collection { - key := iteratee(item) - - if duplicated := isDupl[key]; !duplicated { - result = append(result, item) - } - } - - return result -} - -// FindDuplicates returns a slice with the first occurrence of each duplicated elements of the collection. -// The order of result values is determined by the order they occur in the collection. -func FindDuplicates[T comparable](collection []T) []T { - isDupl := make(map[T]bool, len(collection)) - - for _, item := range collection { - duplicated, ok := isDupl[item] - if !ok { - isDupl[item] = false - } else if !duplicated { - isDupl[item] = true - } - } - - result := make([]T, 0, len(collection)-len(isDupl)) - - for _, item := range collection { - if duplicated := isDupl[item]; duplicated { - result = append(result, item) - isDupl[item] = false - } - } - - return result -} - -// FindDuplicatesBy returns a slice with the first occurrence of each duplicated elements of the collection. -// The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is -// invoked for each element in array to generate the criterion by which uniqueness is computed. -func FindDuplicatesBy[T any, U comparable](collection []T, iteratee func(item T) U) []T { - isDupl := make(map[U]bool, len(collection)) - - for _, item := range collection { - key := iteratee(item) - - duplicated, ok := isDupl[key] - if !ok { - isDupl[key] = false - } else if !duplicated { - isDupl[key] = true - } - } - - result := make([]T, 0, len(collection)-len(isDupl)) - - for _, item := range collection { - key := iteratee(item) - - if duplicated := isDupl[key]; duplicated { - result = append(result, item) - isDupl[key] = false - } - } - - return result -} - -// Min search the minimum value of a collection. -// Returns zero value when collection is empty. -func Min[T constraints.Ordered](collection []T) T { - var min T - - if len(collection) == 0 { - return min - } - - min = collection[0] - - for i := 1; i < len(collection); i++ { - item := collection[i] - - if item < min { - min = item - } - } - - return min -} - -// MinBy search the minimum value of a collection using the given comparison function. -// If several values of the collection are equal to the smallest value, returns the first such value. -// Returns zero value when collection is empty. 
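A minimal sketch of the search helpers above (IndexOf, Find, FindUniques, FindDuplicates); the sample slice is made up:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	nums := []int{1, 2, 2, 3, 4, 4, 4}

	fmt.Println(lo.IndexOf(nums, 3)) // 3

	v, found := lo.Find(nums, func(n int) bool { return n > 2 })
	fmt.Println(v, found) // 3 true

	fmt.Println(lo.FindUniques(nums))    // [1 3]  (elements occurring exactly once)
	fmt.Println(lo.FindDuplicates(nums)) // [2 4]  (first occurrence of each duplicate)
}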
-func MinBy[T any](collection []T, comparison func(a T, b T) bool) T { - var min T - - if len(collection) == 0 { - return min - } - - min = collection[0] - - for i := 1; i < len(collection); i++ { - item := collection[i] - - if comparison(item, min) { - min = item - } - } - - return min -} - -// Max searches the maximum value of a collection. -// Returns zero value when collection is empty. -func Max[T constraints.Ordered](collection []T) T { - var max T - - if len(collection) == 0 { - return max - } - - max = collection[0] - - for i := 1; i < len(collection); i++ { - item := collection[i] - - if item > max { - max = item - } - } - - return max -} - -// MaxBy search the maximum value of a collection using the given comparison function. -// If several values of the collection are equal to the greatest value, returns the first such value. -// Returns zero value when collection is empty. -func MaxBy[T any](collection []T, comparison func(a T, b T) bool) T { - var max T - - if len(collection) == 0 { - return max - } - - max = collection[0] - - for i := 1; i < len(collection); i++ { - item := collection[i] - - if comparison(item, max) { - max = item - } - } - - return max -} - -// Last returns the last element of a collection or error if empty. -func Last[T any](collection []T) (T, error) { - length := len(collection) - - if length == 0 { - var t T - return t, fmt.Errorf("last: cannot extract the last element of an empty slice") - } - - return collection[length-1], nil -} - -// Nth returns the element at index `nth` of collection. If `nth` is negative, the nth element -// from the end is returned. An error is returned when nth is out of slice bounds. -func Nth[T any, N constraints.Integer](collection []T, nth N) (T, error) { - n := int(nth) - l := len(collection) - if n >= l || -n > l { - var t T - return t, fmt.Errorf("nth: %d out of slice bounds", n) - } - - if n >= 0 { - return collection[n], nil - } - return collection[l+n], nil -} - -// Sample returns a random item from collection. -func Sample[T any](collection []T) T { - size := len(collection) - if size == 0 { - return Empty[T]() - } - - return collection[rand.Intn(size)] -} - -// Samples returns N random unique items from collection. -func Samples[T any](collection []T, count int) []T { - size := len(collection) - - copy := append([]T{}, collection...) - - results := []T{} - - for i := 0; i < size && i < count; i++ { - copyLength := size - i - - index := rand.Intn(size - i) - results = append(results, copy[index]) - - // Removes element. - // It is faster to swap with last element and remove it. - copy[index] = copy[copyLength-1] - copy = copy[:copyLength-1] - } - - return results -} diff --git a/vendor/github.com/samber/lo/func.go b/vendor/github.com/samber/lo/func.go deleted file mode 100644 index 5fa1cbc8718..00000000000 --- a/vendor/github.com/samber/lo/func.go +++ /dev/null @@ -1,41 +0,0 @@ -package lo - -// Partial returns new function that, when called, has its first argument set to the provided value. -func Partial[T1, T2, R any](f func(a T1, b T2) R, arg1 T1) func(T2) R { - return func(t2 T2) R { - return f(arg1, t2) - } -} - -// Partial1 returns new function that, when called, has its first argument set to the provided value. -func Partial1[T1, T2, R any](f func(T1, T2) R, arg1 T1) func(T2) R { - return Partial(f, arg1) -} - -// Partial2 returns new function that, when called, has its first argument set to the provided value. 
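The Min/Max/Last/Nth family above and the Partial helpers from func.go are used along these lines; the values are illustrative only:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	nums := []int{5, 3, 8, 1}

	fmt.Println(lo.Min(nums), lo.Max(nums)) // 1 8

	last, err := lo.Last(nums)
	fmt.Println(last, err) // 1 <nil>

	nth, _ := lo.Nth(nums, -2)
	fmt.Println(nth) // 8 (second element from the end)

	// Partial pre-binds the first argument of a two-argument function.
	add := func(a, b int) int { return a + b }
	add10 := lo.Partial(add, 10)
	fmt.Println(add10(5)) // 15
}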
-func Partial2[T1, T2, T3, R any](f func(T1, T2, T3) R, arg1 T1) func(T2, T3) R { - return func(t2 T2, t3 T3) R { - return f(arg1, t2, t3) - } -} - -// Partial3 returns new function that, when called, has its first argument set to the provided value. -func Partial3[T1, T2, T3, T4, R any](f func(T1, T2, T3, T4) R, arg1 T1) func(T2, T3, T4) R { - return func(t2 T2, t3 T3, t4 T4) R { - return f(arg1, t2, t3, t4) - } -} - -// Partial4 returns new function that, when called, has its first argument set to the provided value. -func Partial4[T1, T2, T3, T4, T5, R any](f func(T1, T2, T3, T4, T5) R, arg1 T1) func(T2, T3, T4, T5) R { - return func(t2 T2, t3 T3, t4 T4, t5 T5) R { - return f(arg1, t2, t3, t4, t5) - } -} - -// Partial5 returns new function that, when called, has its first argument set to the provided value -func Partial5[T1, T2, T3, T4, T5, T6, R any](f func(T1, T2, T3, T4, T5, T6) R, arg1 T1) func(T2, T3, T4, T5, T6) R { - return func(t2 T2, t3 T3, t4 T4, t5 T5, t6 T6) R { - return f(arg1, t2, t3, t4, t5, t6) - } -} diff --git a/vendor/github.com/samber/lo/intersect.go b/vendor/github.com/samber/lo/intersect.go deleted file mode 100644 index cf6cab3d138..00000000000 --- a/vendor/github.com/samber/lo/intersect.go +++ /dev/null @@ -1,185 +0,0 @@ -package lo - -// Contains returns true if an element is present in a collection. -func Contains[T comparable](collection []T, element T) bool { - for _, item := range collection { - if item == element { - return true - } - } - - return false -} - -// ContainsBy returns true if predicate function return true. -func ContainsBy[T any](collection []T, predicate func(item T) bool) bool { - for _, item := range collection { - if predicate(item) { - return true - } - } - - return false -} - -// Every returns true if all elements of a subset are contained into a collection or if the subset is empty. -func Every[T comparable](collection []T, subset []T) bool { - for _, elem := range subset { - if !Contains(collection, elem) { - return false - } - } - - return true -} - -// EveryBy returns true if the predicate returns true for all of the elements in the collection or if the collection is empty. -func EveryBy[T any](collection []T, predicate func(item T) bool) bool { - for _, v := range collection { - if !predicate(v) { - return false - } - } - - return true -} - -// Some returns true if at least 1 element of a subset is contained into a collection. -// If the subset is empty Some returns false. -func Some[T comparable](collection []T, subset []T) bool { - for _, elem := range subset { - if Contains(collection, elem) { - return true - } - } - - return false -} - -// SomeBy returns true if the predicate returns true for any of the elements in the collection. -// If the collection is empty SomeBy returns false. -func SomeBy[T any](collection []T, predicate func(item T) bool) bool { - for _, v := range collection { - if predicate(v) { - return true - } - } - - return false -} - -// None returns true if no element of a subset are contained into a collection or if the subset is empty. -func None[T comparable](collection []T, subset []T) bool { - for _, elem := range subset { - if Contains(collection, elem) { - return false - } - } - - return true -} - -// NoneBy returns true if the predicate returns true for none of the elements in the collection or if the collection is empty. 
-func NoneBy[T any](collection []T, predicate func(item T) bool) bool { - for _, v := range collection { - if predicate(v) { - return false - } - } - - return true -} - -// Intersect returns the intersection between two collections. -func Intersect[T comparable](list1 []T, list2 []T) []T { - result := []T{} - seen := map[T]struct{}{} - - for _, elem := range list1 { - seen[elem] = struct{}{} - } - - for _, elem := range list2 { - if _, ok := seen[elem]; ok { - result = append(result, elem) - } - } - - return result -} - -// Difference returns the difference between two collections. -// The first value is the collection of element absent of list2. -// The second value is the collection of element absent of list1. -func Difference[T comparable](list1 []T, list2 []T) ([]T, []T) { - left := []T{} - right := []T{} - - seenLeft := map[T]struct{}{} - seenRight := map[T]struct{}{} - - for _, elem := range list1 { - seenLeft[elem] = struct{}{} - } - - for _, elem := range list2 { - seenRight[elem] = struct{}{} - } - - for _, elem := range list1 { - if _, ok := seenRight[elem]; !ok { - left = append(left, elem) - } - } - - for _, elem := range list2 { - if _, ok := seenLeft[elem]; !ok { - right = append(right, elem) - } - } - - return left, right -} - -// Union returns all distinct elements from given collections. -// result returns will not change the order of elements relatively. -func Union[T comparable](lists ...[]T) []T { - result := []T{} - seen := map[T]struct{}{} - - for _, list := range lists { - for _, e := range list { - if _, ok := seen[e]; !ok { - seen[e] = struct{}{} - result = append(result, e) - } - } - } - - return result -} - -// Without returns slice excluding all given values. -func Without[T comparable](collection []T, exclude ...T) []T { - result := make([]T, 0, len(collection)) - for _, e := range collection { - if !Contains(exclude, e) { - result = append(result, e) - } - } - return result -} - -// WithoutEmpty returns slice excluding empty values. -func WithoutEmpty[T comparable](collection []T) []T { - var empty T - - result := make([]T, 0, len(collection)) - for _, e := range collection { - if e != empty { - result = append(result, e) - } - } - - return result -} diff --git a/vendor/github.com/samber/lo/map.go b/vendor/github.com/samber/lo/map.go deleted file mode 100644 index 9ed68b4202e..00000000000 --- a/vendor/github.com/samber/lo/map.go +++ /dev/null @@ -1,215 +0,0 @@ -package lo - -// Keys creates an array of the map keys. -// Play: https://go.dev/play/p/Uu11fHASqrU -func Keys[K comparable, V any](in map[K]V) []K { - result := make([]K, 0, len(in)) - - for k := range in { - result = append(result, k) - } - - return result -} - -// Values creates an array of the map values. -// Play: https://go.dev/play/p/nnRTQkzQfF6 -func Values[K comparable, V any](in map[K]V) []V { - result := make([]V, 0, len(in)) - - for _, v := range in { - result = append(result, v) - } - - return result -} - -// PickBy returns same map type filtered by given predicate. -// Play: https://go.dev/play/p/kdg8GR_QMmf -func PickBy[K comparable, V any](in map[K]V, predicate func(key K, value V) bool) map[K]V { - r := map[K]V{} - for k, v := range in { - if predicate(k, v) { - r[k] = v - } - } - return r -} - -// PickByKeys returns same map type filtered by given keys. 
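A short sketch of the set-style helpers above (Contains, Every, Intersect, Difference, Union, Without); the string slices are arbitrary:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	a := []string{"red", "green", "blue"}
	b := []string{"green", "blue", "yellow"}

	fmt.Println(lo.Contains(a, "red"))          // true
	fmt.Println(lo.Every(a, []string{"green"})) // true

	fmt.Println(lo.Intersect(a, b)) // [green blue]

	left, right := lo.Difference(a, b)
	fmt.Println(left, right) // [red] [yellow]

	fmt.Println(lo.Union(a, b))       // [red green blue yellow]
	fmt.Println(lo.Without(a, "red")) // [green blue]
}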
-// Play: https://go.dev/play/p/R1imbuci9qU -func PickByKeys[K comparable, V any](in map[K]V, keys []K) map[K]V { - r := map[K]V{} - for k, v := range in { - if Contains(keys, k) { - r[k] = v - } - } - return r -} - -// PickByValues returns same map type filtered by given values. -// Play: https://go.dev/play/p/1zdzSvbfsJc -func PickByValues[K comparable, V comparable](in map[K]V, values []V) map[K]V { - r := map[K]V{} - for k, v := range in { - if Contains(values, v) { - r[k] = v - } - } - return r -} - -// OmitBy returns same map type filtered by given predicate. -// Play: https://go.dev/play/p/EtBsR43bdsd -func OmitBy[K comparable, V any](in map[K]V, predicate func(key K, value V) bool) map[K]V { - r := map[K]V{} - for k, v := range in { - if !predicate(k, v) { - r[k] = v - } - } - return r -} - -// OmitByKeys returns same map type filtered by given keys. -// Play: https://go.dev/play/p/t1QjCrs-ysk -func OmitByKeys[K comparable, V any](in map[K]V, keys []K) map[K]V { - r := map[K]V{} - for k, v := range in { - if !Contains(keys, k) { - r[k] = v - } - } - return r -} - -// OmitByValues returns same map type filtered by given values. -// Play: https://go.dev/play/p/9UYZi-hrs8j -func OmitByValues[K comparable, V comparable](in map[K]V, values []V) map[K]V { - r := map[K]V{} - for k, v := range in { - if !Contains(values, v) { - r[k] = v - } - } - return r -} - -// Entries transforms a map into array of key/value pairs. -// Play: -func Entries[K comparable, V any](in map[K]V) []Entry[K, V] { - entries := make([]Entry[K, V], 0, len(in)) - - for k, v := range in { - entries = append(entries, Entry[K, V]{ - Key: k, - Value: v, - }) - } - - return entries -} - -// ToPairs transforms a map into array of key/value pairs. -// Alias of Entries(). -// Play: https://go.dev/play/p/3Dhgx46gawJ -func ToPairs[K comparable, V any](in map[K]V) []Entry[K, V] { - return Entries(in) -} - -// FromEntries transforms an array of key/value pairs into a map. -// Play: https://go.dev/play/p/oIr5KHFGCEN -func FromEntries[K comparable, V any](entries []Entry[K, V]) map[K]V { - out := map[K]V{} - - for _, v := range entries { - out[v.Key] = v.Value - } - - return out -} - -// FromPairs transforms an array of key/value pairs into a map. -// Alias of FromEntries(). -// Play: https://go.dev/play/p/oIr5KHFGCEN -func FromPairs[K comparable, V any](entries []Entry[K, V]) map[K]V { - return FromEntries(entries) -} - -// Invert creates a map composed of the inverted keys and values. If map -// contains duplicate values, subsequent values overwrite property assignments -// of previous values. -// Play: https://go.dev/play/p/rFQ4rak6iA1 -func Invert[K comparable, V comparable](in map[K]V) map[V]K { - out := map[V]K{} - - for k, v := range in { - out[v] = k - } - - return out -} - -// Assign merges multiple maps from left to right. -// Play: https://go.dev/play/p/VhwfJOyxf5o -func Assign[K comparable, V any](maps ...map[K]V) map[K]V { - out := map[K]V{} - - for _, m := range maps { - for k, v := range m { - out[k] = v - } - } - - return out -} - -// MapKeys manipulates a map keys and transforms it to a map of another type. -// Play: https://go.dev/play/p/9_4WPIqOetJ -func MapKeys[K comparable, V any, R comparable](in map[K]V, iteratee func(value V, key K) R) map[R]V { - result := map[R]V{} - - for k, v := range in { - result[iteratee(v, k)] = v - } - - return result -} - -// MapValues manipulates a map values and transforms it to a map of another type. 
-// Play: https://go.dev/play/p/T_8xAfvcf0W -func MapValues[K comparable, V any, R any](in map[K]V, iteratee func(value V, key K) R) map[K]R { - result := map[K]R{} - - for k, v := range in { - result[k] = iteratee(v, k) - } - - return result -} - -// MapEntries manipulates a map entries and transforms it to a map of another type. -// Play: https://go.dev/play/p/VuvNQzxKimT -func MapEntries[K1 comparable, V1 any, K2 comparable, V2 any](in map[K1]V1, iteratee func(key K1, value V1) (K2, V2)) map[K2]V2 { - result := make(map[K2]V2, len(in)) - - for k1, v1 := range in { - k2, v2 := iteratee(k1, v1) - result[k2] = v2 - } - - return result -} - -// MapToSlice transforms a map into a slice based on specific iteratee -// Play: https://go.dev/play/p/ZuiCZpDt6LD -func MapToSlice[K comparable, V any, R any](in map[K]V, iteratee func(key K, value V) R) []R { - result := make([]R, 0, len(in)) - - for k, v := range in { - result = append(result, iteratee(k, v)) - } - - return result -} diff --git a/vendor/github.com/samber/lo/math.go b/vendor/github.com/samber/lo/math.go deleted file mode 100644 index 9dce28cf831..00000000000 --- a/vendor/github.com/samber/lo/math.go +++ /dev/null @@ -1,84 +0,0 @@ -package lo - -import "golang.org/x/exp/constraints" - -// Range creates an array of numbers (positive and/or negative) with given length. -// Play: https://go.dev/play/p/0r6VimXAi9H -func Range(elementNum int) []int { - length := If(elementNum < 0, -elementNum).Else(elementNum) - result := make([]int, length) - step := If(elementNum < 0, -1).Else(1) - for i, j := 0, 0; i < length; i, j = i+1, j+step { - result[i] = j - } - return result -} - -// RangeFrom creates an array of numbers from start with specified length. -// Play: https://go.dev/play/p/0r6VimXAi9H -func RangeFrom[T constraints.Integer | constraints.Float](start T, elementNum int) []T { - length := If(elementNum < 0, -elementNum).Else(elementNum) - result := make([]T, length) - step := If(elementNum < 0, -1).Else(1) - for i, j := 0, start; i < length; i, j = i+1, j+T(step) { - result[i] = j - } - return result -} - -// RangeWithSteps creates an array of numbers (positive and/or negative) progressing from start up to, but not including end. -// step set to zero will return empty array. -// Play: https://go.dev/play/p/0r6VimXAi9H -func RangeWithSteps[T constraints.Integer | constraints.Float](start, end, step T) []T { - result := []T{} - if start == end || step == 0 { - return result - } - if start < end { - if step < 0 { - return result - } - for i := start; i < end; i += step { - result = append(result, i) - } - return result - } - if step > 0 { - return result - } - for i := start; i > end; i += step { - result = append(result, i) - } - return result -} - -// Clamp clamps number within the inclusive lower and upper bounds. -// Play: https://go.dev/play/p/RU4lJNC2hlI -func Clamp[T constraints.Ordered](value T, min T, max T) T { - if value < min { - return min - } else if value > max { - return max - } - return value -} - -// Sum sums the values in a collection. If collection is empty 0 is returned. -// Play: https://go.dev/play/p/upfeJVqs4Bt -func Sum[T constraints.Float | constraints.Integer | constraints.Complex](collection []T) T { - var sum T = 0 - for _, val := range collection { - sum += val - } - return sum -} - -// SumBy summarizes the values in a collection using the given return value from the iteration function. If collection is empty 0 is returned. 
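The map helpers above are typically combined like this; the ages map is a made-up example:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	ages := map[string]int{"ana": 30, "bob": 25}

	fmt.Println(lo.Keys(ages))   // keys in unspecified order
	fmt.Println(lo.Values(ages)) // values in unspecified order

	adults := lo.PickBy(ages, func(_ string, age int) bool { return age >= 18 })
	fmt.Println(adults)

	fmt.Println(lo.Invert(map[string]string{"key": "value"})) // map[value:key]

	doubled := lo.MapValues(ages, func(age int, _ string) int { return age * 2 })
	fmt.Println(doubled)

	lines := lo.MapToSlice(ages, func(name string, age int) string {
		return fmt.Sprintf("%s=%d", name, age)
	})
	fmt.Println(lines)
}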
-// Play: https://go.dev/play/p/Dz_a_7jN_ca -func SumBy[T any, R constraints.Float | constraints.Integer | constraints.Complex](collection []T, iteratee func(item T) R) R { - var sum R = 0 - for _, item := range collection { - sum = sum + iteratee(item) - } - return sum -} diff --git a/vendor/github.com/samber/lo/retry.go b/vendor/github.com/samber/lo/retry.go deleted file mode 100644 index 0d46f265d55..00000000000 --- a/vendor/github.com/samber/lo/retry.go +++ /dev/null @@ -1,210 +0,0 @@ -package lo - -import ( - "sync" - "time" -) - -type debounce struct { - after time.Duration - mu *sync.Mutex - timer *time.Timer - done bool - callbacks []func() -} - -func (d *debounce) reset() *debounce { - d.mu.Lock() - defer d.mu.Unlock() - - if d.done { - return d - } - - if d.timer != nil { - d.timer.Stop() - } - - d.timer = time.AfterFunc(d.after, func() { - for _, f := range d.callbacks { - f() - } - }) - return d -} - -func (d *debounce) cancel() { - d.mu.Lock() - defer d.mu.Unlock() - - if d.timer != nil { - d.timer.Stop() - d.timer = nil - } - - d.done = true -} - -// NewDebounce creates a debounced instance that delays invoking functions given until after wait milliseconds have elapsed. -// Play: https://go.dev/play/p/mz32VMK2nqe -func NewDebounce(duration time.Duration, f ...func()) (func(), func()) { - d := &debounce{ - after: duration, - mu: new(sync.Mutex), - timer: nil, - done: false, - callbacks: f, - } - - return func() { - d.reset() - }, d.cancel -} - -// Attempt invokes a function N times until it returns valid output. Returning either the caught error or nil. When first argument is less than `1`, the function runs until a successful response is returned. -// Play: https://go.dev/play/p/3ggJZ2ZKcMj -func Attempt(maxIteration int, f func(index int) error) (int, error) { - var err error - - for i := 0; maxIteration <= 0 || i < maxIteration; i++ { - // for retries >= 0 { - err = f(i) - if err == nil { - return i + 1, nil - } - } - - return maxIteration, err -} - -// AttemptWithDelay invokes a function N times until it returns valid output, -// with a pause between each call. Returning either the caught error or nil. -// When first argument is less than `1`, the function runs until a successful -// response is returned. -// Play: https://go.dev/play/p/tVs6CygC7m1 -func AttemptWithDelay(maxIteration int, delay time.Duration, f func(index int, duration time.Duration) error) (int, time.Duration, error) { - var err error - - start := time.Now() - - for i := 0; maxIteration <= 0 || i < maxIteration; i++ { - err = f(i, time.Since(start)) - if err == nil { - return i + 1, time.Since(start), nil - } - - if maxIteration <= 0 || i+1 < maxIteration { - time.Sleep(delay) - } - } - - return maxIteration, time.Since(start), err -} - -// AttemptWhile invokes a function N times until it returns valid output. -// Returning either the caught error or nil, and along with a bool value to identify -// whether it needs invoke function continuously. It will terminate the invoke -// immediately if second bool value is returned with falsy value. When first -// argument is less than `1`, the function runs until a successful response is -// returned. 
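A minimal sketch of the numeric helpers (Range, Clamp, Sum) and the retry helpers (Attempt, AttemptWithDelay) above; the failure conditions are contrived for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/samber/lo"
)

func main() {
	fmt.Println(lo.Range(5))            // [0 1 2 3 4]
	fmt.Println(lo.Clamp(42, 0, 10))    // 10
	fmt.Println(lo.Sum([]int{1, 2, 3})) // 6

	// Attempt retries the callback up to 3 times until it returns nil.
	iterations, err := lo.Attempt(3, func(i int) error {
		if i < 2 {
			return fmt.Errorf("transient failure %d", i)
		}
		return nil
	})
	fmt.Println(iterations, err) // 3 <nil>

	// AttemptWithDelay additionally sleeps between retries.
	_, elapsed, _ := lo.AttemptWithDelay(3, 10*time.Millisecond, func(i int, _ time.Duration) error {
		if i == 0 {
			return fmt.Errorf("not yet")
		}
		return nil
	})
	fmt.Println(elapsed > 0) // true
}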
-func AttemptWhile(maxIteration int, f func(int) (error, bool)) (int, error) { - var err error - var shouldContinueInvoke bool - - for i := 0; maxIteration <= 0 || i < maxIteration; i++ { - // for retries >= 0 { - err, shouldContinueInvoke = f(i) - if !shouldContinueInvoke { // if shouldContinueInvoke is false, then return immediately - return i + 1, err - } - if err == nil { - return i + 1, nil - } - } - - return maxIteration, err -} - -// AttemptWhileWithDelay invokes a function N times until it returns valid output, -// with a pause between each call. Returning either the caught error or nil, and along -// with a bool value to identify whether it needs to invoke function continuously. -// It will terminate the invoke immediately if second bool value is returned with falsy -// value. When first argument is less than `1`, the function runs until a successful -// response is returned. -func AttemptWhileWithDelay(maxIteration int, delay time.Duration, f func(int, time.Duration) (error, bool)) (int, time.Duration, error) { - var err error - var shouldContinueInvoke bool - - start := time.Now() - - for i := 0; maxIteration <= 0 || i < maxIteration; i++ { - err, shouldContinueInvoke = f(i, time.Since(start)) - if !shouldContinueInvoke { // if shouldContinueInvoke is false, then return immediately - return i + 1, time.Since(start), err - } - if err == nil { - return i + 1, time.Since(start), nil - } - - if maxIteration <= 0 || i+1 < maxIteration { - time.Sleep(delay) - } - } - - return maxIteration, time.Since(start), err -} - -type transactionStep[T any] struct { - exec func(T) (T, error) - onRollback func(T) T -} - -// NewTransaction instanciate a new transaction. -func NewTransaction[T any]() *Transaction[T] { - return &Transaction[T]{ - steps: []transactionStep[T]{}, - } -} - -// Transaction implements a Saga pattern -type Transaction[T any] struct { - steps []transactionStep[T] -} - -// Then adds a step to the chain of callbacks. It returns the same Transaction. -func (t *Transaction[T]) Then(exec func(T) (T, error), onRollback func(T) T) *Transaction[T] { - t.steps = append(t.steps, transactionStep[T]{ - exec: exec, - onRollback: onRollback, - }) - - return t -} - -// Process runs the Transaction steps and rollbacks in case of errors. -func (t *Transaction[T]) Process(state T) (T, error) { - var i int - var err error - - for i < len(t.steps) { - state, err = t.steps[i].exec(state) - if err != nil { - break - } - - i++ - } - - if err == nil { - return state, nil - } - - for i > 0 { - i-- - state = t.steps[i].onRollback(state) - } - - return state, err -} - -// throttle ? diff --git a/vendor/github.com/samber/lo/slice.go b/vendor/github.com/samber/lo/slice.go deleted file mode 100644 index b2e92ad2f66..00000000000 --- a/vendor/github.com/samber/lo/slice.go +++ /dev/null @@ -1,592 +0,0 @@ -package lo - -import ( - "math/rand" - - "golang.org/x/exp/constraints" -) - -// Filter iterates over elements of collection, returning an array of all elements predicate returns truthy for. -// Play: https://go.dev/play/p/Apjg3WeSi7K -func Filter[V any](collection []V, predicate func(item V, index int) bool) []V { - result := []V{} - - for i, item := range collection { - if predicate(item, i) { - result = append(result, item) - } - } - - return result -} - -// Map manipulates a slice and transforms it to a slice of another type. 
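The Transaction type above implements a small Saga pattern: each step pairs an exec function with a rollback, and Process unwinds the completed steps when a later one fails. A contrived sketch:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	tx := lo.NewTransaction[int]().
		Then(
			func(state int) (int, error) { return state + 10, nil }, // exec
			func(state int) int { return state - 10 },               // rollback
		).
		Then(
			func(state int) (int, error) { return state, fmt.Errorf("step 2 failed") },
			func(state int) int { return state },
		)

	// The failure in step 2 rolls back step 1, so the initial state is returned.
	state, err := tx.Process(1)
	fmt.Println(state, err) // 1 step 2 failed
}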
-// Play: https://go.dev/play/p/OkPcYAhBo0D -func Map[T any, R any](collection []T, iteratee func(item T, index int) R) []R { - result := make([]R, len(collection)) - - for i, item := range collection { - result[i] = iteratee(item, i) - } - - return result -} - -// FilterMap returns a slice which obtained after both filtering and mapping using the given callback function. -// The callback function should return two values: -// - the result of the mapping operation and -// - whether the result element should be included or not. -// -// Play: https://go.dev/play/p/-AuYXfy7opz -func FilterMap[T any, R any](collection []T, callback func(item T, index int) (R, bool)) []R { - result := []R{} - - for i, item := range collection { - if r, ok := callback(item, i); ok { - result = append(result, r) - } - } - - return result -} - -// FlatMap manipulates a slice and transforms and flattens it to a slice of another type. -// Play: https://go.dev/play/p/YSoYmQTA8-U -func FlatMap[T any, R any](collection []T, iteratee func(item T, index int) []R) []R { - result := []R{} - - for i, item := range collection { - result = append(result, iteratee(item, i)...) - } - - return result -} - -// Reduce reduces collection to a value which is the accumulated result of running each element in collection -// through accumulator, where each successive invocation is supplied the return value of the previous. -// Play: https://go.dev/play/p/R4UHXZNaaUG -func Reduce[T any, R any](collection []T, accumulator func(agg R, item T, index int) R, initial R) R { - for i, item := range collection { - initial = accumulator(initial, item, i) - } - - return initial -} - -// ReduceRight helper is like Reduce except that it iterates over elements of collection from right to left. -// Play: https://go.dev/play/p/Fq3W70l7wXF -func ReduceRight[T any, R any](collection []T, accumulator func(agg R, item T, index int) R, initial R) R { - for i := len(collection) - 1; i >= 0; i-- { - initial = accumulator(initial, collection[i], i) - } - - return initial -} - -// ForEach iterates over elements of collection and invokes iteratee for each element. -// Play: https://go.dev/play/p/oofyiUPRf8t -func ForEach[T any](collection []T, iteratee func(item T, index int)) { - for i, item := range collection { - iteratee(item, i) - } -} - -// Times invokes the iteratee n times, returning an array of the results of each invocation. -// The iteratee is invoked with index as argument. -// Play: https://go.dev/play/p/vgQj3Glr6lT -func Times[T any](count int, iteratee func(index int) T) []T { - result := make([]T, count) - - for i := 0; i < count; i++ { - result[i] = iteratee(i) - } - - return result -} - -// Uniq returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. -// The order of result values is determined by the order they occur in the array. -// Play: https://go.dev/play/p/DTzbeXZ6iEN -func Uniq[T comparable](collection []T) []T { - result := make([]T, 0, len(collection)) - seen := make(map[T]struct{}, len(collection)) - - for _, item := range collection { - if _, ok := seen[item]; ok { - continue - } - - seen[item] = struct{}{} - result = append(result, item) - } - - return result -} - -// UniqBy returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. -// The order of result values is determined by the order they occur in the array. 
It accepts `iteratee` which is -// invoked for each element in array to generate the criterion by which uniqueness is computed. -// Play: https://go.dev/play/p/g42Z3QSb53u -func UniqBy[T any, U comparable](collection []T, iteratee func(item T) U) []T { - result := make([]T, 0, len(collection)) - seen := make(map[U]struct{}, len(collection)) - - for _, item := range collection { - key := iteratee(item) - - if _, ok := seen[key]; ok { - continue - } - - seen[key] = struct{}{} - result = append(result, item) - } - - return result -} - -// GroupBy returns an object composed of keys generated from the results of running each element of collection through iteratee. -// Play: https://go.dev/play/p/XnQBd_v6brd -func GroupBy[T any, U comparable](collection []T, iteratee func(item T) U) map[U][]T { - result := map[U][]T{} - - for _, item := range collection { - key := iteratee(item) - - result[key] = append(result[key], item) - } - - return result -} - -// Chunk returns an array of elements split into groups the length of size. If array can't be split evenly, -// the final chunk will be the remaining elements. -// Play: https://go.dev/play/p/EeKl0AuTehH -func Chunk[T any](collection []T, size int) [][]T { - if size <= 0 { - panic("Second parameter must be greater than 0") - } - - chunksNum := len(collection) / size - if len(collection)%size != 0 { - chunksNum += 1 - } - - result := make([][]T, 0, chunksNum) - - for i := 0; i < chunksNum; i++ { - last := (i + 1) * size - if last > len(collection) { - last = len(collection) - } - result = append(result, collection[i*size:last]) - } - - return result -} - -// PartitionBy returns an array of elements split into groups. The order of grouped values is -// determined by the order they occur in collection. The grouping is generated from the results -// of running each element of collection through iteratee. -// Play: https://go.dev/play/p/NfQ_nGjkgXW -func PartitionBy[T any, K comparable](collection []T, iteratee func(item T) K) [][]T { - result := [][]T{} - seen := map[K]int{} - - for _, item := range collection { - key := iteratee(item) - - resultIndex, ok := seen[key] - if !ok { - resultIndex = len(result) - seen[key] = resultIndex - result = append(result, []T{}) - } - - result[resultIndex] = append(result[resultIndex], item) - } - - return result - - // unordered: - // groups := GroupBy[T, K](collection, iteratee) - // return Values[K, []T](groups) -} - -// Flatten returns an array a single level deep. -// Play: https://go.dev/play/p/rbp9ORaMpjw -func Flatten[T any](collection [][]T) []T { - totalLen := 0 - for i := range collection { - totalLen += len(collection[i]) - } - - result := make([]T, 0, totalLen) - for i := range collection { - result = append(result, collection[i]...) - } - - return result -} - -// Interleave round-robin alternating input slices and sequentially appending value at index into result -// Play: https://go.dev/play/p/DDhlwrShbwe -func Interleave[T any](collections ...[]T) []T { - if len(collections) == 0 { - return []T{} - } - - maxSize := 0 - totalSize := 0 - for _, c := range collections { - size := len(c) - totalSize += size - if size > maxSize { - maxSize = size - } - } - - if maxSize == 0 { - return []T{} - } - - result := make([]T, totalSize) - - resultIdx := 0 - for i := 0; i < maxSize; i++ { - for j := range collections { - if len(collections[j])-1 < i { - continue - } - - result[resultIdx] = collections[j][i] - resultIdx++ - } - } - - return result -} - -// Shuffle returns an array of shuffled values. 
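A short sketch of the core slice helpers above (Filter, Map, Reduce, Uniq, GroupBy, Chunk) on a made-up slice:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	nums := []int{1, 2, 3, 4, 5, 5}

	even := lo.Filter(nums, func(n int, _ int) bool { return n%2 == 0 })
	fmt.Println(even) // [2 4]

	squares := lo.Map(nums, func(n int, _ int) int { return n * n })
	fmt.Println(squares) // [1 4 9 16 25 25]

	total := lo.Reduce(nums, func(acc int, n int, _ int) int { return acc + n }, 0)
	fmt.Println(total) // 20

	fmt.Println(lo.Uniq(nums))     // [1 2 3 4 5]
	fmt.Println(lo.Chunk(nums, 4)) // [[1 2 3 4] [5 5]]

	byParity := lo.GroupBy(nums, func(n int) string {
		if n%2 == 0 {
			return "even"
		}
		return "odd"
	})
	fmt.Println(byParity) // map[even:[2 4] odd:[1 3 5 5]]
}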
Uses the Fisher-Yates shuffle algorithm. -// Play: https://go.dev/play/p/Qp73bnTDnc7 -func Shuffle[T any](collection []T) []T { - rand.Shuffle(len(collection), func(i, j int) { - collection[i], collection[j] = collection[j], collection[i] - }) - - return collection -} - -// Reverse reverses array so that the first element becomes the last, the second element becomes the second to last, and so on. -// Play: https://go.dev/play/p/fhUMLvZ7vS6 -func Reverse[T any](collection []T) []T { - length := len(collection) - half := length / 2 - - for i := 0; i < half; i = i + 1 { - j := length - 1 - i - collection[i], collection[j] = collection[j], collection[i] - } - - return collection -} - -// Fill fills elements of array with `initial` value. -// Play: https://go.dev/play/p/VwR34GzqEub -func Fill[T Clonable[T]](collection []T, initial T) []T { - result := make([]T, 0, len(collection)) - - for range collection { - result = append(result, initial.Clone()) - } - - return result -} - -// Repeat builds a slice with N copies of initial value. -// Play: https://go.dev/play/p/g3uHXbmc3b6 -func Repeat[T Clonable[T]](count int, initial T) []T { - result := make([]T, 0, count) - - for i := 0; i < count; i++ { - result = append(result, initial.Clone()) - } - - return result -} - -// RepeatBy builds a slice with values returned by N calls of callback. -// Play: https://go.dev/play/p/ozZLCtX_hNU -func RepeatBy[T any](count int, predicate func(index int) T) []T { - result := make([]T, 0, count) - - for i := 0; i < count; i++ { - result = append(result, predicate(i)) - } - - return result -} - -// KeyBy transforms a slice or an array of structs to a map based on a pivot callback. -// Play: https://go.dev/play/p/mdaClUAT-zZ -func KeyBy[K comparable, V any](collection []V, iteratee func(item V) K) map[K]V { - result := make(map[K]V, len(collection)) - - for _, v := range collection { - k := iteratee(v) - result[k] = v - } - - return result -} - -// Associate returns a map containing key-value pairs provided by transform function applied to elements of the given slice. -// If any of two pairs would have the same key the last one gets added to the map. -// The order of keys in returned map is not specified and is not guaranteed to be the same from the original array. -// Play: https://go.dev/play/p/WHa2CfMO3Lr -func Associate[T any, K comparable, V any](collection []T, transform func(item T) (K, V)) map[K]V { - result := make(map[K]V, len(collection)) - - for _, t := range collection { - k, v := transform(t) - result[k] = v - } - - return result -} - -// SliceToMap returns a map containing key-value pairs provided by transform function applied to elements of the given slice. -// If any of two pairs would have the same key the last one gets added to the map. -// The order of keys in returned map is not specified and is not guaranteed to be the same from the original array. -// Alias of Associate(). -// Play: https://go.dev/play/p/WHa2CfMO3Lr -func SliceToMap[T any, K comparable, V any](collection []T, transform func(item T) (K, V)) map[K]V { - return Associate(collection, transform) -} - -// Drop drops n elements from the beginning of a slice or array. -// Play: https://go.dev/play/p/JswS7vXRJP2 -func Drop[T any](collection []T, n int) []T { - if len(collection) <= n { - return make([]T, 0) - } - - result := make([]T, 0, len(collection)-n) - - return append(result, collection[n:]...) -} - -// DropRight drops n elements from the end of a slice or array. 
-// Play: https://go.dev/play/p/GG0nXkSJJa3 -func DropRight[T any](collection []T, n int) []T { - if len(collection) <= n { - return []T{} - } - - result := make([]T, 0, len(collection)-n) - return append(result, collection[:len(collection)-n]...) -} - -// DropWhile drops elements from the beginning of a slice or array while the predicate returns true. -// Play: https://go.dev/play/p/7gBPYw2IK16 -func DropWhile[T any](collection []T, predicate func(item T) bool) []T { - i := 0 - for ; i < len(collection); i++ { - if !predicate(collection[i]) { - break - } - } - - result := make([]T, 0, len(collection)-i) - return append(result, collection[i:]...) -} - -// DropRightWhile drops elements from the end of a slice or array while the predicate returns true. -// Play: https://go.dev/play/p/3-n71oEC0Hz -func DropRightWhile[T any](collection []T, predicate func(item T) bool) []T { - i := len(collection) - 1 - for ; i >= 0; i-- { - if !predicate(collection[i]) { - break - } - } - - result := make([]T, 0, i+1) - return append(result, collection[:i+1]...) -} - -// Reject is the opposite of Filter, this method returns the elements of collection that predicate does not return truthy for. -// Play: https://go.dev/play/p/YkLMODy1WEL -func Reject[V any](collection []V, predicate func(item V, index int) bool) []V { - result := []V{} - - for i, item := range collection { - if !predicate(item, i) { - result = append(result, item) - } - } - - return result -} - -// Count counts the number of elements in the collection that compare equal to value. -// Play: https://go.dev/play/p/Y3FlK54yveC -func Count[T comparable](collection []T, value T) (count int) { - for _, item := range collection { - if item == value { - count++ - } - } - - return count -} - -// CountBy counts the number of elements in the collection for which predicate is true. -// Play: https://go.dev/play/p/ByQbNYQQi4X -func CountBy[T any](collection []T, predicate func(item T) bool) (count int) { - for _, item := range collection { - if predicate(item) { - count++ - } - } - - return count -} - -// CountValues counts the number of each element in the collection. -// Play: https://go.dev/play/p/-p-PyLT4dfy -func CountValues[T comparable](collection []T) map[T]int { - result := make(map[T]int) - - for _, item := range collection { - result[item]++ - } - - return result -} - -// CountValuesBy counts the number of each element return from mapper function. -// Is equivalent to chaining lo.Map and lo.CountValues. -// Play: https://go.dev/play/p/2U0dG1SnOmS -func CountValuesBy[T any, U comparable](collection []T, mapper func(item T) U) map[U]int { - result := make(map[U]int) - - for _, item := range collection { - result[mapper(item)]++ - } - - return result -} - -// Subset returns a copy of a slice from `offset` up to `length` elements. Like `slice[start:start+length]`, but does not panic on overflow. -// Play: https://go.dev/play/p/tOQu1GhFcog -func Subset[T any](collection []T, offset int, length uint) []T { - size := len(collection) - - if offset < 0 { - offset = size + offset - if offset < 0 { - offset = 0 - } - } - - if offset > size { - return []T{} - } - - if length > uint(size)-uint(offset) { - length = uint(size - offset) - } - - return collection[offset : offset+int(length)] -} - -// Slice returns a copy of a slice from `start` up to, but not including `end`. Like `slice[start:end]`, but does not panic on overflow. 
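The Drop/Reject/Count/Subset helpers above behave as sketched below; the slice is arbitrary:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	nums := []int{0, 1, 2, 3, 4, 5}

	fmt.Println(lo.Drop(nums, 2))      // [2 3 4 5]
	fmt.Println(lo.DropRight(nums, 2)) // [0 1 2 3]

	odd := lo.Reject(nums, func(n int, _ int) bool { return n%2 == 0 })
	fmt.Println(odd) // [1 3 5]

	fmt.Println(lo.Count(nums, 3))                                   // 1
	fmt.Println(lo.CountBy(nums, func(n int) bool { return n > 2 })) // 3

	// Subset clamps out-of-range offsets and lengths instead of panicking.
	fmt.Println(lo.Subset(nums, 4, 10)) // [4 5]
}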
-// Play: https://go.dev/play/p/8XWYhfMMA1h -func Slice[T any](collection []T, start int, end int) []T { - size := len(collection) - - if start >= end { - return []T{} - } - - if start > size { - start = size - } - if start < 0 { - start = 0 - } - - if end > size { - end = size - } - if end < 0 { - end = 0 - } - - return collection[start:end] -} - -// Replace returns a copy of the slice with the first n non-overlapping instances of old replaced by new. -// Play: https://go.dev/play/p/XfPzmf9gql6 -func Replace[T comparable](collection []T, old T, new T, n int) []T { - result := make([]T, len(collection)) - copy(result, collection) - - for i := range result { - if result[i] == old && n != 0 { - result[i] = new - n-- - } - } - - return result -} - -// ReplaceAll returns a copy of the slice with all non-overlapping instances of old replaced by new. -// Play: https://go.dev/play/p/a9xZFUHfYcV -func ReplaceAll[T comparable](collection []T, old T, new T) []T { - return Replace(collection, old, new, -1) -} - -// Compact returns a slice of all non-zero elements. -// Play: https://go.dev/play/p/tXiy-iK6PAc -func Compact[T comparable](collection []T) []T { - var zero T - - result := []T{} - - for _, item := range collection { - if item != zero { - result = append(result, item) - } - } - - return result -} - -// IsSorted checks if a slice is sorted. -// Play: https://go.dev/play/p/mc3qR-t4mcx -func IsSorted[T constraints.Ordered](collection []T) bool { - for i := 1; i < len(collection); i++ { - if collection[i-1] > collection[i] { - return false - } - } - - return true -} - -// IsSortedByKey checks if a slice is sorted by iteratee. -// Play: https://go.dev/play/p/wiG6XyBBu49 -func IsSortedByKey[T any, K constraints.Ordered](collection []T, iteratee func(item T) K) bool { - size := len(collection) - - for i := 0; i < size-1; i++ { - if iteratee(collection[i]) > iteratee(collection[i+1]) { - return false - } - } - - return true -} diff --git a/vendor/github.com/samber/lo/string.go b/vendor/github.com/samber/lo/string.go deleted file mode 100644 index dfe1050b62e..00000000000 --- a/vendor/github.com/samber/lo/string.go +++ /dev/null @@ -1,94 +0,0 @@ -package lo - -import ( - "math/rand" - "unicode/utf8" -) - -var ( - LowerCaseLettersCharset = []rune("abcdefghijklmnopqrstuvwxyz") - UpperCaseLettersCharset = []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ") - LettersCharset = append(LowerCaseLettersCharset, UpperCaseLettersCharset...) - NumbersCharset = []rune("0123456789") - AlphanumericCharset = append(LettersCharset, NumbersCharset...) - SpecialCharset = []rune("!@#$%^&*()_+-=[]{}|;':\",./<>?") - AllCharset = append(AlphanumericCharset, SpecialCharset...) -) - -// RandomString return a random string. -// Play: https://go.dev/play/p/rRseOQVVum4 -func RandomString(size int, charset []rune) string { - if size <= 0 { - panic("lo.RandomString: Size parameter must be greater than 0") - } - if len(charset) <= 0 { - panic("lo.RandomString: Charset parameter must not be empty") - } - - b := make([]rune, size) - possibleCharactersCount := len(charset) - for i := range b { - b[i] = charset[rand.Intn(possibleCharactersCount)] - } - return string(b) -} - -// Substring return part of a string. 
-// Play: https://go.dev/play/p/TQlxQi82Lu1 -func Substring[T ~string](str T, offset int, length uint) T { - size := len(str) - - if offset < 0 { - offset = size + offset - if offset < 0 { - offset = 0 - } - } - - if offset > size { - return Empty[T]() - } - - if length > uint(size)-uint(offset) { - length = uint(size - offset) - } - - return str[offset : offset+int(length)] -} - -// ChunkString returns an array of strings split into groups the length of size. If array can't be split evenly, -// the final chunk will be the remaining elements. -// Play: https://go.dev/play/p/__FLTuJVz54 -func ChunkString[T ~string](str T, size int) []T { - if size <= 0 { - panic("lo.ChunkString: Size parameter must be greater than 0") - } - - if len(str) == 0 { - return []T{""} - } - - if size >= len(str) { - return []T{str} - } - - var chunks []T = make([]T, 0, ((len(str)-1)/size)+1) - currentLen := 0 - currentStart := 0 - for i := range str { - if currentLen == size { - chunks = append(chunks, str[currentStart:i]) - currentLen = 0 - currentStart = i - } - currentLen++ - } - chunks = append(chunks, str[currentStart:]) - return chunks -} - -// RuneLength is an alias to utf8.RuneCountInString which returns the number of runes in string. -// Play: https://go.dev/play/p/tuhgW_lWY8l -func RuneLength(str string) int { - return utf8.RuneCountInString(str) -} diff --git a/vendor/github.com/samber/lo/tuples.go b/vendor/github.com/samber/lo/tuples.go deleted file mode 100644 index cdddf6afc10..00000000000 --- a/vendor/github.com/samber/lo/tuples.go +++ /dev/null @@ -1,513 +0,0 @@ -package lo - -// T2 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T2[A any, B any](a A, b B) Tuple2[A, B] { - return Tuple2[A, B]{A: a, B: b} -} - -// T3 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T3[A any, B any, C any](a A, b B, c C) Tuple3[A, B, C] { - return Tuple3[A, B, C]{A: a, B: b, C: c} -} - -// T4 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T4[A any, B any, C any, D any](a A, b B, c C, d D) Tuple4[A, B, C, D] { - return Tuple4[A, B, C, D]{A: a, B: b, C: c, D: d} -} - -// T5 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T5[A any, B any, C any, D any, E any](a A, b B, c C, d D, e E) Tuple5[A, B, C, D, E] { - return Tuple5[A, B, C, D, E]{A: a, B: b, C: c, D: d, E: e} -} - -// T6 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T6[A any, B any, C any, D any, E any, F any](a A, b B, c C, d D, e E, f F) Tuple6[A, B, C, D, E, F] { - return Tuple6[A, B, C, D, E, F]{A: a, B: b, C: c, D: d, E: e, F: f} -} - -// T7 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T7[A any, B any, C any, D any, E any, F any, G any](a A, b B, c C, d D, e E, f F, g G) Tuple7[A, B, C, D, E, F, G] { - return Tuple7[A, B, C, D, E, F, G]{A: a, B: b, C: c, D: d, E: e, F: f, G: g} -} - -// T8 creates a tuple from a list of values. -// Play: https://go.dev/play/p/IllL3ZO4BQm -func T8[A any, B any, C any, D any, E any, F any, G any, H any](a A, b B, c C, d D, e E, f F, g G, h H) Tuple8[A, B, C, D, E, F, G, H] { - return Tuple8[A, B, C, D, E, F, G, H]{A: a, B: b, C: c, D: d, E: e, F: f, G: g, H: h} -} - -// T9 creates a tuple from a list of values. 
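A minimal sketch of the string helpers above (RandomString, Substring, ChunkString, RuneLength); the literals are arbitrary:

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	// RandomString panics on a non-positive size or an empty charset.
	fmt.Println(lo.RandomString(8, lo.AlphanumericCharset))

	// Substring clamps out-of-range offsets instead of panicking.
	fmt.Println(lo.Substring("elemental", 3, 4))  // "ment"
	fmt.Println(lo.Substring("elemental", -3, 3)) // "tal"

	fmt.Println(lo.ChunkString("abcdefg", 3)) // [abc def g]
	fmt.Println(lo.RuneLength("héllo"))       // 5
}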
-// Play: https://go.dev/play/p/IllL3ZO4BQm -func T9[A any, B any, C any, D any, E any, F any, G any, H any, I any](a A, b B, c C, d D, e E, f F, g G, h H, i I) Tuple9[A, B, C, D, E, F, G, H, I] { - return Tuple9[A, B, C, D, E, F, G, H, I]{A: a, B: b, C: c, D: d, E: e, F: f, G: g, H: h, I: i} -} - -// Unpack2 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack2[A any, B any](tuple Tuple2[A, B]) (A, B) { - return tuple.A, tuple.B -} - -// Unpack3 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack3[A any, B any, C any](tuple Tuple3[A, B, C]) (A, B, C) { - return tuple.A, tuple.B, tuple.C -} - -// Unpack4 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack4[A any, B any, C any, D any](tuple Tuple4[A, B, C, D]) (A, B, C, D) { - return tuple.A, tuple.B, tuple.C, tuple.D -} - -// Unpack5 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack5[A any, B any, C any, D any, E any](tuple Tuple5[A, B, C, D, E]) (A, B, C, D, E) { - return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E -} - -// Unpack6 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack6[A any, B any, C any, D any, E any, F any](tuple Tuple6[A, B, C, D, E, F]) (A, B, C, D, E, F) { - return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F -} - -// Unpack7 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack7[A any, B any, C any, D any, E any, F any, G any](tuple Tuple7[A, B, C, D, E, F, G]) (A, B, C, D, E, F, G) { - return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F, tuple.G -} - -// Unpack8 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack8[A any, B any, C any, D any, E any, F any, G any, H any](tuple Tuple8[A, B, C, D, E, F, G, H]) (A, B, C, D, E, F, G, H) { - return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F, tuple.G, tuple.H -} - -// Unpack9 returns values contained in tuple. -// Play: https://go.dev/play/p/xVP_k0kJ96W -func Unpack9[A any, B any, C any, D any, E any, F any, G any, H any, I any](tuple Tuple9[A, B, C, D, E, F, G, H, I]) (A, B, C, D, E, F, G, H, I) { - return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F, tuple.G, tuple.H, tuple.I -} - -// Zip2 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. -// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip2[A any, B any](a []A, b []B) []Tuple2[A, B] { - size := Max([]int{len(a), len(b)}) - - result := make([]Tuple2[A, B], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - - result = append(result, Tuple2[A, B]{ - A: _a, - B: _b, - }) - } - - return result -} - -// Zip3 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. 
-// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip3[A any, B any, C any](a []A, b []B, c []C) []Tuple3[A, B, C] { - size := Max([]int{len(a), len(b), len(c)}) - - result := make([]Tuple3[A, B, C], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - - result = append(result, Tuple3[A, B, C]{ - A: _a, - B: _b, - C: _c, - }) - } - - return result -} - -// Zip4 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. -// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip4[A any, B any, C any, D any](a []A, b []B, c []C, d []D) []Tuple4[A, B, C, D] { - size := Max([]int{len(a), len(b), len(c), len(d)}) - - result := make([]Tuple4[A, B, C, D], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - _d, _ := Nth(d, index) - - result = append(result, Tuple4[A, B, C, D]{ - A: _a, - B: _b, - C: _c, - D: _d, - }) - } - - return result -} - -// Zip5 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. -// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip5[A any, B any, C any, D any, E any](a []A, b []B, c []C, d []D, e []E) []Tuple5[A, B, C, D, E] { - size := Max([]int{len(a), len(b), len(c), len(d), len(e)}) - - result := make([]Tuple5[A, B, C, D, E], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - _d, _ := Nth(d, index) - _e, _ := Nth(e, index) - - result = append(result, Tuple5[A, B, C, D, E]{ - A: _a, - B: _b, - C: _c, - D: _d, - E: _e, - }) - } - - return result -} - -// Zip6 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. -// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip6[A any, B any, C any, D any, E any, F any](a []A, b []B, c []C, d []D, e []E, f []F) []Tuple6[A, B, C, D, E, F] { - size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f)}) - - result := make([]Tuple6[A, B, C, D, E, F], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - _d, _ := Nth(d, index) - _e, _ := Nth(e, index) - _f, _ := Nth(f, index) - - result = append(result, Tuple6[A, B, C, D, E, F]{ - A: _a, - B: _b, - C: _c, - D: _d, - E: _e, - F: _f, - }) - } - - return result -} - -// Zip7 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. 
-// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip7[A any, B any, C any, D any, E any, F any, G any](a []A, b []B, c []C, d []D, e []E, f []F, g []G) []Tuple7[A, B, C, D, E, F, G] { - size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f), len(g)}) - - result := make([]Tuple7[A, B, C, D, E, F, G], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - _d, _ := Nth(d, index) - _e, _ := Nth(e, index) - _f, _ := Nth(f, index) - _g, _ := Nth(g, index) - - result = append(result, Tuple7[A, B, C, D, E, F, G]{ - A: _a, - B: _b, - C: _c, - D: _d, - E: _e, - F: _f, - G: _g, - }) - } - - return result -} - -// Zip8 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. -// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip8[A any, B any, C any, D any, E any, F any, G any, H any](a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H) []Tuple8[A, B, C, D, E, F, G, H] { - size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f), len(g), len(h)}) - - result := make([]Tuple8[A, B, C, D, E, F, G, H], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - _d, _ := Nth(d, index) - _e, _ := Nth(e, index) - _f, _ := Nth(f, index) - _g, _ := Nth(g, index) - _h, _ := Nth(h, index) - - result = append(result, Tuple8[A, B, C, D, E, F, G, H]{ - A: _a, - B: _b, - C: _c, - D: _d, - E: _e, - F: _f, - G: _g, - H: _h, - }) - } - - return result -} - -// Zip9 creates a slice of grouped elements, the first of which contains the first elements -// of the given arrays, the second of which contains the second elements of the given arrays, and so on. -// When collections have different size, the Tuple attributes are filled with zero value. -// Play: https://go.dev/play/p/jujaA6GaJTp -func Zip9[A any, B any, C any, D any, E any, F any, G any, H any, I any](a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H, i []I) []Tuple9[A, B, C, D, E, F, G, H, I] { - size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f), len(g), len(h), len(i)}) - - result := make([]Tuple9[A, B, C, D, E, F, G, H, I], 0, size) - - for index := 0; index < size; index++ { - _a, _ := Nth(a, index) - _b, _ := Nth(b, index) - _c, _ := Nth(c, index) - _d, _ := Nth(d, index) - _e, _ := Nth(e, index) - _f, _ := Nth(f, index) - _g, _ := Nth(g, index) - _h, _ := Nth(h, index) - _i, _ := Nth(i, index) - - result = append(result, Tuple9[A, B, C, D, E, F, G, H, I]{ - A: _a, - B: _b, - C: _c, - D: _d, - E: _e, - F: _f, - G: _g, - H: _h, - I: _i, - }) - } - - return result -} - -// Unzip2 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. -// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip2[A any, B any](tuples []Tuple2[A, B]) ([]A, []B) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - } - - return r1, r2 -} - -// Unzip3 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. 
-// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip3[A any, B any, C any](tuples []Tuple3[A, B, C]) ([]A, []B, []C) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - } - - return r1, r2, r3 -} - -// Unzip4 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. -// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip4[A any, B any, C any, D any](tuples []Tuple4[A, B, C, D]) ([]A, []B, []C, []D) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - r4 := make([]D, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - r4 = append(r4, tuple.D) - } - - return r1, r2, r3, r4 -} - -// Unzip5 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. -// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip5[A any, B any, C any, D any, E any](tuples []Tuple5[A, B, C, D, E]) ([]A, []B, []C, []D, []E) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - r4 := make([]D, 0, size) - r5 := make([]E, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - r4 = append(r4, tuple.D) - r5 = append(r5, tuple.E) - } - - return r1, r2, r3, r4, r5 -} - -// Unzip6 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. -// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip6[A any, B any, C any, D any, E any, F any](tuples []Tuple6[A, B, C, D, E, F]) ([]A, []B, []C, []D, []E, []F) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - r4 := make([]D, 0, size) - r5 := make([]E, 0, size) - r6 := make([]F, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - r4 = append(r4, tuple.D) - r5 = append(r5, tuple.E) - r6 = append(r6, tuple.F) - } - - return r1, r2, r3, r4, r5, r6 -} - -// Unzip7 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. -// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip7[A any, B any, C any, D any, E any, F any, G any](tuples []Tuple7[A, B, C, D, E, F, G]) ([]A, []B, []C, []D, []E, []F, []G) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - r4 := make([]D, 0, size) - r5 := make([]E, 0, size) - r6 := make([]F, 0, size) - r7 := make([]G, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - r4 = append(r4, tuple.D) - r5 = append(r5, tuple.E) - r6 = append(r6, tuple.F) - r7 = append(r7, tuple.G) - } - - return r1, r2, r3, r4, r5, r6, r7 -} - -// Unzip8 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. 
-// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip8[A any, B any, C any, D any, E any, F any, G any, H any](tuples []Tuple8[A, B, C, D, E, F, G, H]) ([]A, []B, []C, []D, []E, []F, []G, []H) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - r4 := make([]D, 0, size) - r5 := make([]E, 0, size) - r6 := make([]F, 0, size) - r7 := make([]G, 0, size) - r8 := make([]H, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - r4 = append(r4, tuple.D) - r5 = append(r5, tuple.E) - r6 = append(r6, tuple.F) - r7 = append(r7, tuple.G) - r8 = append(r8, tuple.H) - } - - return r1, r2, r3, r4, r5, r6, r7, r8 -} - -// Unzip9 accepts an array of grouped elements and creates an array regrouping the elements -// to their pre-zip configuration. -// Play: https://go.dev/play/p/ciHugugvaAW -func Unzip9[A any, B any, C any, D any, E any, F any, G any, H any, I any](tuples []Tuple9[A, B, C, D, E, F, G, H, I]) ([]A, []B, []C, []D, []E, []F, []G, []H, []I) { - size := len(tuples) - r1 := make([]A, 0, size) - r2 := make([]B, 0, size) - r3 := make([]C, 0, size) - r4 := make([]D, 0, size) - r5 := make([]E, 0, size) - r6 := make([]F, 0, size) - r7 := make([]G, 0, size) - r8 := make([]H, 0, size) - r9 := make([]I, 0, size) - - for _, tuple := range tuples { - r1 = append(r1, tuple.A) - r2 = append(r2, tuple.B) - r3 = append(r3, tuple.C) - r4 = append(r4, tuple.D) - r5 = append(r5, tuple.E) - r6 = append(r6, tuple.F) - r7 = append(r7, tuple.G) - r8 = append(r8, tuple.H) - r9 = append(r9, tuple.I) - } - - return r1, r2, r3, r4, r5, r6, r7, r8, r9 -} diff --git a/vendor/github.com/samber/lo/type_manipulation.go b/vendor/github.com/samber/lo/type_manipulation.go deleted file mode 100644 index fe99ee1f040..00000000000 --- a/vendor/github.com/samber/lo/type_manipulation.go +++ /dev/null @@ -1,88 +0,0 @@ -package lo - -// ToPtr returns a pointer copy of value. -func ToPtr[T any](x T) *T { - return &x -} - -// FromPtr returns the pointer value or empty. -func FromPtr[T any](x *T) T { - if x == nil { - return Empty[T]() - } - - return *x -} - -// FromPtrOr returns the pointer value or the fallback value. -func FromPtrOr[T any](x *T, fallback T) T { - if x == nil { - return fallback - } - - return *x -} - -// ToSlicePtr returns a slice of pointer copy of value. -func ToSlicePtr[T any](collection []T) []*T { - return Map(collection, func(x T, _ int) *T { - return &x - }) -} - -// ToAnySlice returns a slice with all elements mapped to `any` type -func ToAnySlice[T any](collection []T) []any { - result := make([]any, len(collection)) - for i, item := range collection { - result[i] = item - } - return result -} - -// FromAnySlice returns an `any` slice with all elements mapped to a type. -// Returns false in case of type conversion failure. -func FromAnySlice[T any](in []any) (out []T, ok bool) { - defer func() { - if r := recover(); r != nil { - out = []T{} - ok = false - } - }() - - result := make([]T, len(in)) - for i, item := range in { - result[i] = item.(T) - } - return result, true -} - -// Empty returns an empty value. -func Empty[T any]() T { - var zero T - return zero -} - -// IsEmpty returns true if argument is a zero value. -func IsEmpty[T comparable](v T) bool { - var zero T - return zero == v -} - -// IsNotEmpty returns true if argument is not a zero value. -func IsNotEmpty[T comparable](v T) bool { - var zero T - return zero != v -} - -// Coalesce returns the first non-empty arguments. 
Arguments must be comparable. -func Coalesce[T comparable](v ...T) (result T, ok bool) { - for _, e := range v { - if e != result { - result = e - ok = true - return - } - } - - return -} diff --git a/vendor/github.com/samber/lo/types.go b/vendor/github.com/samber/lo/types.go deleted file mode 100644 index 271c5b4fdf7..00000000000 --- a/vendor/github.com/samber/lo/types.go +++ /dev/null @@ -1,123 +0,0 @@ -package lo - -// Entry defines a key/value pairs. -type Entry[K comparable, V any] struct { - Key K - Value V -} - -// Tuple2 is a group of 2 elements (pair). -type Tuple2[A any, B any] struct { - A A - B B -} - -// Unpack returns values contained in tuple. -func (t Tuple2[A, B]) Unpack() (A, B) { - return t.A, t.B -} - -// Tuple3 is a group of 3 elements. -type Tuple3[A any, B any, C any] struct { - A A - B B - C C -} - -// Unpack returns values contained in tuple. -func (t Tuple3[A, B, C]) Unpack() (A, B, C) { - return t.A, t.B, t.C -} - -// Tuple4 is a group of 4 elements. -type Tuple4[A any, B any, C any, D any] struct { - A A - B B - C C - D D -} - -// Unpack returns values contained in tuple. -func (t Tuple4[A, B, C, D]) Unpack() (A, B, C, D) { - return t.A, t.B, t.C, t.D -} - -// Tuple5 is a group of 5 elements. -type Tuple5[A any, B any, C any, D any, E any] struct { - A A - B B - C C - D D - E E -} - -// Unpack returns values contained in tuple. -func (t Tuple5[A, B, C, D, E]) Unpack() (A, B, C, D, E) { - return t.A, t.B, t.C, t.D, t.E -} - -// Tuple6 is a group of 6 elements. -type Tuple6[A any, B any, C any, D any, E any, F any] struct { - A A - B B - C C - D D - E E - F F -} - -// Unpack returns values contained in tuple. -func (t Tuple6[A, B, C, D, E, F]) Unpack() (A, B, C, D, E, F) { - return t.A, t.B, t.C, t.D, t.E, t.F -} - -// Tuple7 is a group of 7 elements. -type Tuple7[A any, B any, C any, D any, E any, F any, G any] struct { - A A - B B - C C - D D - E E - F F - G G -} - -// Unpack returns values contained in tuple. -func (t Tuple7[A, B, C, D, E, F, G]) Unpack() (A, B, C, D, E, F, G) { - return t.A, t.B, t.C, t.D, t.E, t.F, t.G -} - -// Tuple8 is a group of 8 elements. -type Tuple8[A any, B any, C any, D any, E any, F any, G any, H any] struct { - A A - B B - C C - D D - E E - F F - G G - H H -} - -// Unpack returns values contained in tuple. -func (t Tuple8[A, B, C, D, E, F, G, H]) Unpack() (A, B, C, D, E, F, G, H) { - return t.A, t.B, t.C, t.D, t.E, t.F, t.G, t.H -} - -// Tuple9 is a group of 9 elements. -type Tuple9[A any, B any, C any, D any, E any, F any, G any, H any, I any] struct { - A A - B B - C C - D D - E E - F F - G G - H H - I I -} - -// Unpack returns values contained in tuple. -func (t Tuple9[A, B, C, D, E, F, G, H, I]) Unpack() (A, B, C, D, E, F, G, H, I) { - return t.A, t.B, t.C, t.D, t.E, t.F, t.G, t.H, t.I -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go index 4f7b42488aa..915d5090dde 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -34,8 +34,6 @@ const ( DiffInsert Operation = 1 // DiffEqual item represents an equal diff. 
DiffEqual Operation = 0 - //IndexSeparator is used to seperate the array indexes in an index string - IndexSeparator = "," ) // Diff represents one diff operation @@ -406,14 +404,11 @@ func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { hydrated := make([]Diff, 0, len(diffs)) for _, aDiff := range diffs { - chars := strings.Split(aDiff.Text, IndexSeparator) - text := make([]string, len(chars)) + runes := []rune(aDiff.Text) + text := make([]string, len(runes)) - for i, r := range chars { - i1, err := strconv.Atoi(r) - if err == nil { - text[i] = lineArray[i1] - } + for i, r := range runes { + text[i] = lineArray[runeToInt(r)] } aDiff.Text = strings.Join(text, "") diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index 44c43595478..eb727bb5948 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -9,11 +9,16 @@ package diffmatchpatch import ( - "strconv" + "fmt" "strings" "unicode/utf8" ) +const UNICODE_INVALID_RANGE_START = 0xD800 +const UNICODE_INVALID_RANGE_END = 0xDFFF +const UNICODE_INVALID_RANGE_DELTA = UNICODE_INVALID_RANGE_END - UNICODE_INVALID_RANGE_START + 1 +const UNICODE_RANGE_MAX = 0x10FFFF + // unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. // In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. var unescaper = strings.NewReplacer( @@ -93,14 +98,93 @@ func intArrayToString(ns []uint32) string { return "" } - indexSeparator := IndexSeparator[0] - - // Appr. 3 chars per num plus the comma. - b := []byte{} + b := []rune{} for _, n := range ns { - b = strconv.AppendInt(b, int64(n), 10) - b = append(b, indexSeparator) + b = append(b, intToRune(n)) } - b = b[:len(b)-1] return string(b) } + +// These constants define the number of bits representable +// in 1,2,3,4 byte utf8 sequences, respectively. +const ONE_BYTE_BITS = 7 +const TWO_BYTE_BITS = 11 +const THREE_BYTE_BITS = 16 +const FOUR_BYTE_BITS = 21 + +// Helper for getting a sequence of bits from an integer. +func getBits(i uint32, cnt byte, from byte) byte { + return byte((i >> from) & ((1 << cnt) - 1)) +} + +// Converts an integer in the range 0~1112060 into a rune. +// Based on the ranges table in https://en.wikipedia.org/wiki/UTF-8 +func intToRune(i uint32) rune { + if i < (1 << ONE_BYTE_BITS) { + return rune(i) + } + + if i < (1 << TWO_BYTE_BITS) { + r, size := utf8.DecodeRune([]byte{0b11000000 | getBits(i, 5, 6), 0b10000000 | getBits(i, 6, 0)}) + if size != 2 || r == utf8.RuneError { + panic(fmt.Sprintf("Error encoding an int %d with size 2, got rune %v and size %d", size, r, i)) + } + return r + } + + // Last -3 here needed because for some reason 3rd to last codepoint 65533 in this range + // was returning utf8.RuneError during encoding. 
+	if i < ((1 << THREE_BYTE_BITS) - UNICODE_INVALID_RANGE_DELTA - 3) {
+		if i >= UNICODE_INVALID_RANGE_START {
+			i += UNICODE_INVALID_RANGE_DELTA
+		}
+
+		r, size := utf8.DecodeRune([]byte{0b11100000 | getBits(i, 4, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
+		if size != 3 || r == utf8.RuneError {
+			panic(fmt.Sprintf("Error encoding an int %d with size 3, got rune %v and size %d", size, r, i))
+		}
+		return r
+	}
+
+	if i < (1 << FOUR_BYTE_BITS) {
+		i += UNICODE_INVALID_RANGE_DELTA + 3
+
+		r, size := utf8.DecodeRune([]byte{0b11110000 | getBits(i, 3, 18), 0b10000000 | getBits(i, 6, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
+		if size != 4 || r == utf8.RuneError {
+			panic(fmt.Sprintf("Error encoding an int %d with size 4, got rune %v and size %d", size, r, i))
+		}
+		return r
+	}
+
+	panic(fmt.Sprintf("Unable to encode an int %d as a valid rune", i))
+}
+
+func runeToInt(r rune) uint32 {
+	i := uint32(r)
+
+	if i < (1 << ONE_BYTE_BITS) {
+		return i
+	}
+
+	bytes := make([]byte, 4)
+	size := utf8.EncodeRune(bytes, r)
+
+	if size == 2 {
+		return uint32(bytes[0]&0b11111)<<6 | uint32(bytes[1]&0b111111)
+	}
+
+	if size == 3 {
+		result := uint32(bytes[0]&0b1111)<<12 | uint32(bytes[1]&0b111111)<<6 | uint32(bytes[2]&0b111111)
+
+		if result >= UNICODE_INVALID_RANGE_END {
+			return result - UNICODE_INVALID_RANGE_DELTA
+		}
+
+		return result
+	}
+
+	if size == 4 {
+		result := uint32(bytes[0]&0b111)<<18 | uint32(bytes[1]&0b111111)<<12 | uint32(bytes[2]&0b111111)<<6 | uint32(bytes[3]&0b111111)
+		return result - UNICODE_INVALID_RANGE_DELTA - 3
+	}
+
+	panic(fmt.Sprintf("Unexpected state decoding rune=%v size=%d", r, size))
+}
diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml
deleted file mode 100644
index 6326d40f0e9..00000000000
--- a/vendor/github.com/shopspring/decimal/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-language: go
-
-arch:
-  - amd64
-  - ppc64le
-
-go:
-  - 1.7.x
-  - 1.14.x
-  - 1.15.x
-  - 1.16.x
-  - 1.17.x
-  - tip
-
-install:
-  - go build .
-
-script:
-  - go test -v
diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md
index aea61154b8c..432d0fd4ea6 100644
--- a/vendor/github.com/shopspring/decimal/CHANGELOG.md
+++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md
@@ -1,3 +1,30 @@
+## Decimal v1.4.0
+#### BREAKING
+- Drop support for Go version older than 1.10 [#361](https://github.com/shopspring/decimal/pull/361)
+
+#### FEATURES
+- Add implementation of natural logarithm [#339](https://github.com/shopspring/decimal/pull/339) [#357](https://github.com/shopspring/decimal/pull/357)
+- Add improved implementation of power operation [#358](https://github.com/shopspring/decimal/pull/358)
+- Add Compare method which forwards calls to Cmp [#346](https://github.com/shopspring/decimal/pull/346)
+- Add NewFromBigRat constructor [#288](https://github.com/shopspring/decimal/pull/288)
+- Add NewFromUint64 constructor [#352](https://github.com/shopspring/decimal/pull/352)
+
+#### ENHANCEMENTS
+- Migrate to Github Actions [#245](https://github.com/shopspring/decimal/pull/245) [#340](https://github.com/shopspring/decimal/pull/340)
+- Fix examples for RoundDown, RoundFloor, RoundUp, and RoundCeil [#285](https://github.com/shopspring/decimal/pull/285) [#328](https://github.com/shopspring/decimal/pull/328) [#341](https://github.com/shopspring/decimal/pull/341)
+- Use Godoc standard to mark deprecated Equals and StringScaled methods [#342](https://github.com/shopspring/decimal/pull/342)
+- Removed unnecessary min function for RescalePair method [#265](https://github.com/shopspring/decimal/pull/265)
+- Avoid reallocation of initial slice in MarshalBinary (GobEncode) [#355](https://github.com/shopspring/decimal/pull/355)
+- Optimize NumDigits method [#301](https://github.com/shopspring/decimal/pull/301) [#356](https://github.com/shopspring/decimal/pull/356)
+- Optimize BigInt method [#359](https://github.com/shopspring/decimal/pull/359)
+- Support scanning uint64 [#131](https://github.com/shopspring/decimal/pull/131) [#364](https://github.com/shopspring/decimal/pull/364)
+- Add docs section with alternative libraries [#363](https://github.com/shopspring/decimal/pull/363)
+
+#### BUGFIXES
+- Fix incorrect calculation of decimal modulo
[#258](https://github.com/shopspring/decimal/pull/258) [#317](https://github.com/shopspring/decimal/pull/317) +- Allocate new(big.Int) in Copy method to deeply clone it [#278](https://github.com/shopspring/decimal/pull/278) +- Fix overflow edge case in QuoRem method [#322](https://github.com/shopspring/decimal/pull/322) + ## Decimal v1.3.1 #### ENHANCEMENTS diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md index 2e35df068ea..318c9df5813 100644 --- a/vendor/github.com/shopspring/decimal/README.md +++ b/vendor/github.com/shopspring/decimal/README.md @@ -1,6 +1,8 @@ # decimal -[![Build Status](https://app.travis-ci.com/shopspring/decimal.svg?branch=master)](https://app.travis-ci.com/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) +[![ci](https://github.com/shopspring/decimal/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/shopspring/decimal/actions/workflows/ci.yml) +[![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) +[![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) Arbitrary-precision fixed-point decimal numbers in go. @@ -20,7 +22,12 @@ Run `go get github.com/shopspring/decimal` ## Requirements -Decimal library requires Go version `>=1.7` +Decimal library requires Go version `>=1.10` + +## Documentation + +http://godoc.org/github.com/shopspring/decimal + ## Usage @@ -57,14 +64,16 @@ func main() { } ``` -## Documentation - -http://godoc.org/github.com/shopspring/decimal +## Alternative libraries -## Production Usage +When working with decimal numbers, you might face problems this library is not perfectly suited for. +Fortunately, thanks to the wonderful community we have a dozen other libraries that you can choose from. +Explore other alternatives to find the one that best fits your needs :) -* [Spring](https://shopspring.com/), since August 14, 2014. -* If you are using this in production, please let us know! 
+* [cockroachdb/apd](https://github.com/cockroachdb/apd) - arbitrary precision, mutable and rich API similar to `big.Int`, more performant than this library +* [alpacahq/alpacadecimal](https://github.com/alpacahq/alpacadecimal) - high performance, low precision (12 digits), fully compatible API with this library +* [govalues/decimal](https://github.com/govalues/decimal) - high performance, zero-allocation, low precision (19 digits) +* [greatcloak/decimal](https://github.com/greatcloak/decimal) - fork focusing on billing and e-commerce web application related use cases, includes out-of-the-box BSON marshaling support ## FAQ diff --git a/vendor/github.com/shopspring/decimal/const.go b/vendor/github.com/shopspring/decimal/const.go new file mode 100644 index 00000000000..e5d6fa87e8d --- /dev/null +++ b/vendor/github.com/shopspring/decimal/const.go @@ -0,0 +1,63 @@ +package decimal + +import ( + "strings" +) + +const ( + strLn10 = "2.30258509299404568401799145468436420760110148862877297603332790096757260967735248023599720508959829834196778404228624863340952546508280675666628736909878168948290720832555468084379989482623319852839350530896537773262884616336622228769821988674654366747440424327436515504893431493939147961940440022210510171417480036880840126470806855677432162283552201148046637156591213734507478569476834636167921018064450706480002775026849167465505868569356734206705811364292245544057589257242082413146956890167589402567763113569192920333765871416602301057030896345720754403708474699401682692828084811842893148485249486448719278096762712757753970276686059524967166741834857044225071979650047149510504922147765676369386629769795221107182645497347726624257094293225827985025855097852653832076067263171643095059950878075237103331011978575473315414218084275438635917781170543098274823850456480190956102992918243182375253577097505395651876975103749708886921802051893395072385392051446341972652872869651108625714921988499787488737713456862091670584980782805975119385444500997813114691593466624107184669231010759843831919129223079250374729865092900988039194170265441681633572755570315159611356484654619089704281976336583698371632898217440736600916217785054177927636773114504178213766011101073104239783252189489881759792179866639431952393685591644711824675324563091252877833096360426298215304087456092776072664135478757661626292656829870495795491395491804920906943858079003276301794150311786686209240853794986126493347935487173745167580953708828106745244010589244497647968607512027572418187498939597164310551884819528833074669931781463493000032120032776565413047262188397059679445794346834321839530441484480370130575367426215367557981477045803141363779323629156012818533649846694226146520645994207291711937060244492935803700771898109736253322454836698850552828596619280509844717519850366668087497049698227322024482334309716911113681358841869654932371499694197968780300885040897961859875657989483644521204369821641529298781174297333258860791591251096718751092924847502393057266544627620092306879151813580347770129559364629841236649702335517458619556477246185771736936840467657704787431978057385327181093388349633881306994556939934610109074561603331224794936045536184912333306370475172487127637914092439833181016473782337969226563768207170693584639453161694941170184193811940541644946611127471281970581778329384174223140993002291150236219218672333726838568827353337192510341293070563254442661142976538830182238409102619858288843358745596045300454837078905257847316628370195339223104752756499811922874278971371571322831964100342212421008218
0679525276689858180956119208391760721080919923461516952599099473782780648128058792731993893453415320185969711021407542282796298237068941764740642225757212455392526179373652434440560595336591539160312524480149313234572453879524389036839236450507881731359711238145323701508413491122324390927681724749607955799151363982881058285740538000653371655553014196332241918087621018204919492651483892" +) + +var ( + ln10 = newConstApproximation(strLn10) +) + +type constApproximation struct { + exact Decimal + approximations []Decimal +} + +func newConstApproximation(value string) constApproximation { + parts := strings.Split(value, ".") + coeff, fractional := parts[0], parts[1] + + coeffLen := len(coeff) + maxPrecision := len(fractional) + + var approximations []Decimal + for p := 1; p < maxPrecision; p *= 2 { + r := RequireFromString(value[:coeffLen+p]) + approximations = append(approximations, r) + } + + return constApproximation{ + RequireFromString(value), + approximations, + } +} + +// Returns the smallest approximation available that's at least as precise +// as the passed precision (places after decimal point), i.e. Floor[ log2(precision) ] + 1 +func (c constApproximation) withPrecision(precision int32) Decimal { + i := 0 + + if precision >= 1 { + i++ + } + + for precision >= 16 { + precision /= 16 + i += 4 + } + + for precision >= 2 { + precision /= 2 + i++ + } + + if i >= len(c.approximations) { + return c.exact + } + + return c.approximations[i] +} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go index 84405ec1cf0..a37a2301ef0 100644 --- a/vendor/github.com/shopspring/decimal/decimal.go +++ b/vendor/github.com/shopspring/decimal/decimal.go @@ -4,14 +4,14 @@ // // The best way to create a new Decimal is to use decimal.NewFromString, ex: // -// n, err := decimal.NewFromString("-123.4567") -// n.String() // output: "-123.4567" +// n, err := decimal.NewFromString("-123.4567") +// n.String() // output: "-123.4567" // // To use Decimal as part of a struct: // -// type Struct struct { -// Number Decimal -// } +// type StructName struct { +// Number Decimal +// } // // Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. package decimal @@ -32,18 +32,31 @@ import ( // // Example: // -// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d1.String() // output: "0.6666666666666667" -// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) -// d2.String() // output: "0.0000666666666667" -// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) -// d3.String() // output: "6666.6666666666666667" -// decimal.DivisionPrecision = 3 -// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d4.String() // output: "0.667" -// +// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d1.String() // output: "0.6666666666666667" +// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) +// d2.String() // output: "0.0000666666666667" +// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) +// d3.String() // output: "6666.6666666666666667" +// decimal.DivisionPrecision = 3 +// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d4.String() // output: "0.667" var DivisionPrecision = 16 +// PowPrecisionNegativeExponent specifies the maximum precision of the result (digits after decimal point) +// when calculating decimal power. Only used for cases where the exponent is a negative number. 
+// This constant applies to Pow, PowInt32 and PowBigInt methods, PowWithPrecision method is not constrained by it. +// +// Example: +// +// d1, err := decimal.NewFromFloat(15.2).PowInt32(-2) +// d1.String() // output: "0.0043282548476454" +// +// decimal.PowPrecisionNegativeExponent = 24 +// d2, err := decimal.NewFromFloat(15.2).PowInt32(-2) +// d2.String() // output: "0.004328254847645429362881" +var PowPrecisionNegativeExponent = 16 + // MarshalJSONWithoutQuotes should be set to true if you want the decimal to // be JSON marshaled as a number, instead of as a string. // WARNING: this is dangerous for decimals with many digits, since many JSON @@ -91,12 +104,12 @@ func New(value int64, exp int32) Decimal { } } -// NewFromInt converts a int64 to Decimal. +// NewFromInt converts an int64 to Decimal. // // Example: // -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" func NewFromInt(value int64) Decimal { return Decimal{ value: big.NewInt(value), @@ -104,12 +117,12 @@ func NewFromInt(value int64) Decimal { } } -// NewFromInt32 converts a int32 to Decimal. +// NewFromInt32 converts an int32 to Decimal. // // Example: // -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" func NewFromInt32(value int32) Decimal { return Decimal{ value: big.NewInt(int64(value)), @@ -117,6 +130,18 @@ func NewFromInt32(value int32) Decimal { } } +// NewFromUint64 converts an uint64 to Decimal. +// +// Example: +// +// NewFromUint64(123).String() // output: "123" +func NewFromUint64(value uint64) Decimal { + return Decimal{ + value: new(big.Int).SetUint64(value), + exp: 0, + } +} + // NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp func NewFromBigInt(value *big.Int, exp int32) Decimal { return Decimal{ @@ -125,15 +150,33 @@ func NewFromBigInt(value *big.Int, exp int32) Decimal { } } +// NewFromBigRat returns a new Decimal from a big.Rat. The numerator and +// denominator are divided and rounded to the given precision. +// +// Example: +// +// d1 := NewFromBigRat(big.NewRat(0, 1), 0) // output: "0" +// d2 := NewFromBigRat(big.NewRat(4, 5), 1) // output: "0.8" +// d3 := NewFromBigRat(big.NewRat(1000, 3), 3) // output: "333.333" +// d4 := NewFromBigRat(big.NewRat(2, 7), 4) // output: "0.2857" +func NewFromBigRat(value *big.Rat, precision int32) Decimal { + return Decimal{ + value: new(big.Int).Set(value.Num()), + exp: 0, + }.DivRound(Decimal{ + value: new(big.Int).Set(value.Denom()), + exp: 0, + }, precision) +} + // NewFromString returns a new Decimal from a string representation. // Trailing zeroes are not trimmed. 
// // Example: // -// d, err := NewFromString("-123.45") -// d2, err := NewFromString(".0001") -// d3, err := NewFromString("1.47000") -// +// d, err := NewFromString("-123.45") +// d2, err := NewFromString(".0001") +// d3, err := NewFromString("1.47000") func NewFromString(value string) (Decimal, error) { originalInput := value var intString string @@ -211,15 +254,14 @@ func NewFromString(value string) (Decimal, error) { // // Example: // -// r := regexp.MustCompile("[$,]") -// d1, err := NewFromFormattedString("$5,125.99", r) +// r := regexp.MustCompile("[$,]") +// d1, err := NewFromFormattedString("$5,125.99", r) // -// r2 := regexp.MustCompile("[_]") -// d2, err := NewFromFormattedString("1_000_000", r2) -// -// r3 := regexp.MustCompile("[USD\\s]") -// d3, err := NewFromFormattedString("5000 USD", r3) +// r2 := regexp.MustCompile("[_]") +// d2, err := NewFromFormattedString("1_000_000", r2) // +// r3 := regexp.MustCompile("[USD\\s]") +// d3, err := NewFromFormattedString("5000 USD", r3) func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) { parsedValue := replRegexp.ReplaceAllString(value, "") d, err := NewFromString(parsedValue) @@ -230,13 +272,12 @@ func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, e } // RequireFromString returns a new Decimal from a string representation -// or panics if NewFromString would have returned an error. +// or panics if NewFromString had returned an error. // // Example: // -// d := RequireFromString("-123.45") -// d2 := RequireFromString(".0001") -// +// d := RequireFromString("-123.45") +// d2 := RequireFromString(".0001") func RequireFromString(value string) Decimal { dec, err := NewFromString(value) if err != nil { @@ -332,8 +373,7 @@ func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { // // Example: // -// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" -// +// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" func NewFromFloatWithExponent(value float64, exp int32) Decimal { if math.IsNaN(value) || math.IsInf(value, 0) { panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) @@ -418,7 +458,7 @@ func NewFromFloatWithExponent(value float64, exp int32) Decimal { func (d Decimal) Copy() Decimal { d.ensureInitialized() return Decimal{ - value: &(*d.value), + value: new(big.Int).Set(d.value), exp: d.exp, } } @@ -430,7 +470,7 @@ func (d Decimal) Copy() Decimal { // // Example: // -// d := New(12345, -4) +// d := New(12345, -4) // d2 := d.rescale(-1) // d3 := d2.rescale(-4) // println(d1) @@ -442,7 +482,6 @@ func (d Decimal) Copy() Decimal { // 1.2345 // 1.2 // 1.2000 -// func (d Decimal) rescale(exp int32) Decimal { d.ensureInitialized() @@ -552,11 +591,13 @@ func (d Decimal) Div(d2 Decimal) Decimal { return d.DivRound(d2, int32(DivisionPrecision)) } -// QuoRem does divsion with remainder +// QuoRem does division with remainder // d.QuoRem(d2,precision) returns quotient q and remainder r such that -// d = d2 * q + r, q an integer multiple of 10^(-precision) -// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 -// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 +// +// d = d2 * q + r, q an integer multiple of 10^(-precision) +// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 +// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 +// // Note that precision<0 is allowed as input. 
func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { d.ensureInitialized() @@ -565,7 +606,7 @@ func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { panic("decimal division by 0") } scale := -precision - e := int64(d.exp - d2.exp - scale) + e := int64(d.exp) - int64(d2.exp) - int64(scale) if e > math.MaxInt32 || e < math.MinInt32 { panic("overflow in decimal QuoRem") } @@ -599,8 +640,10 @@ func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { // DivRound divides and rounds to a given precision // i.e. to an integer multiple of 10^(-precision) -// for a positive quotient digit 5 is rounded up, away from 0 -// if the quotient is negative then digit 5 is rounded down, away from 0 +// +// for a positive quotient digit 5 is rounded up, away from 0 +// if the quotient is negative then digit 5 is rounded down, away from 0 +// // Note that precision<0 is allowed as input. func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { // QuoRem already checks initialization @@ -628,24 +671,278 @@ func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { // Mod returns d % d2. func (d Decimal) Mod(d2 Decimal) Decimal { - quo := d.Div(d2).Truncate(0) - return d.Sub(d2.Mul(quo)) + _, r := d.QuoRem(d2, 0) + return r } -// Pow returns d to the power d2 +// Pow returns d to the power of d2. +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. +// +// Pow returns 0 (zero-value of Decimal) instead of error for power operation edge cases, to handle those edge cases use PowWithPrecision +// Edge cases not handled by Pow: +// - 0 ** 0 => undefined value +// - 0 ** y, where y < 0 => infinity +// - x ** y, where x < 0 and y is non-integer decimal => imaginary value +// +// Example: +// +// d1 := decimal.NewFromFloat(4.0) +// d2 := decimal.NewFromFloat(4.0) +// res1 := d1.Pow(d2) +// res1.String() // output: "256" +// +// d3 := decimal.NewFromFloat(5.0) +// d4 := decimal.NewFromFloat(5.73) +// res2 := d3.Pow(d4) +// res2.String() // output: "10118.08037125" func (d Decimal) Pow(d2 Decimal) Decimal { - var temp Decimal - if d2.IntPart() == 0 { - return NewFromFloat(1) + baseSign := d.Sign() + expSign := d2.Sign() + + if baseSign == 0 { + if expSign == 0 { + return Decimal{} + } + if expSign == 1 { + return Decimal{zeroInt, 0} + } + if expSign == -1 { + return Decimal{} + } + } + + if expSign == 0 { + return Decimal{oneInt, 0} + } + + // TODO: optimize extraction of fractional part + one := Decimal{oneInt, 0} + expIntPart, expFracPart := d2.QuoRem(one, 0) + + if baseSign == -1 && !expFracPart.IsZero() { + return Decimal{} + } + + intPartPow, _ := d.PowBigInt(expIntPart.value) + + // if exponent is an integer we don't need to calculate d1**frac(d2) + if expFracPart.value.Sign() == 0 { + return intPartPow + } + + // TODO: optimize NumDigits for more performant precision adjustment + digitsBase := d.NumDigits() + digitsExponent := d2.NumDigits() + + precision := digitsBase + + if digitsExponent > precision { + precision += digitsExponent + } + + precision += 6 + + // Calculate x ** frac(y), where + // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y)) + fracPartPow, err := d.Abs().Ln(-d.exp + int32(precision)) + if err != nil { + return Decimal{} + } + + fracPartPow = fracPartPow.Mul(expFracPart) + + fracPartPow, err = fracPartPow.ExpTaylor(-d.exp + int32(precision)) + if err != nil { + return Decimal{} + } + + // Join integer and fractional part, + // 
base ** (expBase + expFrac) = base ** expBase * base ** expFrac + res := intPartPow.Mul(fracPartPow) + + return res +} + +// PowWithPrecision returns d to the power of d2. +// Precision parameter specifies minimum precision of the result (digits after decimal point). +// Returned decimal is not rounded to 'precision' places after decimal point. +// +// PowWithPrecision returns error when: +// - 0 ** 0 => undefined value +// - 0 ** y, where y < 0 => infinity +// - x ** y, where x < 0 and y is non-integer decimal => imaginary value +// +// Example: +// +// d1 := decimal.NewFromFloat(4.0) +// d2 := decimal.NewFromFloat(4.0) +// res1, err := d1.PowWithPrecision(d2, 2) +// res1.String() // output: "256" +// +// d3 := decimal.NewFromFloat(5.0) +// d4 := decimal.NewFromFloat(5.73) +// res2, err := d3.PowWithPrecision(d4, 5) +// res2.String() // output: "10118.080371595015625" +// +// d5 := decimal.NewFromFloat(-3.0) +// d6 := decimal.NewFromFloat(-6.0) +// res3, err := d5.PowWithPrecision(d6, 10) +// res3.String() // output: "0.0013717421" +func (d Decimal) PowWithPrecision(d2 Decimal, precision int32) (Decimal, error) { + baseSign := d.Sign() + expSign := d2.Sign() + + if baseSign == 0 { + if expSign == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + if expSign == 1 { + return Decimal{zeroInt, 0}, nil + } + if expSign == -1 { + return Decimal{}, fmt.Errorf("cannot represent infinity value of 0 ** y, where y < 0") + } + } + + if expSign == 0 { + return Decimal{oneInt, 0}, nil + } + + // TODO: optimize extraction of fractional part + one := Decimal{oneInt, 0} + expIntPart, expFracPart := d2.QuoRem(one, 0) + + if baseSign == -1 && !expFracPart.IsZero() { + return Decimal{}, fmt.Errorf("cannot represent imaginary value of x ** y, where x < 0 and y is non-integer decimal") + } + + intPartPow, _ := d.powBigIntWithPrecision(expIntPart.value, precision) + + // if exponent is an integer we don't need to calculate d1**frac(d2) + if expFracPart.value.Sign() == 0 { + return intPartPow, nil + } + + // TODO: optimize NumDigits for more performant precision adjustment + digitsBase := d.NumDigits() + digitsExponent := d2.NumDigits() + + if int32(digitsBase) > precision { + precision = int32(digitsBase) + } + if int32(digitsExponent) > precision { + precision += int32(digitsExponent) + } + // increase precision by 10 to compensate for errors in further calculations + precision += 10 + + // Calculate x ** frac(y), where + // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y)) + fracPartPow, err := d.Abs().Ln(precision) + if err != nil { + return Decimal{}, err + } + + fracPartPow = fracPartPow.Mul(expFracPart) + + fracPartPow, err = fracPartPow.ExpTaylor(precision) + if err != nil { + return Decimal{}, err + } + + // Join integer and fractional part, + // base ** (expBase + expFrac) = base ** expBase * base ** expFrac + res := intPartPow.Mul(fracPartPow) + + return res, nil +} + +// PowInt32 returns d to the power of exp, where exp is int32. +// Only returns error when d and exp is 0, thus result is undefined. +// +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. 
+// +// Example: +// +// d1, err := decimal.NewFromFloat(4.0).PowInt32(4) +// d1.String() // output: "256" +// +// d2, err := decimal.NewFromFloat(3.13).PowInt32(5) +// d2.String() // output: "300.4150512793" +func (d Decimal) PowInt32(exp int32) (Decimal, error) { + if d.IsZero() && exp == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + + isExpNeg := exp < 0 + exp = abs(exp) + + n, result := d, New(1, 0) + + for exp > 0 { + if exp%2 == 1 { + result = result.Mul(n) + } + exp /= 2 + + if exp > 0 { + n = n.Mul(n) + } + } + + if isExpNeg { + return New(1, 0).DivRound(result, int32(PowPrecisionNegativeExponent)), nil + } + + return result, nil +} + +// PowBigInt returns d to the power of exp, where exp is big.Int. +// Only returns error when d and exp is 0, thus result is undefined. +// +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. +// +// Example: +// +// d1, err := decimal.NewFromFloat(3.0).PowBigInt(big.NewInt(3)) +// d1.String() // output: "27" +// +// d2, err := decimal.NewFromFloat(629.25).PowBigInt(big.NewInt(5)) +// d2.String() // output: "98654323103449.5673828125" +func (d Decimal) PowBigInt(exp *big.Int) (Decimal, error) { + return d.powBigIntWithPrecision(exp, int32(PowPrecisionNegativeExponent)) +} + +func (d Decimal) powBigIntWithPrecision(exp *big.Int, precision int32) (Decimal, error) { + if d.IsZero() && exp.Sign() == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") } - temp = d.Pow(d2.Div(NewFromFloat(2))) - if d2.IntPart()%2 == 0 { - return temp.Mul(temp) + + tmpExp := new(big.Int).Set(exp) + isExpNeg := exp.Sign() < 0 + + if isExpNeg { + tmpExp.Abs(tmpExp) + } + + n, result := d, New(1, 0) + + for tmpExp.Sign() > 0 { + if tmpExp.Bit(0) == 1 { + result = result.Mul(n) + } + tmpExp.Rsh(tmpExp, 1) + + if tmpExp.Sign() > 0 { + n = n.Mul(n) + } } - if d2.IntPart() > 0 { - return temp.Mul(temp).Mul(d) + + if isExpNeg { + return New(1, 0).DivRound(result, precision), nil } - return temp.Mul(temp).Div(d) + + return result, nil } // ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm. @@ -655,9 +952,8 @@ func (d Decimal) Pow(d2 Decimal) Decimal { // // Example: // -// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" -// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" -// +// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" +// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { // Algorithm based on Variable precision exponential function. // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham. 
@@ -747,15 +1043,14 @@ func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { // // Example: // -// d, err := NewFromFloat(26.1).ExpTaylor(2).String() -// d.String() // output: "216314672147.06" -// -// NewFromFloat(26.1).ExpTaylor(20).String() -// d.String() // output: "216314672147.05767284062928674083" +// d, err := NewFromFloat(26.1).ExpTaylor(2).String() +// d.String() // output: "216314672147.06" // -// NewFromFloat(26.1).ExpTaylor(-10).String() -// d.String() // output: "220000000000" +// NewFromFloat(26.1).ExpTaylor(20).String() +// d.String() // output: "216314672147.05767284062928674083" // +// NewFromFloat(26.1).ExpTaylor(-10).String() +// d.String() // output: "220000000000" func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { // Note(mwoss): Implementation can be optimized by exclusively using big.Int API only if d.IsZero() { @@ -812,14 +1107,162 @@ func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { return result, nil } +// Ln calculates natural logarithm of d. +// Precision argument specifies how precise the result must be (number of digits after decimal point). +// Negative precision is allowed. +// +// Example: +// +// d1, err := NewFromFloat(13.3).Ln(2) +// d1.String() // output: "2.59" +// +// d2, err := NewFromFloat(579.161).Ln(10) +// d2.String() // output: "6.3615805046" +func (d Decimal) Ln(precision int32) (Decimal, error) { + // Algorithm based on The Use of Iteration Methods for Approximating the Natural Logarithm, + // James F. Epperson, The American Mathematical Monthly, Vol. 96, No. 9, November 1989, pp. 831-835. + if d.IsNegative() { + return Decimal{}, fmt.Errorf("cannot calculate natural logarithm for negative decimals") + } + + if d.IsZero() { + return Decimal{}, fmt.Errorf("cannot represent natural logarithm of 0, result: -infinity") + } + + calcPrecision := precision + 2 + z := d.Copy() + + var comp1, comp3, comp2, comp4, reduceAdjust Decimal + comp1 = z.Sub(Decimal{oneInt, 0}) + comp3 = Decimal{oneInt, -1} + + // for decimal in range [0.9, 1.1] where ln(d) is close to 0 + usePowerSeries := false + + if comp1.Abs().Cmp(comp3) <= 0 { + usePowerSeries = true + } else { + // reduce input decimal to range [0.1, 1) + expDelta := int32(z.NumDigits()) + z.exp + z.exp -= expDelta + + // Input decimal was reduced by factor of 10^expDelta, thus we will need to add + // ln(10^expDelta) = expDelta * ln(10) + // to the result to compensate that + ln10 := ln10.withPrecision(calcPrecision) + reduceAdjust = NewFromInt32(expDelta) + reduceAdjust = reduceAdjust.Mul(ln10) + + comp1 = z.Sub(Decimal{oneInt, 0}) + + if comp1.Abs().Cmp(comp3) <= 0 { + usePowerSeries = true + } else { + // initial estimate using floats + zFloat := z.InexactFloat64() + comp1 = NewFromFloat(math.Log(zFloat)) + } + } + + epsilon := Decimal{oneInt, -calcPrecision} + + if usePowerSeries { + // Power Series - https://en.wikipedia.org/wiki/Logarithm#Power_series + // Calculating n-th term of formula: ln(z+1) = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ] + // until the difference between current and next term is smaller than epsilon. 
+ // Coverage quite fast for decimals close to 1.0 + + // z + 2 + comp2 = comp1.Add(Decimal{twoInt, 0}) + // z / (z + 2) + comp3 = comp1.DivRound(comp2, calcPrecision) + // 2 * (z / (z + 2)) + comp1 = comp3.Add(comp3) + comp2 = comp1.Copy() + + for n := 1; ; n++ { + // 2 * (z / (z+2))^(2n+1) + comp2 = comp2.Mul(comp3).Mul(comp3) + + // 1 / (2n+1) * 2 * (z / (z+2))^(2n+1) + comp4 = NewFromInt(int64(2*n + 1)) + comp4 = comp2.DivRound(comp4, calcPrecision) + + // comp1 = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ] + comp1 = comp1.Add(comp4) + + if comp4.Abs().Cmp(epsilon) <= 0 { + break + } + } + } else { + // Halley's Iteration. + // Calculating n-th term of formula: a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z), + // until the difference between current and next term is smaller than epsilon + var prevStep Decimal + maxIters := calcPrecision*2 + 10 + + for i := int32(0); i < maxIters; i++ { + // exp(a_n) + comp3, _ = comp1.ExpTaylor(calcPrecision) + // exp(a_n) - z + comp2 = comp3.Sub(z) + // 2 * (exp(a_n) - z) + comp2 = comp2.Add(comp2) + // exp(a_n) + z + comp4 = comp3.Add(z) + // 2 * (exp(a_n) - z) / (exp(a_n) + z) + comp3 = comp2.DivRound(comp4, calcPrecision) + // comp1 = a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z) + comp1 = comp1.Sub(comp3) + + if prevStep.Add(comp3).IsZero() { + // If iteration steps oscillate we should return early and prevent an infinity loop + // NOTE(mwoss): This should be quite a rare case, returning error is not necessary + break + } + + if comp3.Abs().Cmp(epsilon) <= 0 { + break + } + + prevStep = comp3 + } + } + + comp1 = comp1.Add(reduceAdjust) + + return comp1.Round(precision), nil +} + // NumDigits returns the number of digits of the decimal coefficient (d.Value) -// Note: Current implementation is extremely slow for large decimals and/or decimals with large fractional part func (d Decimal) NumDigits() int { - // Note(mwoss): It can be optimized, unnecessary cast of big.Int to string - if d.IsNegative() { - return len(d.value.String()) - 1 + if d.value == nil { + return 1 + } + + if d.value.IsInt64() { + i64 := d.value.Int64() + // restrict fast path to integers with exact conversion to float64 + if i64 <= (1<<53) && i64 >= -(1<<53) { + if i64 == 0 { + return 1 + } + return int(math.Log10(math.Abs(float64(i64)))) + 1 + } + } + + estimatedNumDigits := int(float64(d.value.BitLen()) / math.Log2(10)) + + // estimatedNumDigits (lg10) may be off by 1, need to verify + digitsBigInt := big.NewInt(int64(estimatedNumDigits)) + errorCorrectionUnit := digitsBigInt.Exp(tenInt, digitsBigInt, nil) + + if d.value.CmpAbs(errorCorrectionUnit) >= 0 { + return estimatedNumDigits + 1 } - return len(d.value.String()) + + return estimatedNumDigits } // IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false. @@ -851,10 +1294,9 @@ func abs(n int32) int32 { // Cmp compares the numbers represented by d and d2 and returns: // -// -1 if d < d2 -// 0 if d == d2 -// +1 if d > d2 -// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 func (d Decimal) Cmp(d2 Decimal) int { d.ensureInitialized() d2.ensureInitialized() @@ -868,12 +1310,21 @@ func (d Decimal) Cmp(d2 Decimal) int { return rd.value.Cmp(rd2.value) } +// Compare compares the numbers represented by d and d2 and returns: +// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 +func (d Decimal) Compare(d2 Decimal) int { + return d.Cmp(d2) +} + // Equal returns whether the numbers represented by d and d2 are equal. 
func (d Decimal) Equal(d2 Decimal) bool { return d.Cmp(d2) == 0 } -// Equals is deprecated, please use Equal method instead +// Deprecated: Equals is deprecated, please use Equal method instead. func (d Decimal) Equals(d2 Decimal) bool { return d.Equal(d2) } @@ -905,7 +1356,6 @@ func (d Decimal) LessThanOrEqual(d2 Decimal) bool { // -1 if d < 0 // 0 if d == 0 // +1 if d > 0 -// func (d Decimal) Sign() int { if d.value == nil { return 0 @@ -968,9 +1418,7 @@ func (d Decimal) IntPart() int64 { // BigInt returns integer component of the decimal as a BigInt. func (d Decimal) BigInt() *big.Int { scaledD := d.rescale(0) - i := &big.Int{} - i.SetString(scaledD.String(), 10) - return i + return scaledD.value } // BigFloat returns decimal as BigFloat. @@ -1014,13 +1462,12 @@ func (d Decimal) InexactFloat64() float64 { // // Example: // -// d := New(-12345, -3) -// println(d.String()) +// d := New(-12345, -3) +// println(d.String()) // // Output: // -// -12.345 -// +// -12.345 func (d Decimal) String() string { return d.string(true) } @@ -1030,14 +1477,13 @@ func (d Decimal) String() string { // // Example: // -// NewFromFloat(0).StringFixed(2) // output: "0.00" -// NewFromFloat(0).StringFixed(0) // output: "0" -// NewFromFloat(5.45).StringFixed(0) // output: "5" -// NewFromFloat(5.45).StringFixed(1) // output: "5.5" -// NewFromFloat(5.45).StringFixed(2) // output: "5.45" -// NewFromFloat(5.45).StringFixed(3) // output: "5.450" -// NewFromFloat(545).StringFixed(-1) // output: "550" -// +// NewFromFloat(0).StringFixed(2) // output: "0.00" +// NewFromFloat(0).StringFixed(0) // output: "0" +// NewFromFloat(5.45).StringFixed(0) // output: "5" +// NewFromFloat(5.45).StringFixed(1) // output: "5.5" +// NewFromFloat(5.45).StringFixed(2) // output: "5.45" +// NewFromFloat(5.45).StringFixed(3) // output: "5.450" +// NewFromFloat(545).StringFixed(-1) // output: "550" func (d Decimal) StringFixed(places int32) string { rounded := d.Round(places) return rounded.string(false) @@ -1048,14 +1494,13 @@ func (d Decimal) StringFixed(places int32) string { // // Example: // -// NewFromFloat(0).StringFixedBank(2) // output: "0.00" -// NewFromFloat(0).StringFixedBank(0) // output: "0" -// NewFromFloat(5.45).StringFixedBank(0) // output: "5" -// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" -// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" -// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" -// NewFromFloat(545).StringFixedBank(-1) // output: "540" -// +// NewFromFloat(0).StringFixedBank(2) // output: "0.00" +// NewFromFloat(0).StringFixedBank(0) // output: "0" +// NewFromFloat(5.45).StringFixedBank(0) // output: "5" +// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" +// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" +// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" +// NewFromFloat(545).StringFixedBank(-1) // output: "540" func (d Decimal) StringFixedBank(places int32) string { rounded := d.RoundBank(places) return rounded.string(false) @@ -1073,9 +1518,8 @@ func (d Decimal) StringFixedCash(interval uint8) string { // // Example: // -// NewFromFloat(5.45).Round(1).String() // output: "5.5" -// NewFromFloat(545).Round(-1).String() // output: "550" -// +// NewFromFloat(5.45).Round(1).String() // output: "5.5" +// NewFromFloat(545).Round(-1).String() // output: "550" func (d Decimal) Round(places int32) Decimal { if d.exp == -places { return d @@ -1104,11 +1548,10 @@ func (d Decimal) Round(places int32) Decimal { // // Example: // -// 
NewFromFloat(545).RoundCeil(-2).String() // output: "600" -// NewFromFloat(500).RoundCeil(-2).String() // output: "500" -// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" -// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.5" -// +// NewFromFloat(545).RoundCeil(-2).String() // output: "600" +// NewFromFloat(500).RoundCeil(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.4" func (d Decimal) RoundCeil(places int32) Decimal { if d.exp >= -places { return d @@ -1130,11 +1573,10 @@ func (d Decimal) RoundCeil(places int32) Decimal { // // Example: // -// NewFromFloat(545).RoundFloor(-2).String() // output: "500" -// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" -// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" -// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.4" -// +// NewFromFloat(545).RoundFloor(-2).String() // output: "500" +// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.5" func (d Decimal) RoundFloor(places int32) Decimal { if d.exp >= -places { return d @@ -1156,11 +1598,10 @@ func (d Decimal) RoundFloor(places int32) Decimal { // // Example: // -// NewFromFloat(545).RoundUp(-2).String() // output: "600" -// NewFromFloat(500).RoundUp(-2).String() // output: "500" -// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" -// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.4" -// +// NewFromFloat(545).RoundUp(-2).String() // output: "600" +// NewFromFloat(500).RoundUp(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.5" func (d Decimal) RoundUp(places int32) Decimal { if d.exp >= -places { return d @@ -1184,11 +1625,10 @@ func (d Decimal) RoundUp(places int32) Decimal { // // Example: // -// NewFromFloat(545).RoundDown(-2).String() // output: "500" -// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" -// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" -// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.5" -// +// NewFromFloat(545).RoundDown(-2).String() // output: "500" +// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.4" func (d Decimal) RoundDown(places int32) Decimal { if d.exp >= -places { return d @@ -1209,13 +1649,12 @@ func (d Decimal) RoundDown(places int32) Decimal { // // Examples: // -// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" -// NewFromFloat(545).RoundBank(-1).String() // output: "540" -// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" -// NewFromFloat(546).RoundBank(-1).String() // output: "550" -// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" -// NewFromFloat(555).RoundBank(-1).String() // output: "560" -// +// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" +// NewFromFloat(545).RoundBank(-1).String() // output: "540" +// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" +// NewFromFloat(546).RoundBank(-1).String() // output: "550" +// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" +// NewFromFloat(555).RoundBank(-1).String() // output: "560" func (d Decimal) RoundBank(places int32) Decimal { round := 
d.Round(places) @@ -1237,11 +1676,13 @@ func (d Decimal) RoundBank(places int32) Decimal { // interval. The amount payable for a cash transaction is rounded to the nearest // multiple of the minimum currency unit available. The following intervals are // available: 5, 10, 25, 50 and 100; any other number throws a panic. -// 5: 5 cent rounding 3.43 => 3.45 -// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) -// 25: 25 cent rounding 3.41 => 3.50 -// 50: 50 cent rounding 3.75 => 4.00 -// 100: 100 cent rounding 3.50 => 4.00 +// +// 5: 5 cent rounding 3.43 => 3.45 +// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) +// 25: 25 cent rounding 3.41 => 3.50 +// 50: 50 cent rounding 3.75 => 4.00 +// 100: 100 cent rounding 3.50 => 4.00 +// // For more details: https://en.wikipedia.org/wiki/Cash_rounding func (d Decimal) RoundCash(interval uint8) Decimal { var iVal *big.Int @@ -1310,8 +1751,7 @@ func (d Decimal) Ceil() Decimal { // // Example: // -// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" -// +// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" func (d Decimal) Truncate(precision int32) Decimal { d.ensureInitialized() if precision >= 0 && -precision > d.exp { @@ -1373,19 +1813,18 @@ func (d *Decimal) UnmarshalBinary(data []byte) error { // MarshalBinary implements the encoding.BinaryMarshaler interface. func (d Decimal) MarshalBinary() (data []byte, err error) { - // Write the exponent first since it's a fixed size - v1 := make([]byte, 4) - binary.BigEndian.PutUint32(v1, uint32(d.exp)) - - // Add the value - var v2 []byte - if v2, err = d.value.GobEncode(); err != nil { - return + // exp is written first, but encode value first to know output size + var valueData []byte + if valueData, err = d.value.GobEncode(); err != nil { + return nil, err } + // Write the exponent in front, since it's a fixed size + expData := make([]byte, 4, len(valueData)+4) + binary.BigEndian.PutUint32(expData, uint32(d.exp)) + // Return the byte array - data = append(v1, v2...) - return + return append(expData, valueData...), nil } // Scan implements the sql.Scanner interface for database deserialization. @@ -1408,6 +1847,11 @@ func (d *Decimal) Scan(value interface{}) error { *d = New(v, 0) return nil + case uint64: + // while clickhouse may send 0 in db as uint64 + *d = NewFromUint64(v) + return nil + default: // default is trying to interpret value stored as string str, err := unquoteIfQuoted(v) @@ -1455,7 +1899,8 @@ func (d *Decimal) GobDecode(data []byte) error { } // StringScaled first scales the decimal then calls .String() on it. -// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. +// +// Deprecated: buggy and unintuitive. Use StringFixed instead. func (d Decimal) StringScaled(exp int32) string { return d.rescale(exp).String() } @@ -1515,7 +1960,7 @@ func (d *Decimal) ensureInitialized() { // // To call this function with an array, you must do: // -// Min(arr[0], arr[1:]...) +// Min(arr[0], arr[1:]...) // // This makes it harder to accidentally call Min with 0 arguments. func Min(first Decimal, rest ...Decimal) Decimal { @@ -1532,7 +1977,7 @@ func Min(first Decimal, rest ...Decimal) Decimal { // // To call this function with an array, you must do: // -// Max(arr[0], arr[1:]...) +// Max(arr[0], arr[1:]...) // // This makes it harder to accidentally call Max with 0 arguments. 
func Max(first Decimal, rest ...Decimal) Decimal { @@ -1567,22 +2012,13 @@ func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { d1.ensureInitialized() d2.ensureInitialized() - if d1.exp == d2.exp { - return d1, d2 + if d1.exp < d2.exp { + return d1, d2.rescale(d1.exp) + } else if d1.exp > d2.exp { + return d1.rescale(d2.exp), d2 } - baseScale := min(d1.exp, d2.exp) - if baseScale != d1.exp { - return d1.rescale(baseScale), d2 - } - return d1, d2.rescale(baseScale) -} - -func min(x, y int32) int32 { - if x >= y { - return y - } - return x + return d1, d2 } func unquoteIfQuoted(value interface{}) (string, error) { @@ -1594,8 +2030,7 @@ func unquoteIfQuoted(value interface{}) (string, error) { case []byte: bytes = v default: - return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", - value, value) + return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", value, value) } // If the amount is quoted, strip the quotes diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE index 619a5a7ea77..a92cb34d674 100644 --- a/vendor/github.com/skeema/knownhosts/NOTICE +++ b/vendor/github.com/skeema/knownhosts/NOTICE @@ -1,4 +1,4 @@ -Copyright 2023 Skeema LLC and the Skeema Knownhosts authors +Copyright 2024 Skeema LLC and the Skeema Knownhosts authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md index 85339bc03b0..36b847614ca 100644 --- a/vendor/github.com/skeema/knownhosts/README.md +++ b/vendor/github.com/skeema/knownhosts/README.md @@ -100,7 +100,7 @@ config := &ssh.ClientConfig{ ## License -**Source code copyright 2023 Skeema LLC and the Skeema Knownhosts authors** +**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors** ```text Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go index c2fb5160576..4dad7771b88 100644 --- a/vendor/github.com/skeema/knownhosts/knownhosts.go +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -76,13 +76,23 @@ func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []stri // example by https://github.com/golang/crypto/pull/254. hostKeys := hkcb.HostKeys(hostWithPort) seen := make(map[string]struct{}, len(hostKeys)) - for _, key := range hostKeys { - typ := key.Type() + addAlgo := func(typ string) { if _, already := seen[typ]; !already { algos = append(algos, typ) seen[typ] = struct{}{} } } + for _, key := range hostKeys { + typ := key.Type() + if typ == ssh.KeyAlgoRSA { + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, + // not public key formats, so they can't appear as a PublicKey.Type. + // The corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. 
+ addAlgo(ssh.KeyAlgoRSASHA512) + addAlgo(ssh.KeyAlgoRSASHA256) + } + addAlgo(typ) + } return algos } diff --git a/vendor/github.com/spectrocloud-labs/herd/.golangci.yml b/vendor/github.com/spectrocloud-labs/herd/.golangci.yml deleted file mode 100644 index d48f0296a91..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -run: - timeout: 5m - tests: false -linters: - enable: - - revive # replacement for golint - - dupl # check duplicated code - - goconst # check strings that can turn into constants - - gofmt # check fmt - - goheader # Check license headers, only checks files in current year - - goimports # check imports - - gocyclo # check complexity - - govet - - gosimple - - deadcode - - ineffassign - - unused - - varcheck - - staticcheck - - typecheck - - structcheck - - godot - - misspell \ No newline at end of file diff --git a/vendor/github.com/spectrocloud-labs/herd/Earthfile b/vendor/github.com/spectrocloud-labs/herd/Earthfile deleted file mode 100644 index 2861a478842..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/Earthfile +++ /dev/null @@ -1,31 +0,0 @@ -VERSION 0.6 - -ARG GO_VERSION=1.18 -ARG GOLINT_VERSION=1.47.3 - -go-deps: - ARG GO_VERSION - FROM golang:$GO_VERSION - WORKDIR /build - COPY go.mod go.sum ./ - RUN go mod download - RUN apt-get update - SAVE ARTIFACT go.mod AS LOCAL go.mod - SAVE ARTIFACT go.sum AS LOCAL go.sum - -test: - FROM +go-deps - WORKDIR /build - RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - COPY . . - RUN ginkgo run --race --fail-fast --slow-spec-threshold 30s --covermode=atomic --coverprofile=coverage.out -p -r ./ - SAVE ARTIFACT coverage.out AS LOCAL coverage.out - -lint: - ARG GO_VERSION - FROM golang:$GO_VERSION - ARG GOLINT_VERSION - RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v$GOLINT_VERSION - WORKDIR /build - COPY . . - RUN golangci-lint run \ No newline at end of file diff --git a/vendor/github.com/spectrocloud-labs/herd/LICENSE b/vendor/github.com/spectrocloud-labs/herd/LICENSE deleted file mode 100644 index ad671f24470..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 Ettore Di Giacinto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/spectrocloud-labs/herd/README.md b/vendor/github.com/spectrocloud-labs/herd/README.md deleted file mode 100644 index 61f61c8a568..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# :checkered_flag: herd - -[![Go Reference](https://pkg.go.dev/badge/github.com/spectrocloud-labs/herd.svg)](https://pkg.go.dev/github.com/spectrocloud-labs/herd) -[![Lint](https://github.com/spectrocloud-labs/herd/actions/workflows/lint.yaml/badge.svg)](https://github.com/spectrocloud-labs/herd/actions/workflows/lint.yaml) -[![Unit tests](https://github.com/spectrocloud-labs/herd/actions/workflows/test.yaml/badge.svg)](https://github.com/spectrocloud-labs/herd/actions/workflows/test.yaml) - -Herd is a Embedded Runnable DAG (H.E.R.D.). it aims to be a tiny library that allows to define arbitrary DAG, and associate job operations on them. - -## Why? - -I've found couple of nice libraries ([fx](https://github.com/uber-go/fx), or [dag](https://github.com/mostafa-asg/dag) for instance), however none of them satisfied my constraints: - -- Tiny -- Completely tested (TDD) -- Define jobs in a DAG, runs them in sequence, execute the ones that can be done in parallel (parallel topological sorting) in separate go routines -- Provide some sorta of similarity with `systemd` concepts - -## Usage - -`herd` can be used as a library as such: - -```golang -package main - -import ( - "context" - - "github.com/spectrocloud-labs/herd" -) - -func main() { - - // Generic usage - g := herd.DAG() - g.Add("name", ...) - g.Run(context.TODO()) - - // Example - f := "" - g.Add("foo", herd.WithCallback(func(ctx context.Context) error { - f += "foo" - // This executes after "bar" has ended successfully. - return nil - }), herd.WithDeps("bar")) - - g.Add("bar", herd.WithCallback(func(ctx context.Context) error { - f += "bar" - // This execute first - return nil - })) - - // Execute the DAG - g.Run(context.Background()) - // f is "barfoo" -} - -``` \ No newline at end of file diff --git a/vendor/github.com/spectrocloud-labs/herd/dag_options.go b/vendor/github.com/spectrocloud-labs/herd/dag_options.go deleted file mode 100644 index bc359552320..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/dag_options.go +++ /dev/null @@ -1,15 +0,0 @@ -package herd - -// GraphOption it's the option for the DAG graph. -type GraphOption func(g *Graph) - -// EnableInit enables an Init jobs that takes paternity -// of orphan jobs without dependencies. -var EnableInit GraphOption = func(g *Graph) { - g.init = true -} - -// CollectOrphans enables orphan job collection. 
-var CollectOrphans GraphOption = func(g *Graph) { - g.collectOrphans = true -} diff --git a/vendor/github.com/spectrocloud-labs/herd/ops.go b/vendor/github.com/spectrocloud-labs/herd/ops.go deleted file mode 100644 index 59ff1e3c297..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/ops.go +++ /dev/null @@ -1,35 +0,0 @@ -package herd - -import ( - "context" - "sync" -) - -type OpState struct { - sync.Mutex - fn []func(context.Context) error - err error - fatal bool - background bool - executed bool - weak bool - weakdeps []string - deps []string - ignore bool -} - -func (o *OpState) toGraphEntry(name string) GraphEntry { - return GraphEntry{ - WithCallback: o.fn != nil, - Callback: o.fn, - Error: o.err, - Executed: o.executed, - Background: o.background, - WeakDeps: o.weak, - Dependencies: o.deps, - WeakDependencies: o.weakdeps, - Fatal: o.fatal, - Name: name, - Ignored: o.ignore, - } -} diff --git a/vendor/github.com/spectrocloud-labs/herd/ops_options.go b/vendor/github.com/spectrocloud-labs/herd/ops_options.go deleted file mode 100644 index e1c3cc02eed..00000000000 --- a/vendor/github.com/spectrocloud-labs/herd/ops_options.go +++ /dev/null @@ -1,100 +0,0 @@ -package herd - -import "context" - -// OpOption defines the operation settings. -type OpOption func(string, *OpState, *Graph) error - -var NoOp OpOption = func(s string, os *OpState, g *Graph) error { return nil } - -// FatalOp makes the operation fatal. -// Any error will make the DAG to stop and return the error immediately. -var FatalOp OpOption = func(key string, os *OpState, g *Graph) error { - os.fatal = true - return nil -} - -// Background runs the operation in the background. -var Background OpOption = func(key string, os *OpState, g *Graph) error { - os.background = true - return nil -} - -// WeakDeps sets all the dependencies of the job as "weak". -// Any failure of the jobs which depends on won't impact running the job. -// By default, a failure job will make also fail all the children - this is option -// disables this behavor and make the child start too. -var WeakDeps OpOption = func(key string, os *OpState, g *Graph) error { - os.weak = true - return nil -} - -// WithWeakDeps defines dependencies that doesn't prevent the op to trigger. -func WithWeakDeps(deps ...string) OpOption { - return func(key string, os *OpState, g *Graph) error { - - err := WithDeps(deps...)(key, os, g) - if err != nil { - return err - } - os.weakdeps = append(os.weakdeps, deps...) - return nil - } -} - -// WithDeps defines an operation dependency. -// Dependencies can be expressed as a string. -// Note: before running the DAG you must define all the operations. -func WithDeps(deps ...string) OpOption { - return func(key string, os *OpState, g *Graph) error { - os.deps = append(os.deps, deps...) - - for _, d := range deps { - if err := g.Graph.DependOn(key, d); err != nil { - return err - } - } - return nil - } -} - -// ConditionalOption defines an option that is enabled only if the -// conditional callback returns true. -func ConditionalOption(condition func() bool, op OpOption) OpOption { - if condition() { - return op - } - - return NoOp -} - -// IfElse defines options that are enabled if the condition passess or not -// It is just syntax sugar. -func IfElse(condition bool, op, noOp OpOption) OpOption { - if condition { - return op - } - - return noOp -} - -// EnableIf defines an operation dependency. -// Dependencies can be expressed as a string. -// Note: before running the DAG you must define all the operations. 
-func EnableIf(conditional func() bool) OpOption { - return func(key string, os *OpState, g *Graph) error { - if !conditional() { - os.ignore = true - } - return nil - } -} - -// WithCallback associates a callback to the operation to be executed -// when the DAG is walked-by. -func WithCallback(fn ...func(context.Context) error) OpOption { - return func(s string, os *OpState, g *Graph) error { - os.fn = append(os.fn, fn...) - return nil - } -} diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go index 39f65852099..199480cd05e 100644 --- a/vendor/github.com/spf13/afero/afero.go +++ b/vendor/github.com/spf13/afero/afero.go @@ -97,7 +97,7 @@ type Fs interface { // Chown changes the uid and gid of the named file. Chown(name string, uid, gid int) error - // Chtimes changes the access and modification times of the named file + //Chtimes changes the access and modification times of the named file Chtimes(name string, atime time.Time, mtime time.Time) error } diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 2e72793a3e1..70a1d91680b 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -40,6 +40,7 @@ func (f *BasePathFile) Name() string { func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { if rdf, ok := f.File.(fs.ReadDirFile); ok { return rdf.ReadDir(n) + } return readDirFile{f.File}.ReadDir(n) } diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index 184d6dd702a..6ff8f3099a1 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -223,7 +223,7 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return nil, err } if isaDir { - if err = u.layer.MkdirAll(dir, 0o777); err != nil { + if err = u.layer.MkdirAll(dir, 0777); err != nil { return nil, err } return u.layer.OpenFile(name, flag, perm) @@ -247,9 +247,8 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, // This function handles the 9 different possibilities caused // by the union which are the intersection of the following... -// -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory func (u *CopyOnWriteFs) Open(name string) (File, error) { // Since the overlay overrides the base we check that first b, err := u.isBaseFile(name) @@ -323,5 +322,5 @@ func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { } func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) } diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index fa6abe1eeec..386c9cdc227 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -141,10 +141,8 @@ func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in // TempFile to a minimum. 
-var ( - randNum uint32 - randmu sync.Mutex -) +var randNum uint32 +var randmu sync.Mutex func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -192,7 +190,7 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() @@ -216,7 +214,6 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { func (a Afero) TempDir(dir, prefix string) (name string, err error) { return TempDir(a.Fs, dir, prefix) } - func TempDir(fs Fs, dir, prefix string) (name string, err error) { if dir == "" { dir = os.TempDir() @@ -225,7 +222,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { nconflict := 0 for i := 0; i < 10000; i++ { try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0o700) + err = fs.Mkdir(try, 0700) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 62fe4498e19..3cf4693b5a2 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -245,7 +245,7 @@ func (f *File) Truncate(size int64) error { defer f.fileData.Unlock() if size > int64(len(f.fileData.data)) { diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) } else { f.fileData.data = f.fileData.data[0:size] } @@ -285,7 +285,7 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) + f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...) f.fileData.data = append(f.fileData.data, tail...) } else { f.fileData.data = append(f.fileData.data[:cur], b...) @@ -321,19 +321,16 @@ func (s *FileInfo) Name() string { s.Unlock() return name } - func (s *FileInfo) Mode() os.FileMode { s.Lock() defer s.Unlock() return s.mode } - func (s *FileInfo) ModTime() time.Time { s.Lock() defer s.Unlock() return s.modtime } - func (s *FileInfo) IsDir() bool { s.Lock() defer s.Unlock() diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index e6b7d70b943..d06975e7127 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -15,7 +15,6 @@ package afero import ( "fmt" - "io" "log" "os" "path/filepath" @@ -44,7 +43,7 @@ func (m *MemMapFs) getData() map[string]*mem.FileData { // Root should always exist, right? // TODO: what about windows? 
root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0o755) + mem.SetMode(root, os.ModeDir|0755) m.data[FilePathSeparator] = root }) return m.data @@ -97,12 +96,12 @@ func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { pdir := filepath.Dir(filepath.Clean(f.Name())) err := m.lockfreeMkdir(pdir, perm) if err != nil { - // log.Println("Mkdir error:", err) + //log.Println("Mkdir error:", err) return } parent, err = m.lockfreeOpen(pdir) if err != nil { - // log.Println("Open after Mkdir error:", err) + //log.Println("Open after Mkdir error:", err) return } } @@ -238,7 +237,7 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) } if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, io.SeekEnd) + _, err = file.Seek(0, os.SEEK_END) if err != nil { file.Close() return nil, err @@ -320,18 +319,6 @@ func (m *MemMapFs) Rename(oldname, newname string) error { } else { return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} } - - for p, fileData := range m.getData() { - if strings.HasPrefix(p, oldname+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - p := strings.Replace(p, oldname, newname, 1) - m.getData()[p] = fileData - m.mu.Unlock() - m.mu.RLock() - } - } return nil } diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go index 218f3b235bd..ac359c62a05 100644 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -10,6 +10,7 @@ import ( // The RegexpFs filters files (not directories) by regular expression. Only // files matching the given regexp will be allowed, all others get a ENOENT error ( // "No such file or directory"). +// type RegexpFs struct { re *regexp.Regexp source Fs diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go index aa6ae125b65..d1c6ea53d95 100644 --- a/vendor/github.com/spf13/afero/symlink.go +++ b/vendor/github.com/spf13/afero/symlink.go @@ -21,9 +21,9 @@ import ( // filesystems saying so. 
// It indicates support for 3 symlink related interfaces that implement the // behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink +// - Lstat +// - Symlink, and +// - Readlink type Symlinker interface { Lstater Linker diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 62dd6c93c83..333d367f48b 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -47,7 +47,7 @@ func (f *UnionFile) Read(s []byte) (int, error) { if (err == nil || err == io.EOF) && f.Base != nil { // advance the file position also in the base file, the next // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil { + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { // only overwrite err in case the seek fails: we need to // report an eventual io.EOF to the caller err = seekErr @@ -130,7 +130,7 @@ func (f *UnionFile) Name() string { type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - files := make(map[string]os.FileInfo) + var files = make(map[string]os.FileInfo) for _, fi := range lofi { files[fi.Name()] = fi @@ -151,6 +151,7 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err } return rfi, nil + } // Readdir will weave the two directories together and @@ -274,7 +275,7 @@ func copyFile(base Fs, layer Fs, name string, bfh File) error { return err } if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? if err != nil { return err } diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 9e4cba2746a..cb7de23f269 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -43,7 +43,7 @@ func WriteReader(fs Fs, path string, r io.Reader) (err error) { ospath := filepath.FromSlash(dir) if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r if err != nil { if err != os.ErrExist { return err @@ -71,7 +71,7 @@ func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { ospath := filepath.FromSlash(dir) if ospath != "" { - err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r if err != nil { return } @@ -124,7 +124,7 @@ func GetTempDir(fs Fs, subPath string) string { return addSlash(dir) } - err := fs.MkdirAll(dir, 0o777) + err := fs.MkdirAll(dir, 0777) if err != nil { panic(err) } @@ -197,6 +197,7 @@ func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, err // readerContains reports whether any of the subslices is within r. 
func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + if r == nil || len(subslices) == 0 { return false } diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index 120a573426b..0e9e1459358 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,8 +1,9 @@ -cast -==== -[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) +# cast + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) Easy and safe casting from one type to another in Go @@ -17,7 +18,7 @@ interface into a bool, etc. Cast does this intelligently when an obvious conversion is possible. It doesn’t make any attempts to guess what you meant, for example you can only convert a string to an int when it is a string representation of an int such as “8”. Cast was developed for use in -[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON +[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON for meta data. ## Why use Cast? @@ -72,4 +73,3 @@ the code for a complete set. var eight interface{} = 8 cast.ToInt(eight) // 8 cast.ToInt(nil) // 0 - diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index 514d759bfb5..d49bbf83ecb 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -98,10 +98,31 @@ func ToBoolE(i interface{}) (bool, error) { case nil: return false, nil case int: - if i.(int) != 0 { - return true, nil - } - return false, nil + return b != 0, nil + case int64: + return b != 0, nil + case int32: + return b != 0, nil + case int16: + return b != 0, nil + case int8: + return b != 0, nil + case uint: + return b != 0, nil + case uint64: + return b != 0, nil + case uint32: + return b != 0, nil + case uint16: + return b != 0, nil + case uint8: + return b != 0, nil + case float64: + return b != 0, nil + case float32: + return b != 0, nil + case time.Duration: + return b != 0, nil case string: return strconv.ParseBool(i.(string)) case json.Number: @@ -1385,6 +1406,8 @@ func (f timeFormat) hasTimezone() bool { var ( timeFormats = []timeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", timeFormatNoTimezone}, {time.RFC3339, timeFormatNumericTimezone}, {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone {time.RFC1123Z, timeFormatNumericTimezone}, @@ -1400,7 +1423,6 @@ var ( {time.UnixDate, timeFormatNamedTimezone}, {time.RubyDate, timeFormatNumericTimezone}, {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"2006-01-02", timeFormatNoTimezone}, {"02 Jan 2006", timeFormatNoTimezone}, {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2578d94b5eb..a618ec24d84 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -19,7 +19,7 @@ linters: disable-all: true enable: #- bodyclose - - deadcode + # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -51,12 +51,12 @@ linters: #- rowserrcheck #- scopelint #- staticcheck - - structcheck + #- structcheck ! deprecated since v1.49.0; replaced by 'unused' #- stylecheck #- typecheck - unconvert #- unparam - #- unused - - varcheck + - unused + # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace fast: false diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 592c0b8ab05..6444f4b7f6f 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -4,7 +4,7 @@ Cobra is a library for creating powerful modern CLI applications. Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to -name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. +name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. [![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) @@ -80,7 +80,7 @@ which maintains the same interface while adding POSIX compliance. # Installing Using Cobra is easy. First, use `go get` to install the latest version -of the library. +of the library. ``` go get -u github.com/spf13/cobra@latest @@ -105,8 +105,8 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md). +For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). # License -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) +Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 2d0239437a8..5f965e057f2 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,6 +17,7 @@ package cobra import ( "fmt" "os" + "regexp" "strings" ) @@ -29,6 +30,8 @@ const ( activeHelpGlobalDisable = "0" ) +var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -42,7 +45,7 @@ func AppendActiveHelp(compArray []string, activeHelpStr string) []string { // GetActiveHelpConfig returns the value of the ActiveHelp environment variable // _ACTIVE_HELP where is the name of the root command in upper -// case, with all - replaced by _. +// case, with all non-ASCII-alphanumeric characters replaced by `_`. // It will always return "0" if the global environment variable COBRA_ACTIVE_HELP // is set to "0". func GetActiveHelpConfig(cmd *Command) string { @@ -55,9 +58,10 @@ func GetActiveHelpConfig(cmd *Command) string { // activeHelpEnvVar returns the name of the program-specific ActiveHelp environment // variable. It has the format _ACTIVE_HELP where is the name of the -// root command in upper case, with all - replaced by _. +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { // This format should not be changed: users will be using it explicitly. activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - return strings.ReplaceAll(activeHelpEnvVar, "-", "_") + activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") + return activeHelpEnvVar } diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md deleted file mode 100644 index 5e7f59af380..00000000000 --- a/vendor/github.com/spf13/cobra/active_help.md +++ /dev/null @@ -1,157 +0,0 @@ -# Active Help - -Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion. - -For example, -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding. - -bash-5.1$ bin/helm package [tab] -Please specify the path to the chart to package - -bash-5.1$ bin/helm package [tab][tab] -bin/ internal/ scripts/ pkg/ testdata/ -``` - -**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program. -## Supported shells - -Active Help is currently only supported for the following shells: -- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed. -- Zsh - -## Adding Active Help messages - -As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. 
If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md). - -Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details. - -### Active Help for nouns - -Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example: - -```go -cmd := &cobra.Command{ - Use: "add [NAME] [URL]", - Short: "add a chart repository", - Args: require.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return addRepo(args) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var comps []string - if len(args) == 0 { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } else if len(args) == 1 { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } else { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - return comps, cobra.ShellCompDirectiveNoFileComp - }, -} -``` -The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior: -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding - -bash-5.1$ helm repo add grafana [tab] -You must specify the URL for the repo you are adding - -bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab] -This command does not take any more arguments -``` -**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions. - -### Active Help for flags - -Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example: -```go -_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 2 { - return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp - } - return compVersionFlag(args[1], toComplete) - }) -``` -The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag. -``` -bash-5.1$ bin/helm install myrelease --version 2.0.[tab] -You must first specify the chart to install before the --version flag can be completed - -bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab] -2.0.1 2.0.2 2.0.3 -``` - -## User control of Active Help - -You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any. -Allowing to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration. - -The way to configure Active Help is to use the program's Active Help environment -variable. 
That variable is named `_ACTIVE_HELP` where `` is the name of your -program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever -Active Help configuration values are supported by the program. - -For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user -would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell. - -For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the -Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages -should or should not be added (instead of reading the environment variable directly). - -For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - activeHelpLevel := cobra.GetActiveHelpConfig(cmd) - - var comps []string - if len(args) == 0 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } - } else if len(args) == 1 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } - } else { - if activeHelpLevel == "local" { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - } - return comps, cobra.ShellCompDirectiveNoFileComp -}, -``` -**Note 1**: If the `_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly. - -**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `_ACTIVE_HELP` is set to. - -**Note 3**: If the user does not set `_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string. -## Active Help with Cobra's default completion command - -Cobra provides a default `completion` command for programs that wish to use it. -When using the default `completion` command, Active Help is configurable in the same -fashion as described above using environment variables. You may wish to document this in more -details for your users. - -## Debugging Active Help - -Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details. - -When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `_ACTIVE_HELP` where any `-` is replaced by an `_`. 
For example, we can test deactivating some Active Help as shown below: -``` -$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -_activeHelp_ WARNING: cannot re-use a name that is still in use -:0 -Completion ended with directive: ShellCompDirectiveDefault - -$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -:0 -Completion ended with directive: ShellCompDirectiveDefault -``` diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 10c78847de2..8a531518409 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -85,7 +85,7 @@ __%[1]s_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") # Disable ActiveHelp which is not supported for bash completion v1 requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 52919b2fa6d..00000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,93 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. - -**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. 
- -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 19b09560c1e..1cce5c329c2 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -57,7 +57,7 @@ __%[1]s_get_completion_results() { local requestComp lastParam lastChar args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") requestComp="${words[0]} %[2]s ${args[*]}" diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index b07b44a0ce2..a6b160ce53c 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -43,12 +43,13 @@ var initializers []func() var finalizers []func() const ( - defaultPrefixMatching = false - defaultCommandSorting = true - defaultCaseInsensitive = false + defaultPrefixMatching = false + defaultCommandSorting = true + defaultCaseInsensitive = false + defaultTraverseRunHooks = false ) -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing +// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing // to automatically enable in CLI tools. // Set this to true to enable it. var EnablePrefixMatching = defaultPrefixMatching @@ -60,6 +61,10 @@ var EnableCommandSorting = defaultCommandSorting // EnableCaseInsensitive allows case-insensitive commands names. 
(case sensitive by default) var EnableCaseInsensitive = defaultCaseInsensitive +// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. +// By default this is disabled, which means only the first run hook to be found is executed. +var EnableTraverseRunHooks = defaultTraverseRunHooks + // MousetrapHelpText enables an information splash screen on Windows // if the CLI is started from explorer.exe. // To disable the mousetrap, just set this variable to blank string (""). diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 01f7c6f1c5e..2fbe6c131a7 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -30,7 +30,10 @@ import ( flag "github.com/spf13/pflag" ) -const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" +const ( + FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" +) // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist @@ -99,7 +102,7 @@ type Command struct { Deprecated string // Annotations are key/value pairs that can be used by applications to identify or - // group commands. + // group commands or set special options. Annotations map[string]string // Version defines the version for this command. If this value is non-empty and the command does not @@ -115,6 +118,8 @@ type Command struct { // * PostRun() // * PersistentPostRun() // All functions get the same args, the arguments after the command name. + // The *PreRun and *PostRun functions will only be executed if the Run function of the current + // command has been declared. // // PersistentPreRun: children of this command will inherit and execute. PersistentPreRun func(cmd *Command, args []string) @@ -181,6 +186,9 @@ type Command struct { // versionTemplate is the version template defined by user. versionTemplate string + // errPrefix is the error message prefix defined by user. + errPrefix string + // inReader is a reader defined by the user that replaces stdin inReader io.Reader // outWriter is a writer defined by the user that replaces stdout @@ -346,6 +354,11 @@ func (c *Command) SetVersionTemplate(s string) { c.versionTemplate = s } +// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. +func (c *Command) SetErrPrefix(s string) { + c.errPrefix = s +} + // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. // The user should not have a cyclic dependency on commands. func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { @@ -595,6 +608,18 @@ func (c *Command) VersionTemplate() string { ` } +// ErrPrefix return error message prefix for the command +func (c *Command) ErrPrefix() string { + if c.errPrefix != "" { + return c.errPrefix + } + + if c.HasParent() { + return c.parent.ErrPrefix() + } + return "Error:" +} + func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { flag := fs.Lookup(name) if flag == nil { @@ -752,7 +777,9 @@ func (c *Command) findNext(next string) *Command { } if len(matches) == 1 { - return matches[0] + // Temporarily disable gosec G602, which produces a false positive. + // See https://github.com/securego/gosec/issues/1005. 
+ return matches[0] // #nosec G602 } return nil @@ -910,15 +937,31 @@ func (c *Command) execute(a []string) (err error) { return err } + parents := make([]*Command, 0, 5) for p := c; p != nil; p = p.Parent() { + if EnableTraverseRunHooks { + // When EnableTraverseRunHooks is set: + // - Execute all persistent pre-runs from the root parent till this command. + // - Execute all persistent post-runs from this command till the root parent. + parents = append([]*Command{p}, parents...) + } else { + // Otherwise, execute only the first found persistent hook. + parents = append(parents, p) + } + } + for _, p := range parents { if p.PersistentPreRunE != nil { if err := p.PersistentPreRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } if c.PreRunE != nil { @@ -955,10 +998,14 @@ func (c *Command) execute(a []string) (err error) { if err := p.PersistentPostRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } @@ -1048,7 +1095,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(c.ErrPrefix(), err.Error()) c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err @@ -1077,7 +1124,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(cmd.ErrPrefix(), err.Error()) } // If root command has SilenceUsage flagged, @@ -1380,6 +1427,9 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { + return displayName + } return c.Name() } @@ -1402,6 +1452,7 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. +// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index ee38c4d0b86..b60f6b20007 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -145,6 +145,20 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman return nil } +// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
+func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { + flag := c.Flag(flagName) + if flag == nil { + return nil, false + } + + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + + completionFunc, exists := flagCompletionFunctions[flag] + return completionFunc, exists +} + // Returns a string listing the different directive enabled in the specified parameter func (d ShellCompDirective) string() string { var directives []string @@ -283,9 +297,13 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // These flags are normally added when `execute()` is called on `finalCmd`, // however, when doing completion, we don't call `finalCmd.execute()`. - // Let's add the --help and --version flag ourselves. - finalCmd.InitDefaultHelpFlag() - finalCmd.InitDefaultVersionFlag() + // Let's add the --help and --version flag ourselves but only if the finalCmd + // has not disabled flag parsing; if flag parsing is disabled, it is up to the + // finalCmd itself to handle the completion of *all* flags. + if !finalCmd.DisableFlagParsing { + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + } // Check if we are doing flag value completion before parsing the flags. // This is important because if we are completing a flag value, we need to also @@ -389,6 +407,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) + // Try to complete non-inherited flags even if DisableFlagParsing==true. + // This allows programs to tell Cobra about flags for completion even + // if the actual parsing of flags is not done by Cobra. + // For instance, Helm uses this to provide flag name completion for + // some of its plugins. finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { doCompleteFlags(flag) }) diff --git a/vendor/github.com/spf13/cobra/doc/README.md b/vendor/github.com/spf13/cobra/doc/README.md deleted file mode 100644 index 8e07baae330..00000000000 --- a/vendor/github.com/spf13/cobra/doc/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Documentation generation - -- [Man page docs](./man_docs.md) -- [Markdown docs](./md_docs.md) -- [Rest docs](./rest_docs.md) -- [Yaml docs](./yaml_docs.md) - -## Options -### `DisableAutoGenTag` - -You may set `cmd.DisableAutoGenTag = true` -to _entirely_ remove the auto generated string "Auto generated by spf13/cobra..." -from any documentation source. - -### `InitDefaultCompletionCmd` - -You may call `cmd.InitDefaultCompletionCmd()` to document the default autocompletion command. diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md deleted file mode 100644 index 3709160f34f..00000000000 --- a/vendor/github.com/spf13/cobra/doc/man_docs.md +++ /dev/null @@ -1,31 +0,0 @@ -# Generating Man Pages For Your Own cobra.Command - -Generating man pages from a cobra command is incredibly easy. 
An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - header := &doc.GenManHeader{ - Title: "MINE", - Section: "3", - } - err := doc.GenManTree(cmd, header, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a man page `/tmp/test.3` diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go index c4a27c00935..f98fe2a3b8f 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -27,6 +27,8 @@ import ( "github.com/spf13/cobra" ) +const markdownExtension = ".md" + func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { flags := cmd.NonInheritedFlags() flags.SetOutput(buf) @@ -83,7 +85,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) if cmd.HasParent() { parent := cmd.Parent() pname := parent.CommandPath() - link := pname + ".md" + link := pname + markdownExtension link = strings.ReplaceAll(link, " ", "_") buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) cmd.VisitParents(func(c *cobra.Command) { @@ -101,7 +103,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) continue } cname := name + " " + child.Name() - link := cname + ".md" + link := cname + markdownExtension link = strings.ReplaceAll(link, " ", "_") buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) } @@ -138,7 +140,7 @@ func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHa } } - basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".md" + basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + markdownExtension filename := filepath.Join(dir, basename) f, err := os.Create(filename) if err != nil { diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md deleted file mode 100644 index 1659175cfda..00000000000 --- a/vendor/github.com/spf13/cobra/doc/md_docs.md +++ /dev/null @@ -1,115 +0,0 @@ -# Generating Markdown Docs For Your Own cobra.Command - -Generating Markdown pages from a cobra command is incredibly easy. 
An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - err := doc.GenMarkdownTree(cmd, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a Markdown document `/tmp/test.md` - -## Generate markdown docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "log" - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - - "github.com/spf13/cobra/doc" -) - -func main() { - kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - err := doc.GenMarkdownTree(kubectl, "./") - if err != nil { - log.Fatal(err) - } -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate markdown docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` - -```go - out := new(bytes.Buffer) - err := doc.GenMarkdown(cmd, out) - if err != nil { - log.Fatal(err) - } -``` - -This will write the markdown doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { - //... -} -``` - -```go -func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` - -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: - -```go -linkHandler := func(name string) string { - base := strings.TrimSuffix(name, path.Ext(name)) - return "/commands/" + strings.ToLower(base) + "/" -} -``` diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md deleted file mode 100644 index 3041c573ab0..00000000000 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.md +++ /dev/null @@ -1,114 +0,0 @@ -# Generating ReStructured Text Docs For Your Own cobra.Command - -Generating ReST pages from a cobra command is incredibly easy. 
An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - err := doc.GenReSTTree(cmd, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a ReST document `/tmp/test.rst` - -## Generate ReST docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "log" - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - - "github.com/spf13/cobra/doc" -) - -func main() { - kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - err := doc.GenReSTTree(kubectl, "./") - if err != nil { - log.Fatal(err) - } -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate ReST docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree` - -```go - out := new(bytes.Buffer) - err := doc.GenReST(cmd, out) - if err != nil { - log.Fatal(err) - } -``` - -This will write the ReST doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { - //... -} -``` - -```go -func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used: - -```go -// Sphinx cross-referencing format -linkHandler := func(name, ref string) string { - return fmt.Sprintf(":ref:`%s <%s>`", name, ref) -} -``` diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md deleted file mode 100644 index 172e61d1214..00000000000 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.md +++ /dev/null @@ -1,112 +0,0 @@ -# Generating Yaml Docs For Your Own cobra.Command - -Generating yaml files from a cobra command is incredibly easy. 
An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - err := doc.GenYamlTree(cmd, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a Yaml document `/tmp/test.yaml` - -## Generate yaml docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "io/ioutil" - "log" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - - "github.com/spf13/cobra/doc" -) - -func main() { - kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - err := doc.GenYamlTree(kubectl, "./") - if err != nil { - log.Fatal(err) - } -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate yaml docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree` - -```go - out := new(bytes.Buffer) - doc.GenYaml(cmd, out) -``` - -This will write the yaml doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { - //... -} -``` - -```go -func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. 
A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` - -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: - -```go -linkHandler := func(name string) string { - base := strings.TrimSuffix(name, path.Ext(name)) - return "/commands/" + strings.ToLower(base) + "/" -} -``` diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 12ca0d2b11c..12d61b69111 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -113,7 +113,7 @@ function __%[1]s_clear_perform_completion_once_result __%[1]s_debug "" __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" set --erase __%[1]s_perform_completion_once_result - __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" end function __%[1]s_requires_order_preservation diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed1293a..00000000000 --- a/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index b35fde15548..0671ec5f202 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -24,6 +24,7 @@ import ( const ( requiredAsGroup = "cobra_annotation_required_if_others_set" + oneRequired = "cobra_annotation_one_required" mutuallyExclusive = "cobra_annotation_mutually_exclusive" ) @@ -43,6 +44,22 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { } } +// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors +// if the command is invoked without at least one flag from the given set of flags. +func (c *Command) MarkFlagsOneRequired(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) + } + if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + // MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors // if the command is invoked with more than one flag from the given set of flags. 
func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { @@ -59,7 +76,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { } } -// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the +// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the // first error encountered. func (c *Command) ValidateFlagGroups() error { if c.DisableFlagParsing { @@ -71,15 +88,20 @@ func (c *Command) ValidateFlagGroups() error { // groupStatus format is the list of flags as a unique ID, // then a map of each flag name and whether it is set or not. groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { return err } + if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { + return err + } if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { return err } @@ -142,6 +164,27 @@ func validateRequiredFlagGroups(data map[string]map[string]bool) error { return nil } +func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) >= 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) + } + return nil +} + func validateExclusiveFlagGroups(data map[string]map[string]bool) error { keys := sortedKeys(data) for _, flagList := range keys { @@ -176,6 +219,7 @@ func sortedKeys(m map[string]map[string]bool) []string { // enforceFlagGroupsForCompletion will do the following: // - when a flag in a group is present, other flags in the group will be marked required +// - when none of the flags in a one-required group are present, all flags in the group will be marked required // - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden // This allows the standard completion logic to behave appropriately for flag groups func (c *Command) enforceFlagGroupsForCompletion() { @@ -185,9 +229,11 @@ func (c *Command) enforceFlagGroupsForCompletion() { flags := c.Flags() groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) }) @@ -204,6 +250,26 @@ func (c *Command) enforceFlagGroupsForCompletion() { } } + // If none of the flags of a one-required group are present, we make all the flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range oneRequiredGroupStatus { + set := 0 + + for _, isSet := range flagnameAndStatus { + if isSet { + set++ + } + } + + // None of the flags of the group are set, mark all flags in the group + // as required + if set == 0 { + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + // If a flag that is mutually exclusive to others is present, we hide the other // flags of that group so the shell completion does not suggest them for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 177d2755f21..55195193944 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -47,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars { `+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -[scriptblock]$__%[2]sCompleterBlock = { +[scriptblock]${__%[2]sCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -122,7 +122,7 @@ filter __%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[10]s=0 + ${env:%[10]s}=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -279,7 +279,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } } -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, 
ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5c0f..00000000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index 8a291eb20e8..00000000000 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,64 +0,0 @@ -## Projects using Cobra - -- [Allero](https://github.com/allero-io/allero) -- [Arewefastyet](https://benchmark.vitess.io) -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](https://blevesearch.com/) -- [Cilium](https://cilium.io/) -- [CloudQuery](https://github.com/cloudquery/cloudquery) -- [CockroachDB](https://www.cockroachlabs.com/) -- [Constellation](https://github.com/edgelesssys/constellation) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Datree](https://github.com/datreeio/datree) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [GitHub CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](https://github.com/gopherjs/gopherjs) -- [GoReleaser](https://goreleaser.com) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Infracost](https://github.com/infracost/infracost) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/kubescape/kubescape) -- [KubeVirt](https://github.com/kubevirt/kubevirt) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Mercure](https://mercure.rocks/) -- [Meroxa CLI](https://github.com/meroxa/cli) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Moldy](https://github.com/Moldy-Community/moldy) -- [Multi-gitter](https://github.com/lindell/multi-gitter) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [nFPM](https://nfpm.goreleaser.com) -- [Okteto](https://github.com/okteto/okteto) -- [OpenShift](https://www.openshift.com/) -- [Ory Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pixie](https://github.com/pixie-io/pixie) -- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](https://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Pulumi](https://www.pulumi.com) -- [QRcp](https://github.com/claudiodangelis/qrcp) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) -- [Sia](https://github.com/SiaFoundation/siad) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch 
CLI](https://github.com/twitchdev/twitch-cli) -- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) -- [Vitess](https://vitess.io) -- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) -- [Werf](https://werf.io/) -- [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index 065c0621d4c..00000000000 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,576 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -Cobra will automatically provide your program with a fully functional `completion` command, -similarly to how it provides the `help` command. - -## Creating your own completion command - -If you do not wish to use the default `completion` command, you can choose to -provide your own, which will take precedence over the default one. (This also provides -backwards-compatibility with programs that already have their own `completion` command.) - -If you are using the `cobra-cli` generator, -which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), -you can create a completion command by running - -```bash -cobra-cli add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: fmt.Sprintf(`To load completions: - -Bash: - - $ source <(%[1]s completion bash) - - # To load completions for each session, execute once: - # Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s - # macOS: - $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ %[1]s completion fish | source - - # To load completions for each session, execute once: - $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -PowerShell: - - PS> %[1]s completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> %[1]s completion powershell > %[1]s.ps1 - # and source this file from your PowerShell profile. -`,cmd.Root().Name()), - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. 
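
As background for the command.go and cobra.go hunks earlier in this diff: they introduce three opt-in features for downstream CLIs — `EnableTraverseRunHooks`, `SetErrPrefix()`/`ErrPrefix()`, and `CommandDisplayNameAnnotation`. The sketch below is a minimal, hypothetical example of a program enabling all three; the `my-app` name, messages, and command layout are invented for illustration and are not part of the patch.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Run every parent's persistent pre/post-run hooks, not only the nearest one.
	cobra.EnableTraverseRunHooks = true

	rootCmd := &cobra.Command{
		Use: "app",
		// Display name reported by CommandPath() in usage and error messages.
		Annotations: map[string]string{
			cobra.CommandDisplayNameAnnotation: "my-app",
		},
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("root pre-run")
		},
	}
	// Replace the default "Error:" prefix printed when a command fails.
	rootCmd.SetErrPrefix("my-app error:")

	subCmd := &cobra.Command{
		Use: "sub",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("sub pre-run") // runs after root's hook with traversal enabled
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return fmt.Errorf("boom") // reported as "my-app error: boom"
		},
	}
	rootCmd.AddCommand(subCmd)

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```
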
- -## Adapting the default completion command - -Cobra provides a few options for the default `completion` command. To configure such options you must set -the `CompletionOptions` field on the *root* command. - -To tell Cobra *not* to provide the default `completion` command: -``` -rootCmd.CompletionOptions.DisableDefaultCmd = true -``` - -To tell Cobra to mark the default `completion` command as *hidden*: -``` -rootCmd.CompletionOptions.HiddenDefaultCmd = true -``` - -To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: -``` -rootCmd.CompletionOptions.DisableNoDescFlag = true -``` - -To tell Cobra to completely disable descriptions for completions: -``` -rootCmd.CompletionOptions.DisableDescriptions = true -``` - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. -Some simplified code from `kubectl get` looks like: - -```go -validArgs = []string{ "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. 
-Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) { - RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search. For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs - -// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order -// in which the completions are provided -ShellCompDirectiveKeepOrder -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script. 
To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) { -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. 
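
The completions.go hunk in this diff also adds `GetFlagCompletionFunc`, the read-side counterpart of the `RegisterFlagCompletionFunc` call discussed above. Below is a small, hypothetical sketch; the `status` command and `--output` flag are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	statusCmd := &cobra.Command{Use: "status"}
	statusCmd.Flags().String("output", "table", "output format")

	// Register a completion function for --output ...
	_ = statusCmd.RegisterFlagCompletionFunc("output",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveNoFileComp
		})

	// ... and read it back with the new accessor, e.g. to wrap or test it.
	if fn, ok := statusCmd.GetFlagCompletionFunc("output"); ok {
		comps, _ := fn(statusCmd, nil, "")
		fmt.Println(comps) // [json table yaml]
	}
}
```
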
- -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -Cobra provides support for completion descriptions. Such descriptions are supported for each shell -(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). -For commands and flags, Cobra will provide the descriptions automatically, based on usage information. -For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. 
For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` - -If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: - -```bash -$ source <(helm completion bash) -$ helm completion [tab][tab] -bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) -fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) - -$ source <(helm completion bash --no-descriptions) -$ helm completion [tab][tab] -bash fish powershell zsh -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -### Bash completion V2 - -Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling -`GenBashCompletion()` or `GenBashCompletionFile()`. - -A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or -`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion -(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion -solution described in this document. -Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash -completion V2 solution which provides the following extra features: -- Supports completion descriptions (like the other shells) -- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) -- Streamlined user experience thanks to a completion behavior aligned with the other shells - -`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` -you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra -will provide the description automatically based on usage information. You can choose to make this option configurable by -your users. 
- -``` -# With descriptions -$ helm s[tab][tab] -search (search for a keyword in charts) status (display the status of the named release) -show (show information of a chart) - -# Without descriptions -$ helm s[tab][tab] -search show status -``` -**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. -## Zsh completions - -Cobra supports native zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` and be named -`_`. You will need to start a new shell for the completions to become available. - -Zsh supports descriptions for completions. Cobra will provide the description automatically, -based on usage information. Cobra provides a way to completely disable such descriptions by -using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make -this a configurable option to your users. -``` -# With descriptions -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. - * You should instead use `RegisterFlagCompletionFunc()`. - -### Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. -Please refer to [Zsh Completions](zsh_completions.md) for details. - -## fish completions - -Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. -``` -# With descriptions -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. 
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `fish`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `fish`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) - -## PowerShell completions - -Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. - -The script is designed to support all three PowerShell completion modes: - -* TabCompleteNext (default windows style - on each key press the next option is displayed) -* Complete (works like bash) -* MenuComplete (works like zsh) - -You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function `. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. - -Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -``` -# With descriptions and Mode 'Complete' -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. -$ helm s[tab] -search show status - -search for a keyword in charts - -# Without descriptions -$ helm s[tab] -search show status -``` -### Aliases - -You can also configure `powershell` aliases for your program and they will also support completions. - -``` -$ sal aliasname origcommand -$ Register-ArgumentCompleter -CommandName 'aliasname' -ScriptBlock $__origcommandCompleterBlock - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -The name of the completer block variable is of the form `$__CompleterBlock` where every `-` and `:` in the program name have been replaced with `_`, to respect powershell naming syntax. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. 
- * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md deleted file mode 100644 index 85201d840c8..00000000000 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ /dev/null @@ -1,726 +0,0 @@ -# User Guide - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra-CLI is its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at https://gohugo.io/documentation/`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra-cli", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. 
-func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := os.UserHomeDir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigType("yaml") - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Organizing subcommands - -A command may have subcommands which in turn may have other subcommands. This is achieved by using -`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in -its own go package. - -The suggested approach is for the parent command to use `AddCommand` to add its most immediate -subcommands. For example, consider the following directory structure: - -```text -├── cmd -│   ├── root.go -│   └── sub1 -│   ├── sub1.go -│   └── sub2 -│   ├── leafA.go -│   ├── leafB.go -│   └── sub2.go -└── main.go -``` - -In this case: - -* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. -* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. -* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the - sub2 command. - -This approach ensures the subcommands are always included at compile time while avoiding cyclic -references. - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. 
- -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -### Flag Groups - -If you have different flags that must be provided together (e.g. 
if they provide the `--username` flag they MUST provide the `--password` flag as well) then -Cobra can enforce that requirement: -```go -rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") -rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") -rootCmd.MarkFlagsRequiredTogether("username", "password") -``` - -You can also prevent different flags from being provided together if they represent mutually -exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: -```go -rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") -rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") -``` - -In both of these cases: - - both local and persistent flags can be used - - **NOTE:** the group is only enforced on commands where every flag is defined - - a flag may appear in multiple groups - - a group may contain any number of flags - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field of `Command`. -The following validators are built in: - -- Number of arguments: - - `NoArgs` - report an error if there are any positional args. - - `ArbitraryArgs` - accept any number of args. - - `MinimumNArgs(int)` - report an error if less than N positional args are provided. - - `MaximumNArgs(int)` - report an error if more than N positional args are provided. - - `ExactArgs(int)` - report an error if there are not exactly N positional args. - - `RangeArgs(min, max)` - report an error if the number of args is not between `min` and `max`. -- Content of the arguments: - - `OnlyValidArgs` - report an error if there are any positional args not specified in the `ValidArgs` field of `Command`, which can optionally be set to a list of valid values for positional args. - -If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. - -Moreover, `MatchAll(pargs ...PositionalArgs)` enables combining existing checks with arbitrary other checks. -For instance, if you want to report an error if there are not exactly N positional args OR if there are any positional -args that are not in the `ValidArgs` field of `Command`, you can call `MatchAll` on `ExactArgs` and `OnlyValidArgs`, as -shown below: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -It is possible to set any custom validator that satisfies `func(cmd *cobra.Command, args []string) error`. -For example: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - // Optionally run one of the validators provided by cobra - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return err - } - // Run the custom validation logic - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra-cli help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra-cli [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Grouping commands in help - -Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly -defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element -of that subcommand. The groups will appear in the help output in the same order as they are defined using different -calls to `AddGroup()`. 
If you use the generated `help` or `completion` commands, you can set their group ids using -`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with the following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra-cli --invalid - Error: unknown flag: --invalid - Usage: - cobra-cli [command] - - Available Commands: - add Add a command to a Cobra Application - completion Generate the autocompletion script for the specified shell - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra-cli - -l, --license string name of license for the project - --viper use Viper for configuration - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatically generated based on existing subcommands and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands but for which -you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). - -## Providing Active Help - -Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff61787f4..00000000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. 
- -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/vendor/github.com/tredoe/osutil/AUTHORS.md b/vendor/github.com/tredoe/osutil/AUTHORS.md new file mode 100644 index 00000000000..518f194222d --- /dev/null +++ b/vendor/github.com/tredoe/osutil/AUTHORS.md @@ -0,0 +1,16 @@ +# Authors + +This is the official list of authors for copyright purposes. +This file is distinct from the 'CONTRIBUTORS' file. See the latter for an explanation. + +Names should be added to this file as: + + Name or Organization / (url address) + +(The email address is not required for organizations) + +Please keep the list sorted. 
+ +## Code + +* Jonas mg (https://github.com/tredoe) diff --git a/vendor/github.com/tredoe/osutil/CONTRIBUTORS.md b/vendor/github.com/tredoe/osutil/CONTRIBUTORS.md new file mode 100644 index 00000000000..b630fff1c14 --- /dev/null +++ b/vendor/github.com/tredoe/osutil/CONTRIBUTORS.md @@ -0,0 +1,22 @@ +# Contributors + +This is the official list of people who can contribute (and typically +have contributed) to the repository. + +The 'AUTHORS' file lists the copyright holders; this file lists people. For +example, the employees of an organization are listed here but not in 'AUTHORS', +because the organization holds the copyright. + +Names should be added to this file as: + + Name / (url address) + +Please keep the list sorted. + +## Code + +* Jonas mg (https://github.com/tredoe) + +* DJ Gregor +* Jeramey Crawford +* Misha Seltzer (https://github.com/mishas) diff --git a/vendor/github.com/tredoe/osutil/LICENSE-MPL.txt b/vendor/github.com/tredoe/osutil/LICENSE-MPL.txt new file mode 100644 index 00000000000..52d135112ee --- /dev/null +++ b/vendor/github.com/tredoe/osutil/LICENSE-MPL.txt @@ -0,0 +1,374 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. 
Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. 
This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
+ diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/AUTHORS.md b/vendor/github.com/tredoe/osutil/user/crypt/AUTHORS.md similarity index 100% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/AUTHORS.md rename to vendor/github.com/tredoe/osutil/user/crypt/AUTHORS.md diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/LICENSE b/vendor/github.com/tredoe/osutil/user/crypt/LICENSE similarity index 100% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/LICENSE rename to vendor/github.com/tredoe/osutil/user/crypt/LICENSE diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/README.md b/vendor/github.com/tredoe/osutil/user/crypt/README.md similarity index 100% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/README.md rename to vendor/github.com/tredoe/osutil/user/crypt/README.md diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/common/base64.go b/vendor/github.com/tredoe/osutil/user/crypt/common/base64.go similarity index 100% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/common/base64.go rename to vendor/github.com/tredoe/osutil/user/crypt/common/base64.go diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/common/doc.go b/vendor/github.com/tredoe/osutil/user/crypt/common/doc.go similarity index 100% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/common/doc.go rename to vendor/github.com/tredoe/osutil/user/crypt/common/doc.go diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/common/salt.go b/vendor/github.com/tredoe/osutil/user/crypt/common/salt.go similarity index 100% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/common/salt.go rename to vendor/github.com/tredoe/osutil/user/crypt/common/salt.go diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/crypt.go b/vendor/github.com/tredoe/osutil/user/crypt/crypt.go similarity index 88% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/crypt.go rename to vendor/github.com/tredoe/osutil/user/crypt/crypt.go index 13e7791e65a..5ded2da8c66 100644 --- a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/crypt.go +++ b/vendor/github.com/tredoe/osutil/user/crypt/crypt.go @@ -12,7 +12,7 @@ import ( "errors" "strings" - "github.com/tredoe/osutil/v2/userutil/crypt/common" + "github.com/tredoe/osutil/user/crypt/common" ) var ErrKeyMismatch = errors.New("hashed value is not the hash of the given password") @@ -52,10 +52,10 @@ type Crypter interface { type Crypt uint const ( - APR1 Crypt = iota + 1 // import "github.com/tredoe/osutil/v2/user/crypt/apr1_crypt" - MD5 // import "github.com/tredoe/osutil/v2/user/crypt/md5_crypt" - SHA256 // import "github.com/tredoe/osutil/v2/user/crypt/sha256_crypt" - SHA512 // import "github.com/tredoe/osutil/v2/user/crypt/sha512_crypt" + APR1 Crypt = iota + 1 // import "github.com/tredoe/osutil/user/crypt/apr1_crypt" + MD5 // import "github.com/tredoe/osutil/user/crypt/md5_crypt" + SHA256 // import "github.com/tredoe/osutil/user/crypt/sha256_crypt" + SHA512 // import "github.com/tredoe/osutil/user/crypt/sha512_crypt" maxCrypt ) diff --git a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/sha512_crypt/sha512_crypt.go b/vendor/github.com/tredoe/osutil/user/crypt/sha512_crypt/sha512_crypt.go similarity index 98% rename from vendor/github.com/tredoe/osutil/v2/userutil/crypt/sha512_crypt/sha512_crypt.go rename to vendor/github.com/tredoe/osutil/user/crypt/sha512_crypt/sha512_crypt.go index 18ee1630bf4..fd55e881243 100644 --- 
a/vendor/github.com/tredoe/osutil/v2/userutil/crypt/sha512_crypt/sha512_crypt.go +++ b/vendor/github.com/tredoe/osutil/user/crypt/sha512_crypt/sha512_crypt.go @@ -17,8 +17,8 @@ import ( "crypto/sha512" "strconv" - "github.com/tredoe/osutil/v2/userutil/crypt" - "github.com/tredoe/osutil/v2/userutil/crypt/common" + "github.com/tredoe/osutil/user/crypt" + "github.com/tredoe/osutil/user/crypt/common" ) func init() { diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md index 55471852132..56d49275a74 100644 --- a/vendor/github.com/ulikunitz/xz/README.md +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -75,3 +75,14 @@ To decompress it use the following command. $ gxz -d bigfile.xz +## Security & Vulnerabilities + +The security policy is documented in [SECURITY.md](SECURITY.md). + +The software is not affected by the supply chain attack on the original xz +implementation, [CVE-2024-3094](https://nvd.nist.gov/vuln/detail/CVE-2024-3094). +This implementation doesn't share any files with the original xz implementation +and no patches or pull requests are accepted without a review. + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md index 5f7ec01b3b6..1bdc88878d4 100644 --- a/vendor/github.com/ulikunitz/xz/SECURITY.md +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -6,5 +6,14 @@ Currently the last minor version v0.5.x is supported. ## Reporting a Vulnerability -Report a vulnerability by creating a Github issue at -. Expect a response in a week. +You can privately report a vulnerability following this +[procedure](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +Alternatively you can create a Github issue at +. + +In both cases expect a response in at least 7 days. + +## Security Advisories + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index a3d6f19250c..c466ffeda5c 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -86,6 +86,11 @@ ## Log +### 2024-04-03 + +Release v0.5.12 updates README.md and SECURITY.md to address the supply chain +attack on the original xz implementation. + ### 2022-12-12 Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation @@ -99,7 +104,7 @@ it. Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The function allocated a slice of records immediately after reading the value -without further checks. Sincex the number has been too large the make function +without further checks. Since the number has been too large the make function did panic. The fix is to check the number against the expected number of records before allocating the records. 
diff --git a/vendor/github.com/vishvananda/netns/.golangci.yml b/vendor/github.com/vishvananda/netns/.golangci.yml new file mode 100644 index 00000000000..600bef78e2b --- /dev/null +++ b/vendor/github.com/vishvananda/netns/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md index 1fdb2d3e4ae..bdfedbe81fa 100644 --- a/vendor/github.com/vishvananda/netns/README.md +++ b/vendor/github.com/vishvananda/netns/README.md @@ -23,6 +23,7 @@ import ( "fmt" "net" "runtime" + "github.com/vishvananda/netns" ) @@ -48,14 +49,3 @@ func main() { } ``` - -## NOTE - -The library can be safely used only with Go >= 1.10 due to [golang/go#20676](https://github.com/golang/go/issues/20676). - -After locking a goroutine to its current OS thread with `runtime.LockOSThread()` -and changing its network namespace, any new subsequent goroutine won't be -scheduled on that thread while it's locked. Therefore, the new goroutine -will run in a different namespace leading to unexpected results. - -See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details. diff --git a/vendor/github.com/vishvananda/netns/doc.go b/vendor/github.com/vishvananda/netns/doc.go new file mode 100644 index 00000000000..cd4093a4d7e --- /dev/null +++ b/vendor/github.com/vishvananda/netns/doc.go @@ -0,0 +1,9 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. +package netns diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go index 36e64906b6c..2ed7c7e2fa4 100644 --- a/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -1,33 +1,31 @@ -// +build linux,go1.10 - package netns import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" - "syscall" "golang.org/x/sys/unix" ) -// Deprecated: use syscall pkg instead (go >= 1.5 needed). +// Deprecated: use golang.org/x/sys/unix pkg instead. const ( - CLONE_NEWUTS = 0x04000000 /* New utsname group? */ - CLONE_NEWIPC = 0x08000000 /* New ipcs */ - CLONE_NEWUSER = 0x10000000 /* New user namespace */ - CLONE_NEWPID = 0x20000000 /* New pid namespace */ - CLONE_NEWNET = 0x40000000 /* New network namespace */ - CLONE_IO = 0x80000000 /* Get io context */ - bindMountPath = "/run/netns" /* Bind mount path for named netns */ + CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */ + CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */ + CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */ + CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */ + CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */ + CLONE_IO = unix.CLONE_IO /* Get io context */ ) -// Setns sets namespace using syscall. Note that this should be a method -// in syscall but it has not been added. +const bindMountPath = "/run/netns" /* Bind mount path for named netns */ + +// Setns sets namespace using golang.org/x/sys/unix.Setns. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. 
func Setns(ns NsHandle, nstype int) (err error) { return unix.Setns(int(ns), nstype) } @@ -35,19 +33,20 @@ func Setns(ns NsHandle, nstype int) (err error) { // Set sets the current network namespace to the namespace represented // by NsHandle. func Set(ns NsHandle) (err error) { - return Setns(ns, CLONE_NEWNET) + return unix.Setns(int(ns), unix.CLONE_NEWNET) } // New creates a new network namespace, sets it as current and returns // a handle to it. func New() (ns NsHandle, err error) { - if err := unix.Unshare(CLONE_NEWNET); err != nil { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { return -1, err } return Get() } -// NewNamed creates a new named network namespace and returns a handle to it +// NewNamed creates a new named network namespace, sets it as current, +// and returns a handle to it func NewNamed(name string) (NsHandle, error) { if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { err = os.MkdirAll(bindMountPath, 0755) @@ -65,13 +64,15 @@ func NewNamed(name string) (NsHandle, error) { f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) if err != nil { + newNs.Close() return None(), err } f.Close() - nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()) - err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "") + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "") if err != nil { + newNs.Close() return None(), err } @@ -82,7 +83,7 @@ func NewNamed(name string) (NsHandle, error) { func DeleteNamed(name string) error { namedPath := path.Join(bindMountPath, name) - err := syscall.Unmount(namedPath, syscall.MNT_DETACH) + err := unix.Unmount(namedPath, unix.MNT_DETACH) if err != nil { return err } @@ -108,7 +109,7 @@ func GetFromPath(path string) (NsHandle, error) { // GetFromName gets a handle to a named network namespace such as one // created by `ip netns add`. func GetFromName(name string) (NsHandle, error) { - return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name)) + return GetFromPath(filepath.Join(bindMountPath, name)) } // GetFromPid gets a handle to the network namespace of a given pid. @@ -133,33 +134,38 @@ func GetFromDocker(id string) (NsHandle, error) { } // borrowed from docker/utils/utils.go -func findCgroupMountpoint(cgroupType string) (string, error) { - output, err := ioutil.ReadFile("/proc/mounts") +func findCgroupMountpoint(cgroupType string) (int, string, error) { + output, err := os.ReadFile("/proc/mounts") if err != nil { - return "", err + return -1, "", err } // /proc/mounts has 6 fields per line, one mount per line, e.g. // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, " ") - if len(parts) == 6 && parts[2] == "cgroup" { - for _, opt := range strings.Split(parts[3], ",") { - if opt == cgroupType { - return parts[1], nil + if len(parts) == 6 { + switch parts[2] { + case "cgroup2": + return 2, parts[1], nil + case "cgroup": + for _, opt := range strings.Split(parts[3], ",") { + if opt == cgroupType { + return 1, parts[1], nil + } } } } } - return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) + return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) } // Returns the relative path to the cgroup docker is running in. 
// borrowed from docker/utils/utils.go // modified to get the docker pid instead of using /proc/self -func getThisCgroup(cgroupType string) (string, error) { - dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") +func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { + dockerpid, err := os.ReadFile("/var/run/docker.pid") if err != nil { return "", err } @@ -171,14 +177,15 @@ func getThisCgroup(cgroupType string) (string, error) { if err != nil { return "", err } - output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) if err != nil { return "", err } for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, ":") // any type used by docker should work - if parts[1] == cgroupType { + if (cgroupVer == 1 && parts[1] == cgroupType) || + (cgroupVer == 2 && parts[1] == "") { return parts[2], nil } } @@ -190,46 +197,56 @@ func getThisCgroup(cgroupType string) (string, error) { // modified to only return the first pid // modified to glob with id // modified to search for newer docker containers +// modified to look for cgroups v2 func getPidForContainer(id string) (int, error) { pid := 0 // memory is chosen randomly, any cgroup used by docker works cgroupType := "memory" - cgroupRoot, err := findCgroupMountpoint(cgroupType) + cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType) if err != nil { return pid, err } - cgroupThis, err := getThisCgroup(cgroupType) + cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType) if err != nil { return pid, err } id += "*" + var pidFile string + if cgroupVer == 1 { + pidFile = "tasks" + } else if cgroupVer == 2 { + pidFile = "cgroup.procs" + } else { + return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer) + } + attempts := []string{ - filepath.Join(cgroupRoot, cgroupThis, id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, id, pidFile), // With more recent lxc versions use, cgroup will be in lxc/ - filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile), // With more recent docker, cgroup will be in docker/ - filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile), // Even more recent docker versions under systemd use docker-.scope/ - filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile), // Even more recent docker versions under cgroup/systemd/docker// - filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile), // Kubernetes with docker and CNI is even more different. Works for BestEffort and Burstable QoS - filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile), // Same as above but for Guaranteed QoS - filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile), // Another flavor of containers location in recent kubernetes 1.11+. 
Works for BestEffort and Burstable QoS - filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), // Same as above but for Guaranteed QoS - filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), // When runs inside of a container with recent kubernetes 1.11+. Works for BestEffort and Burstable QoS - filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), // Same as above but for Guaranteed QoS - filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), } var filename string @@ -247,7 +264,7 @@ func getPidForContainer(id string) (int, error) { return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) } - output, err := ioutil.ReadFile(filename) + output, err := os.ReadFile(filename) if err != nil { return pid, err } diff --git a/vendor/github.com/vishvananda/netns/netns_unspecified.go b/vendor/github.com/vishvananda/netns/netns_others.go similarity index 63% rename from vendor/github.com/vishvananda/netns/netns_unspecified.go rename to vendor/github.com/vishvananda/netns/netns_others.go index d06af62b68a..0489837741b 100644 --- a/vendor/github.com/vishvananda/netns/netns_unspecified.go +++ b/vendor/github.com/vishvananda/netns/netns_others.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netns @@ -10,6 +11,14 @@ var ( ErrNotImplemented = errors.New("not implemented") ) +// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It +// is not implemented on other platforms. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return ErrNotImplemented +} + func Set(ns NsHandle) (err error) { return ErrNotImplemented } @@ -18,6 +27,14 @@ func New() (ns NsHandle, err error) { return -1, ErrNotImplemented } +func NewNamed(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func DeleteNamed(name string) error { + return ErrNotImplemented +} + func Get() (NsHandle, error) { return -1, ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/nshandle_linux.go similarity index 75% rename from vendor/github.com/vishvananda/netns/netns.go rename to vendor/github.com/vishvananda/netns/nshandle_linux.go index 116befd548c..1baffb66acb 100644 --- a/vendor/github.com/vishvananda/netns/netns.go +++ b/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -1,11 +1,3 @@ -// Package netns allows ultra-simple network namespace handling. NsHandles -// can be retrieved and set. Note that the current namespace is thread -// local so actions that set and reset namespaces should use LockOSThread -// to make sure the namespace doesn't change due to a goroutine switch. -// It is best to close NsHandles when you are done with them. This can be -// accomplished via a `defer ns.Close()` on the handle. Changing namespaces -// requires elevated privileges, so in most cases this code needs to be run -// as root. 
package netns import ( @@ -38,7 +30,7 @@ func (ns NsHandle) Equal(other NsHandle) bool { // String shows the file descriptor number and its dev and inode. func (ns NsHandle) String() string { if ns == -1 { - return "NS(None)" + return "NS(none)" } var s unix.Stat_t if err := unix.Fstat(int(ns), &s); err != nil { @@ -71,7 +63,7 @@ func (ns *NsHandle) Close() error { if err := unix.Close(int(*ns)); err != nil { return err } - (*ns) = -1 + *ns = -1 return nil } diff --git a/vendor/github.com/vishvananda/netns/nshandle_others.go b/vendor/github.com/vishvananda/netns/nshandle_others.go new file mode 100644 index 00000000000..af727bc091c --- /dev/null +++ b/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package netns + +// NsHandle is a handle to a network namespace. It can only be used on Linux, +// but provides stub methods on other platforms. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. It is only implemented on Linux. +func (ns NsHandle) Equal(_ NsHandle) bool { + return false +} + +// String shows the file descriptor number and its dev and inode. +// It is only implemented on Linux, and returns "NS(none)" on other +// platforms. +func (ns NsHandle) String() string { + return "NS(none)" +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. It is only implemented on Linux, +// and returns "NS(none)" on other platforms. +func (ns NsHandle) UniqueId() string { + return "NS(none)" +} + +// IsOpen returns true if Close() has not been called. It is only implemented +// on Linux and always returns false on other platforms. +func (ns NsHandle) IsOpen() bool { + return false +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is only implemented on Linux. +func (ns *NsHandle) Close() error { + return nil +} + +// None gets an empty (closed) NsHandle. 
+func None() NsHandle { + return NsHandle(-1) +} diff --git a/vendor/github.com/willdonnelly/passwd/README b/vendor/github.com/willdonnelly/passwd/README deleted file mode 100644 index 0024c0a4f2a..00000000000 --- a/vendor/github.com/willdonnelly/passwd/README +++ /dev/null @@ -1,26 +0,0 @@ -> godoc github.com/willdonnelly/passwd - -PACKAGE - -package passwd - import "github.com/willdonnelly/passwd" - - -FUNCTIONS - -func Parse() (map[string]Entry, error) - Parse opens the '/etc/passwd' file and parses it into a map from - usernames to Entries - - -TYPES - -type Entry struct { - Pass string - Uid string - Gid string - Gecos string - Home string - Shell string -} - An Entry contains all the fields for a specific user diff --git a/vendor/github.com/willdonnelly/passwd/passwd.go b/vendor/github.com/willdonnelly/passwd/passwd.go deleted file mode 100644 index de9f6b462c6..00000000000 --- a/vendor/github.com/willdonnelly/passwd/passwd.go +++ /dev/null @@ -1,71 +0,0 @@ -// Package passwd parsed content and files in form of /etc/passwd -package passwd - -import ( - "bufio" - "errors" - "io" - "os" - "strings" -) - -// An Entry contains all the fields for a specific user -type Entry struct { - Pass string - Uid string - Gid string - Gecos string - Home string - Shell string -} - -// Parse opens the '/etc/passwd' file and parses it into a map from usernames -// to Entries -func Parse() (map[string]Entry, error) { - return ParseFile("/etc/passwd") -} - -// ParseFile opens the file and parses it into a map from usernames to Entries -func ParseFile(path string) (map[string]Entry, error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - - defer file.Close() - - return ParseReader(file) -} - -// ParseReader consumes the contents of r and parses it into a map from -// usernames to Entries -func ParseReader(r io.Reader) (map[string]Entry, error) { - lines := bufio.NewReader(r) - entries := make(map[string]Entry) - for { - line, _, err := lines.ReadLine() - if err != nil { - break - } - name, entry, err := parseLine(string(copyBytes(line))) - if err != nil { - return nil, err - } - entries[name] = entry - } - return entries, nil -} - -func parseLine(line string) (string, Entry, error) { - fs := strings.Split(line, ":") - if len(fs) != 7 { - return "", Entry{}, errors.New("Unexpected number of fields in /etc/passwd") - } - return fs[0], Entry{fs[1], fs[2], fs[3], fs[4], fs[5], fs[6]}, nil -} - -func copyBytes(x []byte) []byte { - y := make([]byte, len(x)) - copy(y, x) - return y -} diff --git a/vendor/github.com/zcalusic/sysinfo/README.md b/vendor/github.com/zcalusic/sysinfo/README.md index 3d4beee12a0..83cdfe555c8 100644 --- a/vendor/github.com/zcalusic/sysinfo/README.md +++ b/vendor/github.com/zcalusic/sysinfo/README.md @@ -1,10 +1,10 @@ # Sysinfo -[![Build Status](https://travis-ci.org/zcalusic/sysinfo.svg?branch=master)](https://travis-ci.org/zcalusic/sysinfo) +[![Build Status](https://img.shields.io/github/actions/workflow/status/zcalusic/sysinfo/test.yaml)](https://github.com/zcalusic/sysinfo/actions/workflows/test.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/zcalusic/sysinfo)](https://goreportcard.com/report/github.com/zcalusic/sysinfo) [![GoDoc](https://godoc.org/github.com/zcalusic/sysinfo?status.svg)](https://godoc.org/github.com/zcalusic/sysinfo) [![License](https://img.shields.io/badge/license-MIT-a31f34.svg?maxAge=2592000)](https://github.com/zcalusic/sysinfo/blob/master/LICENSE) -[![Powered 
by](https://img.shields.io/badge/powered_by-Go-5272b4.svg?maxAge=2592000)](https://golang.org/) +[![Powered by](https://img.shields.io/badge/powered_by-Go-5272b4.svg?maxAge=2592000)](https://go.dev/) [![Platform](https://img.shields.io/badge/platform-Linux-009bde.svg?maxAge=2592000)](https://www.linuxfoundation.org/) Package sysinfo is a Go library providing Linux OS / kernel / hardware system information. It's completely standalone, diff --git a/vendor/github.com/zcalusic/sysinfo/kernel_darwin.go b/vendor/github.com/zcalusic/sysinfo/kernel_darwin.go new file mode 100644 index 00000000000..b5868968873 --- /dev/null +++ b/vendor/github.com/zcalusic/sysinfo/kernel_darwin.go @@ -0,0 +1,13 @@ +//+build darwin + +package sysinfo + +// Kernel information. +type Kernel struct { + Release string `json:"release,omitempty"` + Version string `json:"version,omitempty"` + Architecture string `json:"architecture,omitempty"` +} + +func (si *SysInfo) getKernelInfo() { +} diff --git a/vendor/github.com/zcalusic/sysinfo/kernel.go b/vendor/github.com/zcalusic/sysinfo/kernel_linux.go similarity index 97% rename from vendor/github.com/zcalusic/sysinfo/kernel.go rename to vendor/github.com/zcalusic/sysinfo/kernel_linux.go index b0242150e47..64e1b6cec3a 100644 --- a/vendor/github.com/zcalusic/sysinfo/kernel.go +++ b/vendor/github.com/zcalusic/sysinfo/kernel_linux.go @@ -2,6 +2,8 @@ // // Use of this source code is governed by an MIT-style license that can be found in the LICENSE file. +//+build linux + package sysinfo import ( diff --git a/vendor/github.com/zcalusic/sysinfo/node.go b/vendor/github.com/zcalusic/sysinfo/node.go index e36f144124e..480ac0d0919 100644 --- a/vendor/github.com/zcalusic/sysinfo/node.go +++ b/vendor/github.com/zcalusic/sysinfo/node.go @@ -74,11 +74,14 @@ func (si *SysInfo) getSetMachineID() { } func (si *SysInfo) getTimezone() { + const zoneInfoPrefix = "/usr/share/zoneinfo/" + if fi, err := os.Lstat("/etc/localtime"); err == nil { if fi.Mode()&os.ModeSymlink == os.ModeSymlink { if tzfile, err := os.Readlink("/etc/localtime"); err == nil { - if strings.HasPrefix(tzfile, "/usr/share/zoneinfo/") { - si.Node.Timezone = strings.TrimPrefix(tzfile, "/usr/share/zoneinfo/") + tzfile = strings.TrimPrefix(tzfile, "..") + if strings.HasPrefix(tzfile, zoneInfoPrefix) { + si.Node.Timezone = strings.TrimPrefix(tzfile, zoneInfoPrefix) return } } diff --git a/vendor/github.com/zcalusic/sysinfo/os.go b/vendor/github.com/zcalusic/sysinfo/os.go index f6b716dfec0..82486eda75d 100644 --- a/vendor/github.com/zcalusic/sysinfo/os.go +++ b/vendor/github.com/zcalusic/sysinfo/os.go @@ -25,7 +25,7 @@ var ( reID = regexp.MustCompile(`^ID=(.*)$`) reVersionID = regexp.MustCompile(`^VERSION_ID=(.*)$`) reUbuntu = regexp.MustCompile(`[\( ]([\d\.]+)`) - reCentOS = regexp.MustCompile(`^CentOS( Linux)? release ([\d\.]+) `) + reCentOS = regexp.MustCompile(`^CentOS( Linux)? release ([\d\.]+)`) reRedHat = regexp.MustCompile(`[\( ]([\d\.]+)`) ) diff --git a/vendor/github.com/zcalusic/sysinfo/product.go b/vendor/github.com/zcalusic/sysinfo/product.go index 382b56d6d78..83b0897c8e4 100644 --- a/vendor/github.com/zcalusic/sysinfo/product.go +++ b/vendor/github.com/zcalusic/sysinfo/product.go @@ -4,12 +4,16 @@ package sysinfo +import "github.com/google/uuid" + // Product information. 
type Product struct { - Name string `json:"name,omitempty"` - Vendor string `json:"vendor,omitempty"` - Version string `json:"version,omitempty"` - Serial string `json:"serial,omitempty"` + Name string `json:"name,omitempty"` + Vendor string `json:"vendor,omitempty"` + Version string `json:"version,omitempty"` + Serial string `json:"serial,omitempty"` + UUID uuid.UUID `json:"uuid,omitempty"` + SKU string `json:"sku,omitempty"` } func (si *SysInfo) getProductInfo() { @@ -17,4 +21,10 @@ func (si *SysInfo) getProductInfo() { si.Product.Vendor = slurpFile("/sys/class/dmi/id/sys_vendor") si.Product.Version = slurpFile("/sys/class/dmi/id/product_version") si.Product.Serial = slurpFile("/sys/class/dmi/id/product_serial") + si.Product.SKU = slurpFile("/sys/class/dmi/id/product_sku") + + uid, err := uuid.Parse(slurpFile("/sys/class/dmi/id/product_uuid")) + if err == nil { + si.Product.UUID = uid + } } diff --git a/vendor/github.com/zcalusic/sysinfo/version.go b/vendor/github.com/zcalusic/sysinfo/version.go index 0b909393ccd..e20684c9809 100644 --- a/vendor/github.com/zcalusic/sysinfo/version.go +++ b/vendor/github.com/zcalusic/sysinfo/version.go @@ -5,4 +5,4 @@ package sysinfo // Version of the sysinfo library. -const Version = "0.9.5" +const Version = "1.0.2" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae2588..c672ccf6986 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ 
loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 +224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. @@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index d861bca5286..b4fbbf8695c 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -143,6 +143,12 @@ func (s *asmState) Write(b []byte) (int, error) { // Read squeezes an arbitrary number of bytes from the sponge. 
func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. + if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + n = len(out) // need to pad if we were absorbing @@ -202,8 +208,17 @@ func (s *asmState) Sum(b []byte) []byte { // Hash the buffer. Note that we don't clear it because we // aren't updating the state. - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } } // Reset resets the Hash to its initial state. diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 34bf089d0bb..9486c598623 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -404,10 +404,10 @@ func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, e return false, err } - return confirmKeyAck(key, algo, c) + return confirmKeyAck(key, c) } -func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { pubKey := key.Marshal() for { @@ -425,7 +425,15 @@ func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { + // According to RFC 4252 Section 7 the algorithm in + // SSH_MSG_USERAUTH_PK_OK should match that of the request but some + // servers send the key type instead. OpenSSH allows any algorithm + // that matches the public key, so we do the same. + // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 + if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + return false, nil + } + if !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index c2dfe3268c5..e2ae4f891bb 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -426,6 +426,35 @@ func (l ServerAuthError) Error() string { return "[" + strings.Join(errs, ", ") + "]" } +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. 
+ GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. + Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + // ErrNoAuth is the error value returned if no // authentication method has been passed yet. This happens as a normal // part of the authentication loop, since the client first tries @@ -439,8 +468,18 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err var perms *Permissions authFailures := 0 + noneAuthCount := 0 var authErrs []error var displayedBanner bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. + authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } userAuthLoop: for { @@ -471,6 +510,11 @@ userAuthLoop: return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) } + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + s.user = userAuthReq.User if !displayedBanner && config.BannerCallback != nil { @@ -491,20 +535,18 @@ userAuthLoop: switch userAuthReq.Method { case "none": - if config.NoClientAuth { + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. 
+ if config.NoClientAuth && !partialSuccessReturned { if config.NoClientAuthCallback != nil { perms, authErr = config.NoClientAuthCallback(s) } else { authErr = nil } } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } case "password": - if config.PasswordCallback == nil { + if authConfig.PasswordCallback == nil { authErr = errors.New("ssh: password auth not configured") break } @@ -518,17 +560,17 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } - perms, authErr = config.PasswordCallback(s, password) + perms, authErr = authConfig.PasswordCallback(s, password) case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { + if authConfig.KeyboardInteractiveCallback == nil { authErr = errors.New("ssh: keyboard-interactive auth not configured") break } prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) case "publickey": - if config.PublicKeyCallback == nil { + if authConfig.PublicKeyCallback == nil { authErr = errors.New("ssh: publickey auth not configured") break } @@ -562,11 +604,18 @@ userAuthLoop: if !ok { candidate.user = s.user candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + if err := checkSourceAddress( s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } } cache.add(candidate) } @@ -578,8 +627,8 @@ userAuthLoop: if len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - - if candidate.result == nil { + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { okMsg := userAuthPubKeyOkMsg{ Algo: algo, PubKey: pubKeyData, @@ -629,11 +678,11 @@ userAuthLoop: perms = candidate.perms } case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { + if authConfig.GSSAPIWithMICConfig == nil { authErr = errors.New("ssh: gssapi-with-mic auth not configured") break } - gssapiConfig := config.GSSAPIWithMICConfig + gssapiConfig := authConfig.GSSAPIWithMICConfig userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) if err != nil { return nil, parseError(msgUserAuthRequest) @@ -689,49 +738,70 @@ userAuthLoop: break userAuthLoop } - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. 
- // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue + var failureMsg userAuthFailureMsg + + if partialSuccess, ok := authErr.(*PartialSuccessError); ok { + // After a partial success error we don't allow changing the user + // name and execute the NoClientAuthCallback. + partialSuccessReturned = true + + // In case a partial success is returned, the server may send + // a new set of authentication methods. + authConfig = partialSuccess.Next + + // Reset pubkey cache, as the new PublicKeyCallback might + // accept a different set of public keys. + cache = pubKeyCache{} + + // Send back a partial success message to the user. + failureMsg.PartialSuccess = true + } else { + // Allow initial attempt of 'none' without penalty. + if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 { + authFailures++ + } + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. 
+ continue + } } - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { + if authConfig.PasswordCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "password") } - if config.PublicKeyCallback != nil { + if authConfig.PublicKeyCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "publickey") } - if config.KeyboardInteractiveCallback != nil { + if authConfig.KeyboardInteractiveCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") } if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + return nil, errors.New("ssh: no authentication methods available") } if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { diff --git a/vendor/golang.org/x/exp/AUTHORS b/vendor/golang.org/x/exp/AUTHORS deleted file mode 100644 index 15167cd746c..00000000000 --- a/vendor/golang.org/x/exp/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/exp/CONTRIBUTORS b/vendor/golang.org/x/exp/CONTRIBUTORS deleted file mode 100644 index 1c4577e9680..00000000000 --- a/vendor/golang.org/x/exp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 733099041f8..00000000000 --- a/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff47e..00000000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. 
-// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/vendor/golang.org/x/mod/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS deleted file mode 100644 index 733099041f8..00000000000 --- a/vendor/golang.org/x/mod/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go deleted file mode 100644 index 9a2dfd33a77..00000000000 --- a/vendor/golang.org/x/mod/semver/semver.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semver implements comparison of semantic version strings. -// In this package, semantic version strings must begin with a leading "v", -// as in "v1.0.0". -// -// The general form of a semantic version string accepted by this package is -// -// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] -// -// where square brackets indicate optional parts of the syntax; -// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; -// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers -// using only alphanumeric characters and hyphens; and -// all-numeric PRERELEASE identifiers must not have leading zeros. -// -// This package follows Semantic Versioning 2.0.0 (see semver.org) -// with two exceptions. First, it requires the "v" prefix. Second, it recognizes -// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) -// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. -package semver - -import "sort" - -// parsed returns the parsed form of a semantic version string. -type parsed struct { - major string - minor string - patch string - short string - prerelease string - build string -} - -// IsValid reports whether v is a valid semantic version string. -func IsValid(v string) bool { - _, ok := parse(v) - return ok -} - -// Canonical returns the canonical formatting of the semantic version v. -// It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. -// The canonical invalid semantic version is the empty string. -func Canonical(v string) string { - p, ok := parse(v) - if !ok { - return "" - } - if p.build != "" { - return v[:len(v)-len(p.build)] - } - if p.short != "" { - return v + p.short - } - return v -} - -// Major returns the major version prefix of the semantic version v. -// For example, Major("v2.1.0") == "v2". -// If v is an invalid semantic version string, Major returns the empty string. -func Major(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return v[:1+len(pv.major)] -} - -// MajorMinor returns the major.minor version prefix of the semantic version v. -// For example, MajorMinor("v2.1.0") == "v2.1". -// If v is an invalid semantic version string, MajorMinor returns the empty string. -func MajorMinor(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - i := 1 + len(pv.major) - if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { - return v[:j] - } - return v[:i] + "." 
+ pv.minor -} - -// Prerelease returns the prerelease suffix of the semantic version v. -// For example, Prerelease("v2.1.0-pre+meta") == "-pre". -// If v is an invalid semantic version string, Prerelease returns the empty string. -func Prerelease(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return pv.prerelease -} - -// Build returns the build suffix of the semantic version v. -// For example, Build("v2.1.0+meta") == "+meta". -// If v is an invalid semantic version string, Build returns the empty string. -func Build(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return pv.build -} - -// Compare returns an integer comparing two versions according to -// semantic version precedence. -// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. -// -// An invalid semantic version string is considered less than a valid one. -// All invalid semantic version strings compare equal to each other. -func Compare(v, w string) int { - pv, ok1 := parse(v) - pw, ok2 := parse(w) - if !ok1 && !ok2 { - return 0 - } - if !ok1 { - return -1 - } - if !ok2 { - return +1 - } - if c := compareInt(pv.major, pw.major); c != 0 { - return c - } - if c := compareInt(pv.minor, pw.minor); c != 0 { - return c - } - if c := compareInt(pv.patch, pw.patch); c != 0 { - return c - } - return comparePrerelease(pv.prerelease, pw.prerelease) -} - -// Max canonicalizes its arguments and then returns the version string -// that compares greater. -// -// Deprecated: use [Compare] instead. In most cases, returning a canonicalized -// version is not expected or desired. -func Max(v, w string) string { - v = Canonical(v) - w = Canonical(w) - if Compare(v, w) > 0 { - return v - } - return w -} - -// ByVersion implements [sort.Interface] for sorting semantic version strings. -type ByVersion []string - -func (vs ByVersion) Len() int { return len(vs) } -func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } -func (vs ByVersion) Less(i, j int) bool { - cmp := Compare(vs[i], vs[j]) - if cmp != 0 { - return cmp < 0 - } - return vs[i] < vs[j] -} - -// Sort sorts a list of semantic version strings using [ByVersion]. -func Sort(list []string) { - sort.Sort(ByVersion(list)) -} - -func parse(v string) (p parsed, ok bool) { - if v == "" || v[0] != 'v' { - return - } - p.major, v, ok = parseInt(v[1:]) - if !ok { - return - } - if v == "" { - p.minor = "0" - p.patch = "0" - p.short = ".0.0" - return - } - if v[0] != '.' { - ok = false - return - } - p.minor, v, ok = parseInt(v[1:]) - if !ok { - return - } - if v == "" { - p.patch = "0" - p.short = ".0" - return - } - if v[0] != '.' { - ok = false - return - } - p.patch, v, ok = parseInt(v[1:]) - if !ok { - return - } - if len(v) > 0 && v[0] == '-' { - p.prerelease, v, ok = parsePrerelease(v) - if !ok { - return - } - } - if len(v) > 0 && v[0] == '+' { - p.build, v, ok = parseBuild(v) - if !ok { - return - } - } - if v != "" { - ok = false - return - } - ok = true - return -} - -func parseInt(v string) (t, rest string, ok bool) { - if v == "" { - return - } - if v[0] < '0' || '9' < v[0] { - return - } - i := 1 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - if v[0] == '0' && i != 1 { - return - } - return v[:i], v[i:], true -} - -func parsePrerelease(v string) (t, rest string, ok bool) { - // "A pre-release version MAY be denoted by appending a hyphen and - // a series of dot separated identifiers immediately following the patch version. - // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. 
- // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." - if v == "" || v[0] != '-' { - return - } - i := 1 - start := 1 - for i < len(v) && v[i] != '+' { - if !isIdentChar(v[i]) && v[i] != '.' { - return - } - if v[i] == '.' { - if start == i || isBadNum(v[start:i]) { - return - } - start = i + 1 - } - i++ - } - if start == i || isBadNum(v[start:i]) { - return - } - return v[:i], v[i:], true -} - -func parseBuild(v string) (t, rest string, ok bool) { - if v == "" || v[0] != '+' { - return - } - i := 1 - start := 1 - for i < len(v) { - if !isIdentChar(v[i]) && v[i] != '.' { - return - } - if v[i] == '.' { - if start == i { - return - } - start = i + 1 - } - i++ - } - if start == i { - return - } - return v[:i], v[i:], true -} - -func isIdentChar(c byte) bool { - return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' -} - -func isBadNum(v string) bool { - i := 0 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - return i == len(v) && i > 1 && v[0] == '0' -} - -func isNum(v string) bool { - i := 0 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - return i == len(v) -} - -func compareInt(x, y string) int { - if x == y { - return 0 - } - if len(x) < len(y) { - return -1 - } - if len(x) > len(y) { - return +1 - } - if x < y { - return -1 - } else { - return +1 - } -} - -func comparePrerelease(x, y string) int { - // "When major, minor, and patch are equal, a pre-release version has - // lower precedence than a normal version. - // Example: 1.0.0-alpha < 1.0.0. - // Precedence for two pre-release versions with the same major, minor, - // and patch version MUST be determined by comparing each dot separated - // identifier from left to right until a difference is found as follows: - // identifiers consisting of only digits are compared numerically and - // identifiers with letters or hyphens are compared lexically in ASCII - // sort order. Numeric identifiers always have lower precedence than - // non-numeric identifiers. A larger set of pre-release fields has a - // higher precedence than a smaller set, if all of the preceding - // identifiers are equal. - // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < - // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." - if x == y { - return 0 - } - if x == "" { - return +1 - } - if y == "" { - return -1 - } - for x != "" && y != "" { - x = x[1:] // skip - or . - y = y[1:] // skip - or . - var dx, dy string - dx, x = nextIdent(x) - dy, y = nextIdent(y) - if dx != dy { - ix := isNum(dx) - iy := isNum(dy) - if ix != iy { - if ix { - return -1 - } else { - return +1 - } - } - if ix { - if len(dx) < len(dy) { - return -1 - } - if len(dx) > len(dy) { - return +1 - } - } - if dx < dy { - return -1 - } else { - return +1 - } - } - } - if x == "" { - return -1 - } else { - return +1 - } -} - -func nextIdent(x string) (dx, rest string) { - i := 0 - for i < len(x) && x[i] != '.' { - i++ - } - return x[:i], x[i:] -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 2466ae3d9a5..3a7e5ab1765 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. 
While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. +specification, dynamically modifying or extending the document's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 4756ad5f795..8fa707aa4ba 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,6 +103,7 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f3eb993bf24..0e27a21e1f8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,6 +28,7 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -164,6 +165,15 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index fcb9a388820..22cc99844a7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -29,3 +29,11 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index a8acd3e3285..6ac6e1efb20 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -9,3 +9,4 @@ package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index a968b80fa6a..3d386d0fc21 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,6 +35,8 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -104,6 +106,9 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s 
b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 2f67ba86d57..813dfad7d26 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -9,9 +9,11 @@ #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -19,405 +21,362 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + BYTE $0x0D // Branch to function + BYTE $0xEF - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) +error: + MOVD $0, ret+8(FP) // Return 0 on failure - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) done: + XOR R0, R0 // Reset r0 to 0 RET -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code + RET +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD CEECAATHDID(R9), R9 + MOVD R9, ret+0(FP) - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. 
- MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + done: + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. 
- LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD R3, ret+32(FP) + CMP R3, $0 // compare result to 0 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - RET - -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) - - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 - - BYTE $0x0D // Branch to function - BYTE $0xEF - - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 - - RET - -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + XOR R2, R2 + MOVWZ R2, (R3) // clear errno2 -error: - MOVD $0, addr+8(FP) // Return 0 on failure done: - XOR R0, R0 // Reset r0 to 0 + MOVD R4, 0(R9) // Save stack pointer. 
RET
-// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
-TEXT ·svcUnload(SB),NOSPLIT,$0
- MOVD R15, R2 // Save go stack pointer
- MOVD name+0(FP), R0 // Move SVC args into registers
- MOVD addr+8(FP), R15
- BYTE $0x0A // SVC 09
- BYTE $0x09
- XOR R0, R0 // Reset r0 to 0
- MOVD R15, R1 // Save SVC return code
- MOVD R2, R15 // Restore go stack pointer
- MOVD R1, rc+0(FP) // Return SVC return code
+//
+// function to test if a pointer can be safely dereferenced (content read)
+// return 0 for success
+//
+TEXT ·ptrtest(SB), NOSPLIT, $0-16
+ MOVD arg+0(FP), R10 // test pointer in R10
+
+ // set up R2 to point to CEECAADMC
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+
+ // set up R5 to point to the "shunt" path which sets R3 to 1 (failure)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+
+ // if r3 is not zero (failed) then branch to finish
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+
+ // store shunt address in R5 into CEECAADMC
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+
+ // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above
+ BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
+
+ // finish here, restore 0 into CEECAADMC
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R3, ret+8(FP) // result in R3
RET
-// func gettid() uint64
-TEXT ·gettid(SB), NOSPLIT, $0
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get CEECAATHDID
- MOVD CAA(R8), R9
- MOVD 0x3D0(R9), R9
- MOVD R9, ret+0(FP)
-
+//
+// function to test if a uintptr can be loaded from a pointer
+// return 1: the 8-byte content
+// 2: 0 for success, 1 for failure
+//
+// func safeload(ptr uintptr) ( value uintptr, error uintptr)
+TEXT ·safeload(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R10 // test pointer in R10
+ MOVD $0x0, R6
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+ BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R6, value+8(FP) // result in R6
+ MOVD R3, error+16(FP) // error in R3
RET
diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
new file mode 100644
index 00000000000..39d647d863a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
@@ -0,0 +1,657 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + 
St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + +func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = 
unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General 
purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 00000000000..4bd4a179821 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; 
BYTE $0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; 
BYTE $0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index 7753fddea81..00000000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? 
-type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. -type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. -var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. 
- if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index c8bde601e77..00000000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. 
- -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), 
buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e59780a..7f602ffd26d 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 4d0a3430edc..0482408d7c6 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 130398b6b76..b903c00604b 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 00000000000..3e53dbc0286 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. 
+func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 00000000000..3c4f33cb6a8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. + +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c615..312ae6ac1d2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -4,11 +4,21 @@ //go:build zos && s390x +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. 
+ package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -17,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 
0 { + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? +func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -65,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -74,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port 
>> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -88,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -146,7 +363,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -155,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -177,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -190,10 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -204,6 +468,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err 
error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -212,6 +477,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -220,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -236,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A +//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys 
Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -272,13 +738,19 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, 
e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -288,9 +760,15 @@ func Madvise(b []byte, advice int) (err error) { return } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -317,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -332,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -360,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path 
string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } + return openat(dirfd, path, flags, mode) +} - if err := Fchdir(dirfd); err != nil { - return err +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY } - defer Chdir(wd) + return openat2(dirfd, path, how, SizeofOpenHow) +} - return Mkfifo(path, mode) +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -403,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -520,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS 
doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. var status _C_int @@ -571,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -600,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1186,67 +1964,46 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) 
return n, nil } -func Opendir(name string) (uintptr, error) { - p, err := BytePtrFromString(name) - if err != nil { - return 0, err - } - dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) - runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) - } - return dir, err -} - -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() - -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err } - return + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() + runtime.KeepAlive(unsafe.Pointer(p)) + if dir == 0 { + err = errnoErr2(e1, e2) + } + return dir, err } +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1261,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) +} + +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, 
uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) } -func Flock(fd int, how int) error { +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1307,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1372,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag + } + return (*funcref)(path, ccsid, textbit) +} + +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + 
+//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 } - return Fstatfs(fd, stat) + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + if rv != 0 && rc != 112 { + err = Errno(rc) + } + return err } +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1395,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1416,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. 
func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1474,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1778,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount + } else { + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] + fstype = fstype[0:8] } else { - fstype += " "[:needspace] + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return 
(*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount(name, mtm) + return unmount_LE(name, mtm) } // treat name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1896,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1910,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1956,23 +2991,127 @@ 
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) +func Err2ad() (eadd *int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 + } + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil +} +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return } -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } } -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
+//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, "") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) } + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 79a84f18b46..672d6b0a880 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 9eb0db664cb..8b7977a28c0 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36bf8399f4f..93a38a97d9c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -491,6 +491,7 @@ const ( BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -1697,6 +1698,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 @@ -1898,6 +1900,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2302,6 +2305,7 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -3168,6 +3172,7 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3562,12 +3567,16 @@ const ( XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 XDP_USE_SG = 0x10 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index 4dfd2e051d3..da08b2ab3d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -10,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -152,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 + MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + 
MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -165,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -182,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -205,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -248,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -285,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -378,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -427,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -452,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos 
semantics - AT_FDCWD = 2 // for Unix compatibility -- no zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -476,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -700,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -743,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 00000000000..b77ff5db90d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT 
·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 94f01123831..7ccf66b7ee0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x @@ -6,17 +6,100 @@ package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS return } @@ -29,10 +112,12 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -46,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd 
int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -78,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -88,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -99,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -109,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -119,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -129,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -140,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -150,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -160,10 +379,52 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr 
} + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS return } @@ -176,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -193,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -203,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -214,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -225,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -236,9 +507,11 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := 
syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -246,9 +519,11 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -256,9 +531,62 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -271,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -286,9 +616,11 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -301,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -316,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -331,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -342,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -353,617 +693,2216 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) - return -} +var Dup3 = enter_Dup3 -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref 
= error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} -func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if 
funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl } - return + return (*funcref)(epfd, op, fd, event) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + *funcref = error_EpollPwait } + return (*funcref)(epfd, events, msec, sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
Poll(fds []PollFd, timeout int) (n int, err error) { +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd + } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { +func Exit(code int) { + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(parm) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) -func unmount(filesystem string, mtm int) (err error) { +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(filesystem) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), 
uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() + retval = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gethostname(buf []byte) (err error) { +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) 
(err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat + } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr + } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func 
enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimes(fd int, tv []Timeval) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes + } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getgid() (gid int) { - r0, _, _ := 
syscall_rawsyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func Ftruncate(fd int, length int64) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 + } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + } else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + 
err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { 
+ err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() + ticks = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount_LE(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + 
funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) + pgid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() + prio = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) + sid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func Kill(pid int, sig Signal) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
+//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) -func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl + } else { + *funcref = error_Prctl } - return + return (*funcref)(option, arg2, arg3, arg4, arg5) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) - uid = 
int(r0) +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) -func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { +func Rename(from string, to string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Link(path string, link string) (err error) { +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(link) + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) -func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_LE_t) (err error) { +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) +func Seek(fd int, offset int64, whence int) (off int64, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() + off = int64(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), 
uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func impl_Sethostname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Sethostname } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return + return (*funcref)(p) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Sethostname(p []byte) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Setns(fd int, nstype int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref 
= error_Setns } + return (*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -971,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -981,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -991,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1001,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1011,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1022,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1032,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) 
- if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1042,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1057,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1077,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1099,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1109,9 +3104,11 @@ func Truncate(path string, length 
int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1119,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1129,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1142,10 +3143,49 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat } + return (*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS return } @@ -1157,9 +3197,11 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1172,11 +3214,91 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } @@ -1188,9 +3310,23 @@ func remove(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1198,10 +3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) 
(wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1209,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1219,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1234,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if 
int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 0cc3ce496e2..53aef5dc58d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -452,4 +452,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 856d92d69ef..71d524763d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -374,4 +374,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 8d467094cf5..c747706131c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -416,4 +416,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index edc173244d0..f96e214f6d4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -319,4 +319,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 445eba20615..28425346cf1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -313,4 +313,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index adba01bca70..d0953018dae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 
4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 014c4e9c7a7..295c7f4b818 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index ccc97d74d05..d1a9eaca7a4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ec2b64a95d7..bec157c39fd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 21a839e338b..7ee7bdc435c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -443,4 +443,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index c11121ec3b4..fad1f25b449 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 909b631fcb4..7d3e16357d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index e49bed16ea6..0ed53ad9f7e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -320,4 +320,9 @@ const ( SYS_FUTEX_WAKE = 
454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 66017d2d32b..2fba04ad500 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -381,4 +381,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 47bab18dced..621d00d741b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -394,4 +394,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index b2e30858199..5e8c263ca9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2669 +1,2852 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x package unix -// TODO: auto-generate. 
- const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - 
SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - 
SYS_NEXTAFTERD64 = 0xC00 - SYS_NEXTAFTERF = 0xAB7 - SYS_NEXTAFTERL = 0xABA - SYS_NEXTTOWARD = 0xABD - SYS_NEXTTOWARDD128 = 0xC04 - SYS_NEXTTOWARDD32 = 0xC02 - SYS_NEXTTOWARDD64 = 0xC03 - SYS_NEXTTOWARDF = 0xAC0 - SYS_NEXTTOWARDL = 0xAC3 - SYS_NL_LANGINFO = 0x0FC - SYS_PERROR_UNLOCKED = 0xCBD - SYS_POSIX_FALLOCATE = 0xCE8 - SYS_POSIX_MEMALIGN = 0xCE9 - SYS_POSIX_OPENPT = 0xC66 - SYS_POWD128 = 0xC07 - SYS_POWD32 = 0xC05 - SYS_POWD64 = 0xC06 - SYS_PRINTF_UNLOCKED = 0xCBE - SYS_PSELECT = 0xC67 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 - SYS_PUTS_UNLOCKED = 0xC7F - SYS_PUTWCHAR_UNLOCKED = 0xCC0 - SYS_PUTWC_UNLOCKED = 0xCBF - SYS_QUANTEXPD128 = 0xD46 - SYS_QUANTEXPD32 = 0xD44 - SYS_QUANTEXPD64 = 0xD45 - SYS_QUANTIZED128 = 0xC0A - SYS_QUANTIZED32 = 0xC08 - SYS_QUANTIZED64 = 0xC09 - SYS_REMAINDERD128 = 0xC0D - SYS_REMAINDERD32 = 0xC0B - SYS_REMAINDERD64 = 0xC0C - SYS_RESIZE_ALLOC = 0xCEB - SYS_REWIND_UNLOCKED = 0xCC1 - SYS_RINTD128 = 0xC13 - SYS_RINTD32 = 0xC11 - SYS_RINTD64 = 0xC12 - SYS_RINTF = 0xACB - SYS_RINTL = 0xACD - SYS_ROUND = 0xACF - SYS_ROUNDD128 = 0xC16 - SYS_ROUNDD32 = 0xC14 - SYS_ROUNDD64 = 0xC15 - SYS_ROUNDF = 0xAD2 - SYS_ROUNDL = 0xAD5 - SYS_SAMEQUANTUMD128 = 0xC19 - SYS_SAMEQUANTUMD32 = 0xC17 - SYS_SAMEQUANTUMD64 = 0xC18 - SYS_SCALBLN = 0xAD8 - SYS_SCALBLND128 = 0xC1C - SYS_SCALBLND32 = 0xC1A - SYS_SCALBLND64 = 0xC1B - SYS_SCALBLNF = 0xADB - SYS_SCALBLNL = 0xADE - SYS_SCALBND128 = 0xC1F - SYS_SCALBND32 = 0xC1D - SYS_SCALBND64 = 0xC1E - SYS_SCALBNF = 0xAE3 - SYS_SCALBNL = 0xAE6 - SYS_SCANF_UNLOCKED = 0xCC2 - SYS_SCHED_YIELD = 0xB32 - SYS_SETENV = 0x0C8 - SYS_SETIPV4SOURCEFILTER = 0xC76 - SYS_SETSOURCEFILTER = 0xC78 - SYS_SHM_OPEN = 0xC8C - SYS_SHM_UNLINK = 0xC8D - SYS_SIND128 = 0xC22 - SYS_SIND32 = 0xC20 - SYS_SIND64 = 0xC21 - SYS_SINHD128 = 0xC25 - SYS_SINHD32 = 0xC23 - SYS_SINHD64 = 0xC24 - SYS_SIZEOF_ALLOC = 0xCEA - SYS_SOCKATMARK = 0xC68 - SYS_SQRTD128 = 0xC28 - SYS_SQRTD32 = 0xC26 - SYS_SQRTD64 = 0xC27 - SYS_STRCHR = 0x0A0 - SYS_STRCSPN = 0x0A1 - SYS_STRERROR = 0x0A8 - SYS_STRERROR_R = 0xB33 - SYS_STRFTIME = 0x0B2 - SYS_STRLEN = 0x0A9 - SYS_STRPBRK = 0x0A2 - SYS_STRSPN = 0x0A4 - SYS_STRSTR = 0x0A5 - SYS_STRTOD128 = 0xC2B - SYS_STRTOD32 = 0xC29 - SYS_STRTOD64 = 0xC2A - SYS_STRTOK = 0x0A6 - SYS_TAND128 = 0xC2E - SYS_TAND32 = 0xC2C - SYS_TAND64 = 0xC2D - SYS_TANHD128 = 0xC31 - SYS_TANHD32 = 0xC2F - SYS_TANHD64 = 0xC30 - SYS_TGAMMAD128 = 0xC34 - SYS_TGAMMAD32 = 0xC32 - SYS_TGAMMAD64 = 0xC33 - SYS_TIME = 0x0AD - SYS_TIME64 = 0xCE1 - SYS_TMPFILE64 = 0xD4D - SYS_TMPFILE64_UNLOCKED = 0xD4E - SYS_TMPFILE_UNLOCKED = 0xCFD - SYS_TRUNCD128 = 0xC40 - SYS_TRUNCD32 = 0xC3E - SYS_TRUNCD64 = 0xC3F - SYS_UNGETC_UNLOCKED = 0xCC3 - SYS_UNGETWC_UNLOCKED = 0xCC4 - SYS_UNSETENV = 0xB34 - SYS_VFPRINTF_UNLOCKED = 0xCC5 - SYS_VFSCANF_UNLOCKED = 0xCC7 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 - SYS_VFWSCANF_UNLOCKED = 0xCCB - SYS_VPRINTF_UNLOCKED = 0xCCD - SYS_VSCANF_UNLOCKED = 0xCCF - SYS_VWPRINTF_UNLOCKED = 0xCD1 - SYS_VWSCANF_UNLOCKED = 0xCD3 - SYS_WCSTOD128 = 0xC43 - SYS_WCSTOD32 = 0xC41 - SYS_WCSTOD64 = 0xC42 - SYS_WPRINTF_UNLOCKED = 0xCD5 - SYS_WSCANF_UNLOCKED = 0xCD6 - SYS__FLUSHLBF = 0xD68 - SYS__FLUSHLBF_UNLOCKED = 0xD6F - SYS___ACOSHF_H = 0xA54 - SYS___ACOSHL_H = 0xA55 - SYS___ASINHF_H = 0xA56 - SYS___ASINHL_H = 0xA57 - SYS___ATANPID128 = 0xC6D - SYS___ATANPID32 = 0xC6B - SYS___ATANPID64 = 0xC6C - SYS___CBRTF_H = 0xA58 - SYS___CBRTL_H = 0xA59 - SYS___CDUMP = 0x0C4 - SYS___CLASS = 0xAFA - SYS___CLASS2 = 0xB99 - SYS___CLASS2D128 = 
0xC99 - SYS___CLASS2D32 = 0xC97 - SYS___CLASS2D64 = 0xC98 - SYS___CLASS2F = 0xC91 - SYS___CLASS2F_B = 0xC93 - SYS___CLASS2F_H = 0xC94 - SYS___CLASS2L = 0xC92 - SYS___CLASS2L_B = 0xC95 - SYS___CLASS2L_H = 0xC96 - SYS___CLASS2_B = 0xB9A - SYS___CLASS2_H = 0xB9B - SYS___CLASS_B = 0xAFB - SYS___CLASS_H = 0xAFC - SYS___CLOGL_B = 0xA01 - SYS___CLOGL_H = 0xA02 - SYS___CLRENV = 0x0C9 - SYS___CLRMF = 0x0BD - SYS___CODEPAGE_INFO = 0xC64 - SYS___CONJF_B = 0xA07 - SYS___CONJF_H = 0xA08 - SYS___CONJL_B = 0xA0A - SYS___CONJL_H = 0xA0B - SYS___CONJ_B = 0xA04 - SYS___CONJ_H = 0xA05 - SYS___COPYSIGN_B = 0xA5A - SYS___COPYSIGN_H = 0xAF5 - SYS___COSPID128 = 0xC70 - SYS___COSPID32 = 0xC6E - SYS___COSPID64 = 0xC6F - SYS___CPOWF_B = 0xA10 - SYS___CPOWF_H = 0xA11 - SYS___CPOWL_B = 0xA13 - SYS___CPOWL_H = 0xA14 - SYS___CPOW_B = 0xA0D - SYS___CPOW_H = 0xA0E - SYS___CPROJF_B = 0xA19 - SYS___CPROJF_H = 0xA1A - SYS___CPROJL_B = 0xA1C - SYS___CPROJL_H = 0xA1D - SYS___CPROJ_B = 0xA16 - SYS___CPROJ_H = 0xA17 - SYS___CREALF_B = 0xA22 - SYS___CREALF_H = 0xA23 - SYS___CREALL_B = 0xA25 - SYS___CREALL_H = 0xA26 - SYS___CREAL_B = 0xA1F - SYS___CREAL_H = 0xA20 - SYS___CSINF_B = 0xA2B - SYS___CSINF_H = 0xA2C - SYS___CSINHF_B = 0xA34 - SYS___CSINHF_H = 0xA35 - SYS___CSINHL_B = 0xA37 - SYS___CSINHL_H = 0xA38 - SYS___CSINH_B = 0xA31 - SYS___CSINH_H = 0xA32 - SYS___CSINL_B = 0xA2E - SYS___CSINL_H = 0xA2F - SYS___CSIN_B = 0xA28 - SYS___CSIN_H = 0xA29 - SYS___CSNAP = 0x0C5 - SYS___CSQRTF_B = 0xA3D - SYS___CSQRTF_H = 0xA3E - SYS___CSQRTL_B = 0xA40 - SYS___CSQRTL_H = 0xA41 - SYS___CSQRT_B = 0xA3A - SYS___CSQRT_H = 0xA3B - SYS___CTANF_B = 0xA46 - SYS___CTANF_H = 0xA47 - SYS___CTANHF_B = 0xA4F - SYS___CTANHF_H = 0xA50 - SYS___CTANHL_B = 0xA52 - SYS___CTANHL_H = 0xA53 - SYS___CTANH_B = 0xA4C - SYS___CTANH_H = 0xA4D - SYS___CTANL_B = 0xA49 - SYS___CTANL_H = 0xA4A - SYS___CTAN_B = 0xA43 - SYS___CTAN_H = 0xA44 - SYS___CTEST = 0x0C7 - SYS___CTRACE = 0x0C6 - SYS___D1TOP = 0xC9B - SYS___D2TOP = 0xC9C - SYS___D4TOP = 0xC9D - SYS___DYNALL = 0x0C3 - SYS___DYNFRE = 0x0C2 - SYS___EXP2F_H = 0xA5E - SYS___EXP2L_H = 0xA5F - SYS___EXP2_H = 0xA5D - SYS___EXPM1F_H = 0xA5B - SYS___EXPM1L_H = 0xA5C - SYS___FBUFSIZE = 0xD60 - SYS___FLBF = 0xD62 - SYS___FLDATA = 0x0C1 - SYS___FMAF_B = 0xA67 - SYS___FMAF_H = 0xA68 - SYS___FMAL_B = 0xA6A - SYS___FMAL_H = 0xA6B - SYS___FMAXF_B = 0xA70 - SYS___FMAXF_H = 0xA71 - SYS___FMAXL_B = 0xA73 - SYS___FMAXL_H = 0xA74 - SYS___FMAX_B = 0xA6D - SYS___FMAX_H = 0xA6E - SYS___FMA_B = 0xA64 - SYS___FMA_H = 0xA65 - SYS___FMINF_B = 0xA79 - SYS___FMINF_H = 0xA7A - SYS___FMINL_B = 0xA7C - SYS___FMINL_H = 0xA7D - SYS___FMIN_B = 0xA76 - SYS___FMIN_H = 0xA77 - SYS___FPENDING = 0xD61 - SYS___FPENDING_UNLOCKED = 0xD6C - SYS___FPURGE = 0xD69 - SYS___FPURGE_UNLOCKED = 0xD70 - SYS___FP_CAST_D = 0xBCB - SYS___FREADABLE = 0xD63 - SYS___FREADAHEAD = 0xD6A - SYS___FREADAHEAD_UNLOCKED = 0xD71 - SYS___FREADING = 0xD65 - SYS___FREADING_UNLOCKED = 0xD6D - SYS___FSEEK2 = 0xB3C - SYS___FSETERR = 0xD6B - SYS___FSETLOCKING = 0xD67 - SYS___FTCHEP = 0x0BF - SYS___FTELL2 = 0xB3B - SYS___FUPDT = 0x0B5 - SYS___FWRITABLE = 0xD64 - SYS___FWRITING = 0xD66 - SYS___FWRITING_UNLOCKED = 0xD6E - SYS___GETCB = 0x0B4 - SYS___GETGRGID1 = 0xD5B - SYS___GETGRNAM1 = 0xD5C - SYS___GETTHENT = 0xCE5 - SYS___GETTOD = 0xD3E - SYS___HYPOTF_H = 0xAF6 - SYS___HYPOTL_H = 0xAF7 - SYS___ILOGBF_B = 0xA7F - SYS___ILOGBF_H = 0xA80 - SYS___ILOGBL_B = 0xA82 - SYS___ILOGBL_H = 0xA83 - SYS___ISBLANK_A = 0xB2E - SYS___ISBLNK = 0x0FE - SYS___ISWBLANK_A = 0xB2F - SYS___LE_CEEGTJS = 0xD72 - 
SYS___LE_TRACEBACK = 0xB7A - SYS___LGAMMAL_H = 0xA62 - SYS___LGAMMA_B_C99 = 0xB39 - SYS___LGAMMA_H_C99 = 0xB38 - SYS___LGAMMA_R_C99 = 0xB3A - SYS___LLRINTF_B = 0xA88 - SYS___LLRINTF_H = 0xA89 - SYS___LLRINTL_B = 0xA8B - SYS___LLRINTL_H = 0xA8C - SYS___LLRINT_B = 0xA85 - SYS___LLRINT_H = 0xA86 - SYS___LLROUNDF_B = 0xA91 - SYS___LLROUNDF_H = 0xA92 - SYS___LLROUNDL_B = 0xA94 - SYS___LLROUNDL_H = 0xA95 - SYS___LLROUND_B = 0xA8E - SYS___LLROUND_H = 0xA8F - SYS___LOCALE_CTL = 0xD47 - SYS___LOG1PF_H = 0xA60 - SYS___LOG1PL_H = 0xA61 - SYS___LOGBF_B = 0xA97 - SYS___LOGBF_H = 0xA98 - SYS___LOGBL_B = 0xA9A - SYS___LOGBL_H = 0xA9B - SYS___LOGIN_APPLID = 0xCE2 - SYS___LRINTF_B = 0xAA0 - SYS___LRINTF_H = 0xAA1 - SYS___LRINTL_B = 0xAA3 - SYS___LRINTL_H = 0xAA4 - SYS___LRINT_B = 0xA9D - SYS___LRINT_H = 0xA9E - SYS___LROUNDF_FIXUP = 0xB31 - SYS___LROUNDL_B = 0xAA6 - SYS___LROUNDL_H = 0xAA7 - SYS___LROUND_FIXUP = 0xB30 - SYS___MOSERVICES = 0xD3D - SYS___MUST_STAY_CLEAN = 0xB7C - SYS___NANF_B = 0xAAB - SYS___NANL_B = 0xAAD - SYS___NAN_B = 0xAA9 - SYS___NEARBYINTF_B = 0xAB2 - SYS___NEARBYINTF_H = 0xAB3 - SYS___NEARBYINTL_B = 0xAB5 - SYS___NEARBYINTL_H = 0xAB6 - SYS___NEARBYINT_B = 0xAAF - SYS___NEARBYINT_H = 0xAB0 - SYS___NEXTAFTERF_B = 0xAB8 - SYS___NEXTAFTERF_H = 0xAB9 - SYS___NEXTAFTERL_B = 0xABB - SYS___NEXTAFTERL_H = 0xABC - SYS___NEXTTOWARDF_B = 0xAC1 - SYS___NEXTTOWARDF_H = 0xAC2 - SYS___NEXTTOWARDL_B = 0xAC4 - SYS___NEXTTOWARDL_H = 0xAC5 - SYS___NEXTTOWARD_B = 0xABE - SYS___NEXTTOWARD_H = 0xABF - SYS___O_ENV = 0xB7D - SYS___PASSWD_APPLID = 0xCE3 - SYS___PTOD1 = 0xC9E - SYS___PTOD2 = 0xC9F - SYS___PTOD4 = 0xCA0 - SYS___REGCOMP_STD = 0x0EA - SYS___REMAINDERF_H = 0xAC6 - SYS___REMAINDERL_H = 0xAC7 - SYS___REMQUOD128 = 0xC10 - SYS___REMQUOD32 = 0xC0E - SYS___REMQUOD64 = 0xC0F - SYS___REMQUOF_H = 0xAC9 - SYS___REMQUOL_H = 0xACA - SYS___REMQUO_H = 0xAC8 - SYS___RINTF_B = 0xACC - SYS___RINTL_B = 0xACE - SYS___ROUNDF_B = 0xAD3 - SYS___ROUNDF_H = 0xAD4 - SYS___ROUNDL_B = 0xAD6 - SYS___ROUNDL_H = 0xAD7 - SYS___ROUND_B = 0xAD0 - SYS___ROUND_H = 0xAD1 - SYS___SCALBLNF_B = 0xADC - SYS___SCALBLNF_H = 0xADD - SYS___SCALBLNL_B = 0xADF - SYS___SCALBLNL_H = 0xAE0 - SYS___SCALBLN_B = 0xAD9 - SYS___SCALBLN_H = 0xADA - SYS___SCALBNF_B = 0xAE4 - SYS___SCALBNF_H = 0xAE5 - SYS___SCALBNL_B = 0xAE7 - SYS___SCALBNL_H = 0xAE8 - SYS___SCALBN_B = 0xAE1 - SYS___SCALBN_H = 0xAE2 - SYS___SETENV = 0x0C8 - SYS___SINPID128 = 0xC73 - SYS___SINPID32 = 0xC71 - SYS___SINPID64 = 0xC72 - SYS___SMF_RECORD2 = 0xD48 - SYS___STATIC_REINIT = 0xB3D - SYS___TGAMMAF_H_C99 = 0xB79 - SYS___TGAMMAL_H = 0xAE9 - SYS___TGAMMA_H_C99 = 0xB78 - SYS___TOCSNAME2 = 0xC9A - SYS_CEIL = 0x01F - SYS_CHAUDIT = 0x1E0 - SYS_EXP = 0x01A - SYS_FCHAUDIT = 0x1E1 - SYS_FREXP = 0x01D - SYS_GETGROUPSBYNAME = 0x1E2 - SYS_GETPWUID = 0x1A0 - SYS_GETUID = 0x1A1 - SYS_ISATTY = 0x1A3 - SYS_KILL = 0x1A4 - SYS_LDEXP = 0x01E - SYS_LINK = 0x1A5 - SYS_LOG10 = 0x01C - SYS_LSEEK = 0x1A6 - SYS_LSTAT = 0x1A7 - SYS_MKDIR = 0x1A8 - SYS_MKFIFO = 0x1A9 - SYS_MKNOD = 0x1AA - SYS_MODF = 0x01B - SYS_MOUNT = 0x1AB - SYS_OPEN = 0x1AC - SYS_OPENDIR = 0x1AD - SYS_PATHCONF = 0x1AE - SYS_PAUSE = 0x1AF - SYS_PIPE = 0x1B0 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED - SYS_PTHREAD_ATTR_INIT = 0x1E6 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC - SYS_PTHREAD_CANCEL = 0x1EE - SYS_PTHREAD_CLEANUP_POP = 0x1F0 - 
SYS_PTHREAD_CLEANUP_PUSH = 0x1EF - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 - SYS_PTHREAD_COND_DESTROY = 0x1F4 - SYS_PTHREAD_COND_INIT = 0x1F3 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 - SYS_PTHREAD_COND_WAIT = 0x1F7 - SYS_PTHREAD_CREATE = 0x1F9 - SYS_PTHREAD_DETACH = 0x1FA - SYS_PTHREAD_EQUAL = 0x1FB - SYS_PTHREAD_EXIT = 0x1E4 - SYS_PTHREAD_GETSPECIFIC = 0x1FC - SYS_PTHREAD_JOIN = 0x1FD - SYS_PTHREAD_KEY_CREATE = 0x1FE - SYS_PTHREAD_KILL = 0x1E5 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF - SYS_READ = 0x1B2 - SYS_READDIR = 0x1B3 - SYS_READLINK = 0x1B4 - SYS_REWINDDIR = 0x1B5 - SYS_RMDIR = 0x1B6 - SYS_SETEGID = 0x1B7 - SYS_SETEUID = 0x1B8 - SYS_SETGID = 0x1B9 - SYS_SETPGID = 0x1BA - SYS_SETSID = 0x1BB - SYS_SETUID = 0x1BC - SYS_SIGACTION = 0x1BD - SYS_SIGADDSET = 0x1BE - SYS_SIGDELSET = 0x1BF - SYS_SIGEMPTYSET = 0x1C0 - SYS_SIGFILLSET = 0x1C1 - SYS_SIGISMEMBER = 0x1C2 - SYS_SIGLONGJMP = 0x1C3 - SYS_SIGPENDING = 0x1C4 - SYS_SIGPROCMASK = 0x1C5 - SYS_SIGSETJMP = 0x1C6 - SYS_SIGSUSPEND = 0x1C7 - SYS_SIGWAIT = 0x1E3 - SYS_SLEEP = 0x1C8 - SYS_STAT = 0x1C9 - SYS_SYMLINK = 0x1CB - SYS_SYSCONF = 0x1CC - SYS_TCDRAIN = 0x1CD - SYS_TCFLOW = 0x1CE - SYS_TCFLUSH = 0x1CF - SYS_TCGETATTR = 0x1D0 - SYS_TCGETPGRP = 0x1D1 - SYS_TCSENDBREAK = 0x1D2 - SYS_TCSETATTR = 0x1D3 - SYS_TCSETPGRP = 0x1D4 - SYS_TIMES = 0x1D5 - SYS_TTYNAME = 0x1D6 - SYS_TZSET = 0x1D7 - SYS_UMASK = 0x1D8 - SYS_UMOUNT = 0x1D9 - SYS_UNAME = 0x1DA - SYS_UNLINK = 0x1DB - SYS_UTIME = 0x1DC - SYS_WAIT = 0x1DD - SYS_WAITPID = 0x1DE - SYS_WRITE = 0x1DF - SYS_W_GETPSENT = 0x1B1 - SYS_W_IOCTL = 0x1A2 - SYS_W_STATFS = 0x1CA - SYS_A64L = 0x2EF - SYS_BCMP = 0x2B9 - SYS_BCOPY = 0x2BA - SYS_BZERO = 0x2BB - SYS_CATCLOSE = 0x2B6 - SYS_CATGETS = 0x2B7 - SYS_CATOPEN = 0x2B8 - SYS_CRYPT = 0x2AC - SYS_DBM_CLEARERR = 0x2F7 - SYS_DBM_CLOSE = 0x2F8 - SYS_DBM_DELETE = 0x2F9 - SYS_DBM_ERROR = 0x2FA - SYS_DBM_FETCH = 0x2FB - SYS_DBM_FIRSTKEY = 0x2FC - SYS_DBM_NEXTKEY = 0x2FD - SYS_DBM_OPEN = 0x2FE - SYS_DBM_STORE = 0x2FF - SYS_DRAND48 = 0x2B2 - SYS_ENCRYPT = 0x2AD - SYS_ENDUTXENT = 0x2E1 - SYS_ERAND48 = 0x2B3 - SYS_ERF = 0x02C - SYS_ERFC = 0x02D - SYS_FCHDIR = 0x2D9 - SYS_FFS = 0x2BC - SYS_FMTMSG = 0x2E5 - SYS_FSTATVFS = 0x2B4 - SYS_FTIME = 0x2F5 - SYS_GAMMA = 0x02E - SYS_GETDATE = 0x2A6 - SYS_GETPAGESIZE = 0x2D8 - SYS_GETTIMEOFDAY = 0x2F6 - SYS_GETUTXENT = 0x2E0 - SYS_GETUTXID = 0x2E2 - SYS_GETUTXLINE = 0x2E3 - SYS_HCREATE = 0x2C6 - SYS_HDESTROY = 0x2C7 - SYS_HSEARCH = 0x2C8 - SYS_HYPOT = 0x02B - SYS_INDEX = 0x2BD - SYS_INITSTATE = 0x2C2 - SYS_INSQUE = 0x2CF - SYS_ISASCII = 0x2ED - SYS_JRAND48 = 0x2E6 - SYS_L64A = 0x2F0 - SYS_LCONG48 = 0x2EA - SYS_LFIND = 0x2C9 - SYS_LRAND48 = 0x2E7 - SYS_LSEARCH = 0x2CA - SYS_MEMCCPY = 0x2D4 - SYS_MRAND48 = 0x2E8 - SYS_NRAND48 = 0x2E9 - SYS_PCLOSE = 0x2D2 - SYS_POPEN = 0x2D1 - SYS_PUTUTXLINE = 0x2E4 - SYS_RANDOM = 0x2C4 - SYS_REMQUE = 0x2D0 - SYS_RINDEX = 0x2BE - SYS_SEED48 = 0x2EC - SYS_SETKEY = 0x2AE - SYS_SETSTATE = 0x2C3 - SYS_SETUTXENT = 0x2DF - SYS_SRAND48 = 0x2EB - SYS_SRANDOM = 0x2C5 - SYS_STATVFS = 0x2B5 - SYS_STRCASECMP = 0x2BF - SYS_STRDUP = 0x2C0 - SYS_STRNCASECMP = 0x2C1 - SYS_SWAB = 0x2D3 - SYS_TDELETE = 0x2CB - SYS_TFIND = 0x2CC - SYS_TOASCII = 0x2EE - SYS_TSEARCH = 0x2CD - SYS_TWALK = 0x2CE - SYS_UALARM = 0x2F1 - SYS_USLEEP = 0x2F2 - SYS_WAIT3 = 0x2A7 - SYS_WAITID = 0x2A8 - SYS_Y1 = 0x02A - SYS___ATOE = 0x2DB - SYS___ATOE_L = 0x2DC - SYS___CATTRM = 0x2A9 - SYS___CNVBLK = 0x2AF - SYS___CRYTRM = 0x2B0 - SYS___DLGHT = 0x2A1 - 
SYS___ECRTRM = 0x2B1 - SYS___ETOA = 0x2DD - SYS___ETOA_L = 0x2DE - SYS___GDTRM = 0x2AA - SYS___OCLCK = 0x2DA - SYS___OPARGF = 0x2A2 - SYS___OPERRF = 0x2A5 - SYS___OPINDF = 0x2A4 - SYS___OPOPTF = 0x2A3 - SYS___RNDTRM = 0x2AB - SYS___SRCTRM = 0x2F4 - SYS___TZONE = 0x2A0 - SYS___UTXTRM = 0x2F3 - SYS_ASIN = 0x03E - SYS_ISXDIGIT = 0x03B - SYS_SETLOCAL = 0x03A - SYS_SETLOCALE = 0x03A - SYS_SIN = 0x03F - SYS_TOLOWER = 0x03C - SYS_TOUPPER = 0x03D - SYS_ACCEPT_AND_RECV = 0x4F7 - SYS_ATOL = 0x04E - SYS_CHECKSCH = 0x4BC - SYS_CHECKSCHENV = 0x4BC - SYS_CLEARERR = 0x04C - SYS_CONNECTS = 0x4B5 - SYS_CONNECTSERVER = 0x4B5 - SYS_CONNECTW = 0x4B4 - SYS_CONNECTWORKMGR = 0x4B4 - SYS_CONTINUE = 0x4B3 - SYS_CONTINUEWORKUNIT = 0x4B3 - SYS_COPYSIGN = 0x4C2 - SYS_CREATEWO = 0x4B2 - SYS_CREATEWORKUNIT = 0x4B2 - SYS_DELETEWO = 0x4B9 - SYS_DELETEWORKUNIT = 0x4B9 - SYS_DISCONNE = 0x4B6 - SYS_DISCONNECTSERVER = 0x4B6 - SYS_FEOF = 0x04D - SYS_FERROR = 0x04A - SYS_FINITE = 0x4C8 - SYS_GAMMA_R = 0x4E2 - SYS_JOINWORK = 0x4B7 - SYS_JOINWORKUNIT = 0x4B7 - SYS_LEAVEWOR = 0x4B8 - SYS_LEAVEWORKUNIT = 0x4B8 - SYS_LGAMMA_R = 0x4EB - SYS_MATHERR = 0x4D0 - SYS_PERROR = 0x04F - SYS_QUERYMET = 0x4BA - SYS_QUERYMETRICS = 0x4BA - SYS_QUERYSCH = 0x4BB - SYS_QUERYSCHENV = 0x4BB - SYS_REWIND = 0x04B - SYS_SCALBN = 0x4D4 - SYS_SIGNIFIC = 0x4D5 - SYS_SIGNIFICAND = 0x4D5 - SYS___ACOSH_B = 0x4DA - SYS___ACOS_B = 0x4D9 - SYS___ASINH_B = 0x4BE - SYS___ASIN_B = 0x4DB - SYS___ATAN2_B = 0x4DC - SYS___ATANH_B = 0x4DD - SYS___ATAN_B = 0x4BF - SYS___CBRT_B = 0x4C0 - SYS___CEIL_B = 0x4C1 - SYS___COSH_B = 0x4DE - SYS___COS_B = 0x4C3 - SYS___DGHT = 0x4A8 - SYS___ENVN = 0x4B0 - SYS___ERFC_B = 0x4C5 - SYS___ERF_B = 0x4C4 - SYS___EXPM1_B = 0x4C6 - SYS___EXP_B = 0x4DF - SYS___FABS_B = 0x4C7 - SYS___FLOOR_B = 0x4C9 - SYS___FMOD_B = 0x4E0 - SYS___FP_SETMODE = 0x4F8 - SYS___FREXP_B = 0x4CA - SYS___GAMMA_B = 0x4E1 - SYS___GDRR = 0x4A1 - SYS___HRRNO = 0x4A2 - SYS___HYPOT_B = 0x4E3 - SYS___ILOGB_B = 0x4CB - SYS___ISNAN_B = 0x4CC - SYS___J0_B = 0x4E4 - SYS___J1_B = 0x4E6 - SYS___JN_B = 0x4E8 - SYS___LDEXP_B = 0x4CD - SYS___LGAMMA_B = 0x4EA - SYS___LOG10_B = 0x4ED - SYS___LOG1P_B = 0x4CE - SYS___LOGB_B = 0x4CF - SYS___LOGIN = 0x4F5 - SYS___LOG_B = 0x4EC - SYS___MLOCKALL = 0x4B1 - SYS___MODF_B = 0x4D1 - SYS___NEXTAFTER_B = 0x4D2 - SYS___OPENDIR2 = 0x4F3 - SYS___OPEN_STAT = 0x4F6 - SYS___OPND = 0x4A5 - SYS___OPPT = 0x4A6 - SYS___OPRG = 0x4A3 - SYS___OPRR = 0x4A4 - SYS___PID_AFFINITY = 0x4BD - SYS___POW_B = 0x4EE - SYS___READDIR2 = 0x4F4 - SYS___REMAINDER_B = 0x4EF - SYS___RINT_B = 0x4D3 - SYS___SCALB_B = 0x4F0 - SYS___SIGACTIONSET = 0x4FB - SYS___SIGGM = 0x4A7 - SYS___SINH_B = 0x4F1 - SYS___SIN_B = 0x4D6 - SYS___SQRT_B = 0x4F2 - SYS___TANH_B = 0x4D8 - SYS___TAN_B = 0x4D7 - SYS___TRRNO = 0x4AF - SYS___TZNE = 0x4A9 - SYS___TZZN = 0x4AA - SYS___UCREATE = 0x4FC - SYS___UFREE = 0x4FE - SYS___UHEAPREPORT = 0x4FF - SYS___UMALLOC = 0x4FD - SYS___Y0_B = 0x4E5 - SYS___Y1_B = 0x4E7 - SYS___YN_B = 0x4E9 - SYS_ABORT = 0x05C - SYS_ASCTIME_R = 0x5E0 - SYS_ATEXIT = 0x05D - SYS_CONNECTE = 0x5AE - SYS_CONNECTEXPORTIMPORT = 0x5AE - SYS_CTIME_R = 0x5E1 - SYS_DN_COMP = 0x5DF - SYS_DN_EXPAND = 0x5DD - SYS_DN_SKIPNAME = 0x5DE - SYS_EXIT = 0x05A - SYS_EXPORTWO = 0x5A1 - SYS_EXPORTWORKUNIT = 0x5A1 - SYS_EXTRACTW = 0x5A5 - SYS_EXTRACTWORKUNIT = 0x5A5 - SYS_FSEEKO = 0x5C9 - SYS_FTELLO = 0x5C8 - SYS_GETGRGID_R = 0x5E7 - SYS_GETGRNAM_R = 0x5E8 - SYS_GETLOGIN_R = 0x5E9 - SYS_GETPWNAM_R = 0x5EA - SYS_GETPWUID_R = 0x5EB - SYS_GMTIME_R = 0x5E2 - SYS_IMPORTWO = 0x5A3 - SYS_IMPORTWORKUNIT = 0x5A3 - 
SYS_INET_NTOP = 0x5D3 - SYS_INET_PTON = 0x5D4 - SYS_LLABS = 0x5CE - SYS_LLDIV = 0x5CB - SYS_LOCALTIME_R = 0x5E3 - SYS_PTHREAD_ATFORK = 0x5ED - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 - SYS_PTHREAD_DETACH_U98 = 0x5FD - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE - SYS_PTHREAD_KEY_DELETE = 0x5F5 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 - SYS_PTHREAD_SIGMASK = 0x5F7 - SYS_QUERYENC = 0x5AD - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD - SYS_RAISE = 0x05E - SYS_RAND_R = 0x5E4 - SYS_READDIR_R = 0x5E6 - SYS_REALLOC = 0x05B - SYS_RES_INIT = 0x5D8 - SYS_RES_MKQUERY = 0x5D7 - SYS_RES_QUERY = 0x5D9 - SYS_RES_QUERYDOMAIN = 0x5DC - SYS_RES_SEARCH = 0x5DA - SYS_RES_SEND = 0x5DB - SYS_SETJMP = 0x05F - SYS_SIGQUEUE = 0x5A9 - SYS_STRTOK_R = 0x5E5 - SYS_STRTOLL = 0x5B0 - SYS_STRTOULL = 0x5B1 - SYS_TTYNAME_R = 0x5EC - SYS_UNDOEXPO = 0x5A2 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 - SYS_UNDOIMPO = 0x5A4 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 - SYS_WCSTOLL = 0x5CC - SYS_WCSTOULL = 0x5CD - SYS___ABORT = 0x05C - SYS___CONSOLE2 = 0x5D2 - SYS___CPL = 0x5A6 - SYS___DISCARDDATA = 0x5F8 - SYS___DSA_PREV = 0x5B2 - SYS___EP_FIND = 0x5B3 - SYS___FP_SWAPMODE = 0x5AF - SYS___GETUSERID = 0x5AB - SYS___GET_CPUID = 0x5B9 - SYS___GET_SYSTEM_SETTINGS = 0x5BA - SYS___IPDOMAINNAME = 0x5AC - SYS___MAP_INIT = 0x5A7 - SYS___MAP_SERVICE = 0x5A8 - SYS___MOUNT = 0x5AA - SYS___MSGRCV_TIMED = 0x5B7 - SYS___RES = 0x5D6 - SYS___SEMOP_TIMED = 0x5B8 - SYS___SERVER_THREADS_QUERY = 0x5B4 - SYS_FPRINTF = 0x06D - SYS_FSCANF = 0x06A - SYS_PRINTF = 0x06F - SYS_SETBUF = 0x06B - SYS_SETVBUF = 0x06C - SYS_SSCANF = 0x06E - SYS___CATGETS_A = 0x6C0 - SYS___CHAUDIT_A = 0x6F4 - SYS___CHMOD_A = 0x6E8 - SYS___COLLATE_INIT_A = 0x6AC - SYS___CREAT_A = 0x6F6 - SYS___CTYPE_INIT_A = 0x6AF - SYS___DLLLOAD_A = 0x6DF - SYS___DLLQUERYFN_A = 0x6E0 - SYS___DLLQUERYVAR_A = 0x6E1 - SYS___E2A_L = 0x6E3 - SYS___EXECLE_A = 0x6A0 - SYS___EXECLP_A = 0x6A4 - SYS___EXECVE_A = 0x6C1 - SYS___EXECVP_A = 0x6C2 - SYS___EXECV_A = 0x6B1 - SYS___FPRINTF_A = 0x6FA - SYS___GETADDRINFO_A = 0x6BF - SYS___GETNAMEINFO_A = 0x6C4 - SYS___GET_WCTYPE_STD_A = 0x6AE - SYS___ICONV_OPEN_A = 0x6DE - SYS___IF_INDEXTONAME_A = 0x6DC - SYS___IF_NAMETOINDEX_A = 0x6DB - SYS___ISWCTYPE_A = 0x6B0 - SYS___IS_WCTYPE_STD_A = 0x6B2 - SYS___LOCALECONV_A = 0x6B8 - SYS___LOCALECONV_STD_A = 0x6B9 - SYS___LOCALE_INIT_A = 0x6B7 - SYS___LSTAT_A = 0x6EE - SYS___LSTAT_O_A = 0x6EF - SYS___MKDIR_A = 0x6E9 - SYS___MKFIFO_A = 0x6EC - SYS___MKNOD_A = 0x6F0 - SYS___MONETARY_INIT_A = 0x6BC - SYS___MOUNT_A = 0x6F1 - SYS___NL_CSINFO_A = 0x6D6 - SYS___NL_LANGINFO_A = 0x6BA - SYS___NL_LNAGINFO_STD_A = 0x6BB - SYS___NL_MONINFO_A = 0x6D7 - SYS___NL_NUMINFO_A = 0x6D8 - SYS___NL_RESPINFO_A = 0x6D9 - SYS___NL_TIMINFO_A = 0x6DA - SYS___NUMERIC_INIT_A = 0x6C6 - SYS___OPEN_A = 0x6F7 - SYS___PRINTF_A = 0x6DD - SYS___RESP_INIT_A = 0x6C7 - SYS___RPMATCH_A = 0x6C8 - SYS___RPMATCH_C_A = 0x6C9 - SYS___RPMATCH_STD_A = 0x6CA - SYS___SETLOCALE_A = 0x6F9 - SYS___SPAWNP_A = 0x6C5 - SYS___SPAWN_A = 0x6C3 - SYS___SPRINTF_A = 0x6FB - SYS___STAT_A = 0x6EA - SYS___STAT_O_A = 0x6EB - SYS___STRCOLL_STD_A = 0x6A1 - SYS___STRFMON_A = 0x6BD - 
SYS___STRFMON_STD_A = 0x6BE - SYS___STRFTIME_A = 0x6CC - SYS___STRFTIME_STD_A = 0x6CD - SYS___STRPTIME_A = 0x6CE - SYS___STRPTIME_STD_A = 0x6CF - SYS___STRXFRM_A = 0x6A2 - SYS___STRXFRM_C_A = 0x6A3 - SYS___STRXFRM_STD_A = 0x6A5 - SYS___SYNTAX_INIT_A = 0x6D4 - SYS___TIME_INIT_A = 0x6CB - SYS___TOD_INIT_A = 0x6D5 - SYS___TOWLOWER_A = 0x6B3 - SYS___TOWLOWER_STD_A = 0x6B4 - SYS___TOWUPPER_A = 0x6B5 - SYS___TOWUPPER_STD_A = 0x6B6 - SYS___UMOUNT_A = 0x6F2 - SYS___VFPRINTF_A = 0x6FC - SYS___VPRINTF_A = 0x6FD - SYS___VSPRINTF_A = 0x6FE - SYS___VSWPRINTF_A = 0x6FF - SYS___WCSCOLL_A = 0x6A6 - SYS___WCSCOLL_C_A = 0x6A7 - SYS___WCSCOLL_STD_A = 0x6A8 - SYS___WCSFTIME_A = 0x6D0 - SYS___WCSFTIME_STD_A = 0x6D1 - SYS___WCSXFRM_A = 0x6A9 - SYS___WCSXFRM_C_A = 0x6AA - SYS___WCSXFRM_STD_A = 0x6AB - SYS___WCTYPE_A = 0x6AD - SYS___W_GETMNTENT_A = 0x6F5 - SYS_____CCSIDTYPE_A = 0x6E6 - SYS_____CHATTR_A = 0x6E2 - SYS_____CSNAMETYPE_A = 0x6E7 - SYS_____OPEN_STAT_A = 0x6ED - SYS_____SPAWN2_A = 0x6D2 - SYS_____SPAWNP2_A = 0x6D3 - SYS_____TOCCSID_A = 0x6E4 - SYS_____TOCSNAME_A = 0x6E5 - SYS_ACL_FREE = 0x7FF - SYS_ACL_INIT = 0x7FE - SYS_FWIDE = 0x7DF - SYS_FWPRINTF = 0x7D1 - SYS_FWRITE = 0x07E - SYS_FWSCANF = 0x7D5 - SYS_GETCHAR = 0x07B - SYS_GETS = 0x07C - SYS_M_CREATE_LAYOUT = 0x7C9 - SYS_M_DESTROY_LAYOUT = 0x7CA - SYS_M_GETVALUES_LAYOUT = 0x7CB - SYS_M_SETVALUES_LAYOUT = 0x7CC - SYS_M_TRANSFORM_LAYOUT = 0x7CD - SYS_M_WTRANSFORM_LAYOUT = 0x7CE - SYS_PREAD = 0x7C7 - SYS_PUTC = 0x07D - SYS_PUTCHAR = 0x07A - SYS_PUTS = 0x07F - SYS_PWRITE = 0x7C8 - SYS_TOWCTRAN = 0x7D8 - SYS_TOWCTRANS = 0x7D8 - SYS_UNATEXIT = 0x7B5 - SYS_VFWPRINT = 0x7D3 - SYS_VFWPRINTF = 0x7D3 - SYS_VWPRINTF = 0x7D4 - SYS_WCTRANS = 0x7D7 - SYS_WPRINTF = 0x7D2 - SYS_WSCANF = 0x7D6 - SYS___ASCTIME_R_A = 0x7A1 - SYS___BASENAME_A = 0x7DC - SYS___BTOWC_A = 0x7E4 - SYS___CDUMP_A = 0x7B7 - SYS___CEE3DMP_A = 0x7B6 - SYS___CEILF_H = 0x7F4 - SYS___CEILL_H = 0x7F5 - SYS___CEIL_H = 0x7EA - SYS___CRYPT_A = 0x7BE - SYS___CSNAP_A = 0x7B8 - SYS___CTEST_A = 0x7B9 - SYS___CTIME_R_A = 0x7A2 - SYS___CTRACE_A = 0x7BA - SYS___DBM_OPEN_A = 0x7E6 - SYS___DIRNAME_A = 0x7DD - SYS___FABSF_H = 0x7FA - SYS___FABSL_H = 0x7FB - SYS___FABS_H = 0x7ED - SYS___FGETWC_A = 0x7AA - SYS___FGETWS_A = 0x7AD - SYS___FLOORF_H = 0x7F6 - SYS___FLOORL_H = 0x7F7 - SYS___FLOOR_H = 0x7EB - SYS___FPUTWC_A = 0x7A5 - SYS___FPUTWS_A = 0x7A8 - SYS___GETTIMEOFDAY_A = 0x7AE - SYS___GETWCHAR_A = 0x7AC - SYS___GETWC_A = 0x7AB - SYS___GLOB_A = 0x7DE - SYS___GMTIME_A = 0x7AF - SYS___GMTIME_R_A = 0x7B0 - SYS___INET_PTON_A = 0x7BC - SYS___J0_H = 0x7EE - SYS___J1_H = 0x7EF - SYS___JN_H = 0x7F0 - SYS___LOCALTIME_A = 0x7B1 - SYS___LOCALTIME_R_A = 0x7B2 - SYS___MALLOC24 = 0x7FC - SYS___MALLOC31 = 0x7FD - SYS___MKTIME_A = 0x7B3 - SYS___MODFF_H = 0x7F8 - SYS___MODFL_H = 0x7F9 - SYS___MODF_H = 0x7EC - SYS___OPENDIR_A = 0x7C2 - SYS___OSNAME = 0x7E0 - SYS___PUTWCHAR_A = 0x7A7 - SYS___PUTWC_A = 0x7A6 - SYS___READDIR_A = 0x7C3 - SYS___STRTOLL_A = 0x7A3 - SYS___STRTOULL_A = 0x7A4 - SYS___SYSLOG_A = 0x7BD - SYS___TZZNA = 0x7B4 - SYS___UNGETWC_A = 0x7A9 - SYS___UTIME_A = 0x7A0 - SYS___VFPRINTF2_A = 0x7E7 - SYS___VPRINTF2_A = 0x7E8 - SYS___VSPRINTF2_A = 0x7E9 - SYS___VSWPRNTF2_A = 0x7BB - SYS___WCSTOD_A = 0x7D9 - SYS___WCSTOL_A = 0x7DA - SYS___WCSTOUL_A = 0x7DB - SYS___WCTOB_A = 0x7E5 - SYS___Y0_H = 0x7F1 - SYS___Y1_H = 0x7F2 - SYS___YN_H = 0x7F3 - SYS_____OPENDIR2_A = 0x7BF - SYS_____OSNAME_A = 0x7E1 - SYS_____READDIR2_A = 0x7C0 - SYS_DLCLOSE = 0x8DF - SYS_DLERROR = 0x8E0 - SYS_DLOPEN = 0x8DD - SYS_DLSYM = 0x8DE - SYS_FLOCKFILE 
= 0x8D3 - SYS_FTRYLOCKFILE = 0x8D4 - SYS_FUNLOCKFILE = 0x8D5 - SYS_GETCHAR_UNLOCKED = 0x8D7 - SYS_GETC_UNLOCKED = 0x8D6 - SYS_PUTCHAR_UNLOCKED = 0x8D9 - SYS_PUTC_UNLOCKED = 0x8D8 - SYS_SNPRINTF = 0x8DA - SYS_VSNPRINTF = 0x8DB - SYS_WCSCSPN = 0x08B - SYS_WCSLEN = 0x08C - SYS_WCSNCAT = 0x08D - SYS_WCSNCMP = 0x08A - SYS_WCSNCPY = 0x08F - SYS_WCSSPN = 0x08E - SYS___ABSF_H = 0x8E7 - SYS___ABSL_H = 0x8E8 - SYS___ABS_H = 0x8E6 - SYS___ACOSF_H = 0x8EA - SYS___ACOSH_H = 0x8EC - SYS___ACOSL_H = 0x8EB - SYS___ACOS_H = 0x8E9 - SYS___ASINF_H = 0x8EE - SYS___ASINH_H = 0x8F0 - SYS___ASINL_H = 0x8EF - SYS___ASIN_H = 0x8ED - SYS___ATAN2F_H = 0x8F8 - SYS___ATAN2L_H = 0x8F9 - SYS___ATAN2_H = 0x8F7 - SYS___ATANF_H = 0x8F2 - SYS___ATANHF_H = 0x8F5 - SYS___ATANHL_H = 0x8F6 - SYS___ATANH_H = 0x8F4 - SYS___ATANL_H = 0x8F3 - SYS___ATAN_H = 0x8F1 - SYS___CBRT_H = 0x8FA - SYS___COPYSIGNF_H = 0x8FB - SYS___COPYSIGNL_H = 0x8FC - SYS___COSF_H = 0x8FE - SYS___COSL_H = 0x8FF - SYS___COS_H = 0x8FD - SYS___DLERROR_A = 0x8D2 - SYS___DLOPEN_A = 0x8D0 - SYS___DLSYM_A = 0x8D1 - SYS___GETUTXENT_A = 0x8C6 - SYS___GETUTXID_A = 0x8C7 - SYS___GETUTXLINE_A = 0x8C8 - SYS___ITOA = 0x8AA - SYS___ITOA_A = 0x8B0 - SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 - SYS___LE_MSG_ADD_INSERT = 0x8A6 - SYS___LE_MSG_GET = 0x8A7 - SYS___LE_MSG_GET_AND_WRITE = 0x8A8 - SYS___LE_MSG_WRITE = 0x8A9 - SYS___LLTOA = 0x8AE - SYS___LLTOA_A = 0x8B4 - SYS___LTOA = 0x8AC - SYS___LTOA_A = 0x8B2 - SYS___PUTCHAR_UNLOCKED_A = 0x8CC - SYS___PUTC_UNLOCKED_A = 0x8CB - SYS___PUTUTXLINE_A = 0x8C9 - SYS___RESET_EXCEPTION_HANDLER = 0x8E3 - SYS___REXEC_A = 0x8C4 - SYS___REXEC_AF_A = 0x8C5 - SYS___SET_EXCEPTION_HANDLER = 0x8E2 - SYS___SNPRINTF_A = 0x8CD - SYS___SUPERKILL = 0x8A4 - SYS___TCGETATTR_A = 0x8A1 - SYS___TCSETATTR_A = 0x8A2 - SYS___ULLTOA = 0x8AF - SYS___ULLTOA_A = 0x8B5 - SYS___ULTOA = 0x8AD - SYS___ULTOA_A = 0x8B3 - SYS___UTOA = 0x8AB - SYS___UTOA_A = 0x8B1 - SYS___VHM_EVENT = 0x8E4 - SYS___VSNPRINTF_A = 0x8CE - SYS_____GETENV_A = 0x8C3 - SYS_____UTMPXNAME_A = 0x8CA - SYS_CACOSH = 0x9A0 - SYS_CACOSHF = 0x9A3 - SYS_CACOSHL = 0x9A6 - SYS_CARG = 0x9A9 - SYS_CARGF = 0x9AC - SYS_CARGL = 0x9AF - SYS_CASIN = 0x9B2 - SYS_CASINF = 0x9B5 - SYS_CASINH = 0x9BB - SYS_CASINHF = 0x9BE - SYS_CASINHL = 0x9C1 - SYS_CASINL = 0x9B8 - SYS_CATAN = 0x9C4 - SYS_CATANF = 0x9C7 - SYS_CATANH = 0x9CD - SYS_CATANHF = 0x9D0 - SYS_CATANHL = 0x9D3 - SYS_CATANL = 0x9CA - SYS_CCOS = 0x9D6 - SYS_CCOSF = 0x9D9 - SYS_CCOSH = 0x9DF - SYS_CCOSHF = 0x9E2 - SYS_CCOSHL = 0x9E5 - SYS_CCOSL = 0x9DC - SYS_CEXP = 0x9E8 - SYS_CEXPF = 0x9EB - SYS_CEXPL = 0x9EE - SYS_CIMAG = 0x9F1 - SYS_CIMAGF = 0x9F4 - SYS_CIMAGL = 0x9F7 - SYS_CLOGF = 0x9FD - SYS_MEMCHR = 0x09B - SYS_MEMCMP = 0x09A - SYS_STRCOLL = 0x09C - SYS_STRNCMP = 0x09D - SYS_STRRCHR = 0x09F - SYS_STRXFRM = 0x09E - SYS___CACOSHF_B = 0x9A4 - SYS___CACOSHF_H = 0x9A5 - SYS___CACOSHL_B = 0x9A7 - SYS___CACOSHL_H = 0x9A8 - SYS___CACOSH_B = 0x9A1 - SYS___CACOSH_H = 0x9A2 - SYS___CARGF_B = 0x9AD - SYS___CARGF_H = 0x9AE - SYS___CARGL_B = 0x9B0 - SYS___CARGL_H = 0x9B1 - SYS___CARG_B = 0x9AA - SYS___CARG_H = 0x9AB - SYS___CASINF_B = 0x9B6 - SYS___CASINF_H = 0x9B7 - SYS___CASINHF_B = 0x9BF - SYS___CASINHF_H = 0x9C0 - SYS___CASINHL_B = 0x9C2 - SYS___CASINHL_H = 0x9C3 - SYS___CASINH_B = 0x9BC - SYS___CASINH_H = 0x9BD - SYS___CASINL_B = 0x9B9 - SYS___CASINL_H = 0x9BA - SYS___CASIN_B = 0x9B3 - SYS___CASIN_H = 0x9B4 - SYS___CATANF_B = 0x9C8 - SYS___CATANF_H = 0x9C9 - SYS___CATANHF_B = 0x9D1 - SYS___CATANHF_H = 0x9D2 - SYS___CATANHL_B = 0x9D4 - SYS___CATANHL_H = 0x9D5 - 
SYS___CATANH_B = 0x9CE - SYS___CATANH_H = 0x9CF - SYS___CATANL_B = 0x9CB - SYS___CATANL_H = 0x9CC - SYS___CATAN_B = 0x9C5 - SYS___CATAN_H = 0x9C6 - SYS___CCOSF_B = 0x9DA - SYS___CCOSF_H = 0x9DB - SYS___CCOSHF_B = 0x9E3 - SYS___CCOSHF_H = 0x9E4 - SYS___CCOSHL_B = 0x9E6 - SYS___CCOSHL_H = 0x9E7 - SYS___CCOSH_B = 0x9E0 - SYS___CCOSH_H = 0x9E1 - SYS___CCOSL_B = 0x9DD - SYS___CCOSL_H = 0x9DE - SYS___CCOS_B = 0x9D7 - SYS___CCOS_H = 0x9D8 - SYS___CEXPF_B = 0x9EC - SYS___CEXPF_H = 0x9ED - SYS___CEXPL_B = 0x9EF - SYS___CEXPL_H = 0x9F0 - SYS___CEXP_B = 0x9E9 - SYS___CEXP_H = 0x9EA - SYS___CIMAGF_B = 0x9F5 - SYS___CIMAGF_H = 0x9F6 - SYS___CIMAGL_B = 0x9F8 - SYS___CIMAGL_H = 0x9F9 - SYS___CIMAG_B = 0x9F2 - SYS___CIMAG_H = 0x9F3 - SYS___CLOG = 0x9FA - SYS___CLOGF_B = 0x9FE - SYS___CLOGF_H = 0x9FF - SYS___CLOG_B = 0x9FB - SYS___CLOG_H = 0x9FC - SYS_ISWCTYPE = 0x10C - SYS_ISWXDIGI = 0x10A - SYS_ISWXDIGIT = 0x10A - SYS_MBSINIT = 0x10F - SYS_TOWLOWER = 0x10D - SYS_TOWUPPER = 0x10E - SYS_WCTYPE = 0x10B - SYS_WCSSTR = 0x11B - SYS___RPMTCH = 0x11A - SYS_WCSTOD = 0x12E - SYS_WCSTOK = 0x12C - SYS_WCSTOL = 0x12D - SYS_WCSTOUL = 0x12F - SYS_FGETWC = 0x13C - SYS_FGETWS = 0x13D - SYS_FPUTWC = 0x13E - SYS_FPUTWS = 0x13F - SYS_REGERROR = 0x13B - SYS_REGFREE = 0x13A - SYS_COLLEQUIV = 0x14F - SYS_COLLTOSTR = 0x14E - SYS_ISMCCOLLEL = 0x14C - SYS_STRTOCOLL = 0x14D - SYS_DLLFREE = 0x16F - SYS_DLLQUERYFN = 0x16D - SYS_DLLQUERYVAR = 0x16E - SYS_GETMCCOLL = 0x16A - SYS_GETWMCCOLL = 0x16B - SYS___ERR2AD = 0x16C - SYS_CFSETOSPEED = 0x17A - SYS_CHDIR = 0x17B - SYS_CHMOD = 0x17C - SYS_CHOWN = 0x17D - SYS_CLOSE = 0x17E - SYS_CLOSEDIR = 0x17F - SYS_LOG = 0x017 - SYS_COSH = 0x018 - SYS_FCHMOD = 0x18A - SYS_FCHOWN = 0x18B - SYS_FCNTL = 0x18C - SYS_FILENO = 0x18D - SYS_FORK = 0x18E - SYS_FPATHCONF = 0x18F - SYS_GETLOGIN = 0x19A - SYS_GETPGRP = 0x19C - SYS_GETPID = 0x19D - SYS_GETPPID = 0x19E - SYS_GETPWNAM = 0x19F - SYS_TANH = 0x019 - SYS_W_GETMNTENT = 0x19B - SYS_POW = 0x020 - SYS_PTHREAD_SELF = 0x20A - SYS_PTHREAD_SETINTR = 0x20B - SYS_PTHREAD_SETINTRTYPE = 0x20C - SYS_PTHREAD_SETSPECIFIC = 0x20D - SYS_PTHREAD_TESTINTR = 0x20E - SYS_PTHREAD_YIELD = 0x20F - SYS_SQRT = 0x021 - SYS_FLOOR = 0x022 - SYS_J1 = 0x023 - SYS_WCSPBRK = 0x23F - SYS_BSEARCH = 0x24C - SYS_FABS = 0x024 - SYS_GETENV = 0x24A - SYS_LDIV = 0x24D - SYS_SYSTEM = 0x24B - SYS_FMOD = 0x025 - SYS___RETHROW = 0x25F - SYS___THROW = 0x25E - SYS_J0 = 0x026 - SYS_PUTENV = 0x26A - SYS___GETENV = 0x26F - SYS_SEMCTL = 0x27A - SYS_SEMGET = 0x27B - SYS_SEMOP = 0x27C - SYS_SHMAT = 0x27D - SYS_SHMCTL = 0x27E - SYS_SHMDT = 0x27F - SYS_YN = 0x027 - SYS_JN = 0x028 - SYS_SIGALTSTACK = 0x28A - SYS_SIGHOLD = 0x28B - SYS_SIGIGNORE = 0x28C - SYS_SIGINTERRUPT = 0x28D - SYS_SIGPAUSE = 0x28E - SYS_SIGRELSE = 0x28F - SYS_GETOPT = 0x29A - SYS_GETSUBOPT = 0x29D - SYS_LCHOWN = 0x29B - SYS_SETPGRP = 0x29E - SYS_TRUNCATE = 0x29C - SYS_Y0 = 0x029 - SYS___GDERR = 0x29F - SYS_ISALPHA = 0x030 - SYS_VFORK = 0x30F - SYS__LONGJMP = 0x30D - SYS__SETJMP = 0x30E - SYS_GLOB = 0x31A - SYS_GLOBFREE = 0x31B - SYS_ISALNUM = 0x031 - SYS_PUTW = 0x31C - SYS_SEEKDIR = 0x31D - SYS_TELLDIR = 0x31E - SYS_TEMPNAM = 0x31F - SYS_GETTIMEOFDAY_R = 0x32E - SYS_ISLOWER = 0x032 - SYS_LGAMMA = 0x32C - SYS_REMAINDER = 0x32A - SYS_SCALB = 0x32B - SYS_SYNC = 0x32F - SYS_TTYSLOT = 0x32D - SYS_ENDPROTOENT = 0x33A - SYS_ENDSERVENT = 0x33B - SYS_GETHOSTBYADDR = 0x33D - SYS_GETHOSTBYADDR_R = 0x33C - SYS_GETHOSTBYNAME = 0x33F - SYS_GETHOSTBYNAME_R = 0x33E - SYS_ISCNTRL = 0x033 - SYS_GETSERVBYNAME = 0x34A - SYS_GETSERVBYPORT = 0x34B - 
SYS_GETSERVENT = 0x34C - SYS_GETSOCKNAME = 0x34D - SYS_GETSOCKOPT = 0x34E - SYS_INET_ADDR = 0x34F - SYS_ISDIGIT = 0x034 - SYS_ISGRAPH = 0x035 - SYS_SELECT = 0x35B - SYS_SELECTEX = 0x35C - SYS_SEND = 0x35D - SYS_SENDTO = 0x35F - SYS_CHROOT = 0x36A - SYS_ISNAN = 0x36D - SYS_ISUPPER = 0x036 - SYS_ULIMIT = 0x36C - SYS_UTIMES = 0x36E - SYS_W_STATVFS = 0x36B - SYS___H_ERRNO = 0x36F - SYS_GRANTPT = 0x37A - SYS_ISPRINT = 0x037 - SYS_TCGETSID = 0x37C - SYS_UNLOCKPT = 0x37B - SYS___TCGETCP = 0x37D - SYS___TCSETCP = 0x37E - SYS___TCSETTABLES = 0x37F - SYS_ISPUNCT = 0x038 - SYS_NLIST = 0x38C - SYS___IPDBCS = 0x38D - SYS___IPDSPX = 0x38E - SYS___IPMSGC = 0x38F - SYS___STHOSTENT = 0x38B - SYS___STSERVENT = 0x38A - SYS_ISSPACE = 0x039 - SYS_COS = 0x040 - SYS_T_ALLOC = 0x40A - SYS_T_BIND = 0x40B - SYS_T_CLOSE = 0x40C - SYS_T_CONNECT = 0x40D - SYS_T_ERROR = 0x40E - SYS_T_FREE = 0x40F - SYS_TAN = 0x041 - SYS_T_RCVREL = 0x41A - SYS_T_RCVUDATA = 0x41B - SYS_T_RCVUDERR = 0x41C - SYS_T_SND = 0x41D - SYS_T_SNDDIS = 0x41E - SYS_T_SNDREL = 0x41F - SYS_GETPMSG = 0x42A - SYS_ISASTREAM = 0x42B - SYS_PUTMSG = 0x42C - SYS_PUTPMSG = 0x42D - SYS_SINH = 0x042 - SYS___ISPOSIXON = 0x42E - SYS___OPENMVSREL = 0x42F - SYS_ACOS = 0x043 - SYS_ATAN = 0x044 - SYS_ATAN2 = 0x045 - SYS_FTELL = 0x046 - SYS_FGETPOS = 0x047 - SYS_SOCK_DEBUG = 0x47A - SYS_SOCK_DO_TESTSTOR = 0x47D - SYS_TAKESOCKET = 0x47E - SYS___SERVER_INIT = 0x47F - SYS_FSEEK = 0x048 - SYS___IPHOST = 0x48B - SYS___IPNODE = 0x48C - SYS___SERVER_CLASSIFY_CREATE = 0x48D - SYS___SERVER_CLASSIFY_DESTROY = 0x48E - SYS___SERVER_CLASSIFY_RESET = 0x48F - SYS___SMF_RECORD = 0x48A - SYS_FSETPOS = 0x049 - SYS___FNWSA = 0x49B - SYS___SPAWN2 = 0x49D - SYS___SPAWNP2 = 0x49E - SYS_ATOF = 0x050 - SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A - SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B - SYS_PTHREAD_RWLOCK_DESTROY = 0x50C - SYS_PTHREAD_RWLOCK_INIT = 0x50D - SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E - SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F - SYS_ATOI = 0x051 - SYS___FP_CLASS = 0x51D - SYS___FP_CLR_FLAG = 0x51A - SYS___FP_FINITE = 0x51E - SYS___FP_ISNAN = 0x51F - SYS___FP_RAISE_XCP = 0x51C - SYS___FP_READ_FLAG = 0x51B - SYS_RAND = 0x052 - SYS_SIGTIMEDWAIT = 0x52D - SYS_SIGWAITINFO = 0x52E - SYS___CHKBFP = 0x52F - SYS___FPC_RS = 0x52C - SYS___FPC_RW = 0x52A - SYS___FPC_SM = 0x52B - SYS_STRTOD = 0x053 - SYS_STRTOL = 0x054 - SYS_STRTOUL = 0x055 - SYS_MALLOC = 0x056 - SYS_SRAND = 0x057 - SYS_CALLOC = 0x058 - SYS_FREE = 0x059 - SYS___OSENV = 0x59F - SYS___W_PIOCTL = 0x59E - SYS_LONGJMP = 0x060 - SYS___FLOORF_B = 0x60A - SYS___FLOORL_B = 0x60B - SYS___FREXPF_B = 0x60C - SYS___FREXPL_B = 0x60D - SYS___LDEXPF_B = 0x60E - SYS___LDEXPL_B = 0x60F - SYS_SIGNAL = 0x061 - SYS___ATAN2F_B = 0x61A - SYS___ATAN2L_B = 0x61B - SYS___COSHF_B = 0x61C - SYS___COSHL_B = 0x61D - SYS___EXPF_B = 0x61E - SYS___EXPL_B = 0x61F - SYS_TMPNAM = 0x062 - SYS___ABSF_B = 0x62A - SYS___ABSL_B = 0x62C - SYS___ABS_B = 0x62B - SYS___FMODF_B = 0x62D - SYS___FMODL_B = 0x62E - SYS___MODFF_B = 0x62F - SYS_ATANL = 0x63A - SYS_CEILF = 0x63B - SYS_CEILL = 0x63C - SYS_COSF = 0x63D - SYS_COSHF = 0x63F - SYS_COSL = 0x63E - SYS_REMOVE = 0x063 - SYS_POWL = 0x64A - SYS_RENAME = 0x064 - SYS_SINF = 0x64B - SYS_SINHF = 0x64F - SYS_SINL = 0x64C - SYS_SQRTF = 0x64D - SYS_SQRTL = 0x64E - SYS_BTOWC = 0x65F - SYS_FREXPL = 0x65A - SYS_LDEXPF = 0x65B - SYS_LDEXPL = 0x65C - SYS_MODFF = 0x65D - SYS_MODFL = 0x65E - SYS_TMPFILE = 0x065 - SYS_FREOPEN = 0x066 - SYS___CHARMAP_INIT_A = 0x66E - SYS___GETHOSTBYADDR_R_A = 0x66C - SYS___GETHOSTBYNAME_A = 0x66A - 
SYS___GETHOSTBYNAME_R_A = 0x66D - SYS___MBLEN_A = 0x66F - SYS___RES_INIT_A = 0x66B - SYS_FCLOSE = 0x067 - SYS___GETGRGID_R_A = 0x67D - SYS___WCSTOMBS_A = 0x67A - SYS___WCSTOMBS_STD_A = 0x67B - SYS___WCSWIDTH_A = 0x67C - SYS___WCSWIDTH_ASIA = 0x67F - SYS___WCSWIDTH_STD_A = 0x67E - SYS_FFLUSH = 0x068 - SYS___GETLOGIN_R_A = 0x68E - SYS___GETPWNAM_R_A = 0x68C - SYS___GETPWUID_R_A = 0x68D - SYS___TTYNAME_R_A = 0x68F - SYS___WCWIDTH_ASIA = 0x68B - SYS___WCWIDTH_STD_A = 0x68A - SYS_FOPEN = 0x069 - SYS___REGEXEC_A = 0x69A - SYS___REGEXEC_STD_A = 0x69B - SYS___REGFREE_A = 0x69C - SYS___REGFREE_STD_A = 0x69D - SYS___STRCOLL_A = 0x69E - SYS___STRCOLL_C_A = 0x69F - SYS_SCANF = 0x070 - SYS___A64L_A = 0x70C - SYS___ECVT_A = 0x70D - SYS___FCVT_A = 0x70E - SYS___GCVT_A = 0x70F - SYS___STRTOUL_A = 0x70A - SYS_____AE_CORRESTBL_QUERY_A = 0x70B - SYS_SPRINTF = 0x071 - SYS___ACCESS_A = 0x71F - SYS___CATOPEN_A = 0x71E - SYS___GETOPT_A = 0x71D - SYS___REALPATH_A = 0x71A - SYS___SETENV_A = 0x71B - SYS___SYSTEM_A = 0x71C - SYS_FGETC = 0x072 - SYS___GAI_STRERROR_A = 0x72F - SYS___RMDIR_A = 0x72A - SYS___STATVFS_A = 0x72B - SYS___SYMLINK_A = 0x72C - SYS___TRUNCATE_A = 0x72D - SYS___UNLINK_A = 0x72E - SYS_VFPRINTF = 0x073 - SYS___ISSPACE_A = 0x73A - SYS___ISUPPER_A = 0x73B - SYS___ISWALNUM_A = 0x73F - SYS___ISXDIGIT_A = 0x73C - SYS___TOLOWER_A = 0x73D - SYS___TOUPPER_A = 0x73E - SYS_VPRINTF = 0x074 - SYS___CONFSTR_A = 0x74B - SYS___FDOPEN_A = 0x74E - SYS___FLDATA_A = 0x74F - SYS___FTOK_A = 0x74C - SYS___ISWXDIGIT_A = 0x74A - SYS___MKTEMP_A = 0x74D - SYS_VSPRINTF = 0x075 - SYS___GETGRGID_A = 0x75A - SYS___GETGRNAM_A = 0x75B - SYS___GETGROUPSBYNAME_A = 0x75C - SYS___GETHOSTENT_A = 0x75D - SYS___GETHOSTNAME_A = 0x75E - SYS___GETLOGIN_A = 0x75F - SYS_GETC = 0x076 - SYS___CREATEWORKUNIT_A = 0x76A - SYS___CTERMID_A = 0x76B - SYS___FMTMSG_A = 0x76C - SYS___INITGROUPS_A = 0x76D - SYS___MSGRCV_A = 0x76F - SYS_____LOGIN_A = 0x76E - SYS_FGETS = 0x077 - SYS___STRCASECMP_A = 0x77B - SYS___STRNCASECMP_A = 0x77C - SYS___TTYNAME_A = 0x77D - SYS___UNAME_A = 0x77E - SYS___UTIMES_A = 0x77F - SYS_____SERVER_PWU_A = 0x77A - SYS_FPUTC = 0x078 - SYS___CREAT_O_A = 0x78E - SYS___ENVNA = 0x78F - SYS___FREAD_A = 0x78A - SYS___FWRITE_A = 0x78B - SYS___ISASCII = 0x78D - SYS___OPEN_O_A = 0x78C - SYS_FPUTS = 0x079 - SYS___ASCTIME_A = 0x79C - SYS___CTIME_A = 0x79D - SYS___GETDATE_A = 0x79E - SYS___GETSERVBYPORT_A = 0x79A - SYS___GETSERVENT_A = 0x79B - SYS___TZSET_A = 0x79F - SYS_ACL_FROM_TEXT = 0x80C - SYS_ACL_SET_FD = 0x80A - SYS_ACL_SET_FILE = 0x80B - SYS_ACL_SORT = 0x80E - SYS_ACL_TO_TEXT = 0x80D - SYS_UNGETC = 0x080 - SYS___SHUTDOWN_REGISTRATION = 0x80F - SYS_FREAD = 0x081 - SYS_FREEADDRINFO = 0x81A - SYS_GAI_STRERROR = 0x81B - SYS_REXEC_AF = 0x81C - SYS___DYNALLOC_A = 0x81F - SYS___POE = 0x81D - SYS_WCSTOMBS = 0x082 - SYS___INET_ADDR_A = 0x82F - SYS___NLIST_A = 0x82A - SYS_____TCGETCP_A = 0x82B - SYS_____TCSETCP_A = 0x82C - SYS_____W_PIOCTL_A = 0x82E - SYS_MBTOWC = 0x083 - SYS___CABEND = 0x83D - SYS___LE_CIB_GET = 0x83E - SYS___RECVMSG_A = 0x83B - SYS___SENDMSG_A = 0x83A - SYS___SET_LAA_FOR_JIT = 0x83F - SYS_____LCHATTR_A = 0x83C - SYS_WCTOMB = 0x084 - SYS___CBRTL_B = 0x84A - SYS___COPYSIGNF_B = 0x84B - SYS___COPYSIGNL_B = 0x84C - SYS___COTANF_B = 0x84D - SYS___COTANL_B = 0x84F - SYS___COTAN_B = 0x84E - SYS_MBSTOWCS = 0x085 - SYS___LOG1PL_B = 0x85A - SYS___LOG2F_B = 0x85B - SYS___LOG2L_B = 0x85D - SYS___LOG2_B = 0x85C - SYS___REMAINDERF_B = 0x85E - SYS___REMAINDERL_B = 0x85F - SYS_ACOSHF = 0x86E - SYS_ACOSHL = 0x86F - SYS_WCSCPY = 0x086 - 
SYS___ERFCF_B = 0x86D - SYS___ERFF_B = 0x86C - SYS___LROUNDF_B = 0x86A - SYS___LROUND_B = 0x86B - SYS_COTANL = 0x87A - SYS_EXP2F = 0x87B - SYS_EXP2L = 0x87C - SYS_EXPM1F = 0x87D - SYS_EXPM1L = 0x87E - SYS_FDIMF = 0x87F - SYS_WCSCAT = 0x087 - SYS___COTANL = 0x87A - SYS_REMAINDERF = 0x88A - SYS_REMAINDERL = 0x88B - SYS_REMAINDF = 0x88A - SYS_REMAINDL = 0x88B - SYS_REMQUO = 0x88D - SYS_REMQUOF = 0x88C - SYS_REMQUOL = 0x88E - SYS_TGAMMAF = 0x88F - SYS_WCSCHR = 0x088 - SYS_ERFCF = 0x89B - SYS_ERFCL = 0x89C - SYS_ERFL = 0x89A - SYS_EXP2 = 0x89E - SYS_WCSCMP = 0x089 - SYS___EXP2_B = 0x89D - SYS___FAR_JUMP = 0x89F - SYS_ABS = 0x090 - SYS___ERFCL_H = 0x90A - SYS___EXPF_H = 0x90C - SYS___EXPL_H = 0x90D - SYS___EXPM1_H = 0x90E - SYS___EXP_H = 0x90B - SYS___FDIM_H = 0x90F - SYS_DIV = 0x091 - SYS___LOG2F_H = 0x91F - SYS___LOG2_H = 0x91E - SYS___LOGB_H = 0x91D - SYS___LOGF_H = 0x91B - SYS___LOGL_H = 0x91C - SYS___LOG_H = 0x91A - SYS_LABS = 0x092 - SYS___POWL_H = 0x92A - SYS___REMAINDER_H = 0x92B - SYS___RINT_H = 0x92C - SYS___SCALB_H = 0x92D - SYS___SINF_H = 0x92F - SYS___SIN_H = 0x92E - SYS_STRNCPY = 0x093 - SYS___TANHF_H = 0x93B - SYS___TANHL_H = 0x93C - SYS___TANH_H = 0x93A - SYS___TGAMMAF_H = 0x93E - SYS___TGAMMA_H = 0x93D - SYS___TRUNC_H = 0x93F - SYS_MEMCPY = 0x094 - SYS_VFWSCANF = 0x94A - SYS_VSWSCANF = 0x94E - SYS_VWSCANF = 0x94C - SYS_INET6_RTH_ADD = 0x95D - SYS_INET6_RTH_INIT = 0x95C - SYS_INET6_RTH_REVERSE = 0x95E - SYS_INET6_RTH_SEGMENTS = 0x95F - SYS_INET6_RTH_SPACE = 0x95B - SYS_MEMMOVE = 0x095 - SYS_WCSTOLD = 0x95A - SYS_STRCPY = 0x096 - SYS_STRCMP = 0x097 - SYS_CABS = 0x98E - SYS_STRCAT = 0x098 - SYS___CABS_B = 0x98F - SYS___POW_II = 0x98A - SYS___POW_II_B = 0x98B - SYS___POW_II_H = 0x98C - SYS_CACOSF = 0x99A - SYS_CACOSL = 0x99D - SYS_STRNCAT = 0x099 - SYS___CACOSF_B = 0x99B - SYS___CACOSF_H = 0x99C - SYS___CACOSL_B = 0x99E - SYS___CACOSL_H = 0x99F - SYS_ISWALPHA = 0x100 - SYS_ISWBLANK = 0x101 - SYS___ISWBLK = 0x101 - SYS_ISWCNTRL = 0x102 - SYS_ISWDIGIT = 0x103 - SYS_ISWGRAPH = 0x104 - SYS_ISWLOWER = 0x105 - SYS_ISWPRINT = 0x106 - SYS_ISWPUNCT = 0x107 - SYS_ISWSPACE = 0x108 - SYS_ISWUPPER = 0x109 - SYS_WCTOB = 0x110 - SYS_MBRLEN = 0x111 - SYS_MBRTOWC = 0x112 - SYS_MBSRTOWC = 0x113 - SYS_MBSRTOWCS = 0x113 - SYS_WCRTOMB = 0x114 - SYS_WCSRTOMB = 0x115 - SYS_WCSRTOMBS = 0x115 - SYS___CSID = 0x116 - SYS___WCSID = 0x117 - SYS_STRPTIME = 0x118 - SYS___STRPTM = 0x118 - SYS_STRFMON = 0x119 - SYS_WCSCOLL = 0x130 - SYS_WCSXFRM = 0x131 - SYS_WCSWIDTH = 0x132 - SYS_WCWIDTH = 0x133 - SYS_WCSFTIME = 0x134 - SYS_SWPRINTF = 0x135 - SYS_VSWPRINT = 0x136 - SYS_VSWPRINTF = 0x136 - SYS_SWSCANF = 0x137 - SYS_REGCOMP = 0x138 - SYS_REGEXEC = 0x139 - SYS_GETWC = 0x140 - SYS_GETWCHAR = 0x141 - SYS_PUTWC = 0x142 - SYS_PUTWCHAR = 0x143 - SYS_UNGETWC = 0x144 - SYS_ICONV_OPEN = 0x145 - SYS_ICONV = 0x146 - SYS_ICONV_CLOSE = 0x147 - SYS_COLLRANGE = 0x150 - SYS_CCLASS = 0x151 - SYS_COLLORDER = 0x152 - SYS___DEMANGLE = 0x154 - SYS_FDOPEN = 0x155 - SYS___ERRNO = 0x156 - SYS___ERRNO2 = 0x157 - SYS___TERROR = 0x158 - SYS_MAXCOLL = 0x169 - SYS_DLLLOAD = 0x170 - SYS__EXIT = 0x174 - SYS_ACCESS = 0x175 - SYS_ALARM = 0x176 - SYS_CFGETISPEED = 0x177 - SYS_CFGETOSPEED = 0x178 - SYS_CFSETISPEED = 0x179 - SYS_CREAT = 0x180 - SYS_CTERMID = 0x181 - SYS_DUP = 0x182 - SYS_DUP2 = 0x183 - SYS_EXECL = 0x184 - SYS_EXECLE = 0x185 - SYS_EXECLP = 0x186 - SYS_EXECV = 0x187 - SYS_EXECVE = 0x188 - SYS_EXECVP = 0x189 - SYS_FSTAT = 0x190 - SYS_FSYNC = 0x191 - SYS_FTRUNCATE = 0x192 - SYS_GETCWD = 0x193 - SYS_GETEGID = 0x194 - SYS_GETEUID = 0x195 - 
SYS_GETGID = 0x196 - SYS_GETGRGID = 0x197 - SYS_GETGRNAM = 0x198 - SYS_GETGROUPS = 0x199 - SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 - SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 - SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 - SYS_PTHREAD_MUTEX_INIT = 0x203 - SYS_PTHREAD_MUTEX_DESTROY = 0x204 - SYS_PTHREAD_MUTEX_LOCK = 0x205 - SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 - SYS_PTHREAD_MUTEX_UNLOCK = 0x207 - SYS_PTHREAD_ONCE = 0x209 - SYS_TW_OPEN = 0x210 - SYS_TW_FCNTL = 0x211 - SYS_PTHREAD_JOIN_D4_NP = 0x212 - SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 - SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 - SYS_EXTLINK_NP = 0x215 - SYS___PASSWD = 0x216 - SYS_SETGROUPS = 0x217 - SYS_INITGROUPS = 0x218 - SYS_WCSRCHR = 0x240 - SYS_SVC99 = 0x241 - SYS___SVC99 = 0x241 - SYS_WCSWCS = 0x242 - SYS_LOCALECO = 0x243 - SYS_LOCALECONV = 0x243 - SYS___LIBREL = 0x244 - SYS_RELEASE = 0x245 - SYS___RLSE = 0x245 - SYS_FLOCATE = 0x246 - SYS___FLOCT = 0x246 - SYS_FDELREC = 0x247 - SYS___FDLREC = 0x247 - SYS_FETCH = 0x248 - SYS___FETCH = 0x248 - SYS_QSORT = 0x249 - SYS___CLEANUPCATCH = 0x260 - SYS___CATCHMATCH = 0x261 - SYS___CLEAN2UPCATCH = 0x262 - SYS_GETPRIORITY = 0x270 - SYS_NICE = 0x271 - SYS_SETPRIORITY = 0x272 - SYS_GETITIMER = 0x273 - SYS_SETITIMER = 0x274 - SYS_MSGCTL = 0x275 - SYS_MSGGET = 0x276 - SYS_MSGRCV = 0x277 - SYS_MSGSND = 0x278 - SYS_MSGXRCV = 0x279 - SYS___MSGXR = 0x279 - SYS_SHMGET = 0x280 - SYS___GETIPC = 0x281 - SYS_SETGRENT = 0x282 - SYS_GETGRENT = 0x283 - SYS_ENDGRENT = 0x284 - SYS_SETPWENT = 0x285 - SYS_GETPWENT = 0x286 - SYS_ENDPWENT = 0x287 - SYS_BSD_SIGNAL = 0x288 - SYS_KILLPG = 0x289 - SYS_SIGSET = 0x290 - SYS_SIGSTACK = 0x291 - SYS_GETRLIMIT = 0x292 - SYS_SETRLIMIT = 0x293 - SYS_GETRUSAGE = 0x294 - SYS_MMAP = 0x295 - SYS_MPROTECT = 0x296 - SYS_MSYNC = 0x297 - SYS_MUNMAP = 0x298 - SYS_CONFSTR = 0x299 - SYS___NDMTRM = 0x300 - SYS_FTOK = 0x301 - SYS_BASENAME = 0x302 - SYS_DIRNAME = 0x303 - SYS_GETDTABLESIZE = 0x304 - SYS_MKSTEMP = 0x305 - SYS_MKTEMP = 0x306 - SYS_NFTW = 0x307 - SYS_GETWD = 0x308 - SYS_LOCKF = 0x309 - SYS_WORDEXP = 0x310 - SYS_WORDFREE = 0x311 - SYS_GETPGID = 0x312 - SYS_GETSID = 0x313 - SYS___UTMPXNAME = 0x314 - SYS_CUSERID = 0x315 - SYS_GETPASS = 0x316 - SYS_FNMATCH = 0x317 - SYS_FTW = 0x318 - SYS_GETW = 0x319 - SYS_ACOSH = 0x320 - SYS_ASINH = 0x321 - SYS_ATANH = 0x322 - SYS_CBRT = 0x323 - SYS_EXPM1 = 0x324 - SYS_ILOGB = 0x325 - SYS_LOGB = 0x326 - SYS_LOG1P = 0x327 - SYS_NEXTAFTER = 0x328 - SYS_RINT = 0x329 - SYS_SPAWN = 0x330 - SYS_SPAWNP = 0x331 - SYS_GETLOGIN_UU = 0x332 - SYS_ECVT = 0x333 - SYS_FCVT = 0x334 - SYS_GCVT = 0x335 - SYS_ACCEPT = 0x336 - SYS_BIND = 0x337 - SYS_CONNECT = 0x338 - SYS_ENDHOSTENT = 0x339 - SYS_GETHOSTENT = 0x340 - SYS_GETHOSTID = 0x341 - SYS_GETHOSTNAME = 0x342 - SYS_GETNETBYADDR = 0x343 - SYS_GETNETBYNAME = 0x344 - SYS_GETNETENT = 0x345 - SYS_GETPEERNAME = 0x346 - SYS_GETPROTOBYNAME = 0x347 - SYS_GETPROTOBYNUMBER = 0x348 - SYS_GETPROTOENT = 0x349 - SYS_INET_LNAOF = 0x350 - SYS_INET_MAKEADDR = 0x351 - SYS_INET_NETOF = 0x352 - SYS_INET_NETWORK = 0x353 - SYS_INET_NTOA = 0x354 - SYS_IOCTL = 0x355 - SYS_LISTEN = 0x356 - SYS_READV = 0x357 - SYS_RECV = 0x358 - SYS_RECVFROM = 0x359 - SYS_SETHOSTENT = 0x360 - SYS_SETNETENT = 0x361 - SYS_SETPEER = 0x362 - SYS_SETPROTOENT = 0x363 - SYS_SETSERVENT = 0x364 - SYS_SETSOCKOPT = 0x365 - SYS_SHUTDOWN = 0x366 - SYS_SOCKET = 0x367 - SYS_SOCKETPAIR = 0x368 - SYS_WRITEV = 0x369 - SYS_ENDNETENT = 0x370 - SYS_CLOSELOG = 0x371 - SYS_OPENLOG = 0x372 - SYS_SETLOGMASK = 0x373 - SYS_SYSLOG = 0x374 - SYS_PTSNAME = 0x375 - SYS_SETREUID = 0x376 - 
SYS_SETREGID = 0x377 - SYS_REALPATH = 0x378 - SYS___SIGNGAM = 0x379 - SYS_POLL = 0x380 - SYS_REXEC = 0x381 - SYS___ISASCII2 = 0x382 - SYS___TOASCII2 = 0x383 - SYS_CHPRIORITY = 0x384 - SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 - SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 - SYS_PTHREAD_SET_LIMIT_NP = 0x387 - SYS___STNETENT = 0x388 - SYS___STPROTOENT = 0x389 - SYS___SELECT1 = 0x390 - SYS_PTHREAD_SECURITY_NP = 0x391 - SYS___CHECK_RESOURCE_AUTH_NP = 0x392 - SYS___CONVERT_ID_NP = 0x393 - SYS___OPENVMREL = 0x394 - SYS_WMEMCHR = 0x395 - SYS_WMEMCMP = 0x396 - SYS_WMEMCPY = 0x397 - SYS_WMEMMOVE = 0x398 - SYS_WMEMSET = 0x399 - SYS___FPUTWC = 0x400 - SYS___PUTWC = 0x401 - SYS___PWCHAR = 0x402 - SYS___WCSFTM = 0x403 - SYS___WCSTOK = 0x404 - SYS___WCWDTH = 0x405 - SYS_T_ACCEPT = 0x409 - SYS_T_GETINFO = 0x410 - SYS_T_GETPROTADDR = 0x411 - SYS_T_GETSTATE = 0x412 - SYS_T_LISTEN = 0x413 - SYS_T_LOOK = 0x414 - SYS_T_OPEN = 0x415 - SYS_T_OPTMGMT = 0x416 - SYS_T_RCV = 0x417 - SYS_T_RCVCONNECT = 0x418 - SYS_T_RCVDIS = 0x419 - SYS_T_SNDUDATA = 0x420 - SYS_T_STRERROR = 0x421 - SYS_T_SYNC = 0x422 - SYS_T_UNBIND = 0x423 - SYS___T_ERRNO = 0x424 - SYS___RECVMSG2 = 0x425 - SYS___SENDMSG2 = 0x426 - SYS_FATTACH = 0x427 - SYS_FDETACH = 0x428 - SYS_GETMSG = 0x429 - SYS_GETCONTEXT = 0x430 - SYS_SETCONTEXT = 0x431 - SYS_MAKECONTEXT = 0x432 - SYS_SWAPCONTEXT = 0x433 - SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 - SYS_GETCLIENTID = 0x470 - SYS___GETCLIENTID = 0x471 - SYS_GETSTABLESIZE = 0x472 - SYS_GETIBMOPT = 0x473 - SYS_GETIBMSOCKOPT = 0x474 - SYS_GIVESOCKET = 0x475 - SYS_IBMSFLUSH = 0x476 - SYS_MAXDESC = 0x477 - SYS_SETIBMOPT = 0x478 - SYS_SETIBMSOCKOPT = 0x479 - SYS___SERVER_PWU = 0x480 - SYS_PTHREAD_TAG_NP = 0x481 - SYS___CONSOLE = 0x482 - SYS___WSINIT = 0x483 - SYS___IPTCPN = 0x489 - SYS___SERVER_CLASSIFY = 0x490 - SYS___HEAPRPT = 0x496 - SYS___ISBFP = 0x500 - SYS___FP_CAST = 0x501 - SYS___CERTIFICATE = 0x502 - SYS_SEND_FILE = 0x503 - SYS_AIO_CANCEL = 0x504 - SYS_AIO_ERROR = 0x505 - SYS_AIO_READ = 0x506 - SYS_AIO_RETURN = 0x507 - SYS_AIO_SUSPEND = 0x508 - SYS_AIO_WRITE = 0x509 - SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 - SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 - SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 - SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 - SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 - SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 - SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 - SYS___CTTBL = 0x517 - SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 - SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 - SYS___FP_UNORDERED = 0x520 - SYS___FP_READ_RND = 0x521 - SYS___FP_READ_RND_B = 0x522 - SYS___FP_SWAP_RND = 0x523 - SYS___FP_SWAP_RND_B = 0x524 - SYS___FP_LEVEL = 0x525 - SYS___FP_BTOH = 0x526 - SYS___FP_HTOB = 0x527 - SYS___FPC_RD = 0x528 - SYS___FPC_WR = 0x529 - SYS_PTHREAD_SETCANCELTYPE = 0x600 - SYS_PTHREAD_TESTCANCEL = 0x601 - SYS___ATANF_B = 0x602 - SYS___ATANL_B = 0x603 - SYS___CEILF_B = 0x604 - SYS___CEILL_B = 0x605 - SYS___COSF_B = 0x606 - SYS___COSL_B = 0x607 - SYS___FABSF_B = 0x608 - SYS___FABSL_B = 0x609 - SYS___SINF_B = 0x610 - SYS___SINL_B = 0x611 - SYS___TANF_B = 0x612 - SYS___TANL_B = 0x613 - SYS___TANHF_B = 0x614 - SYS___TANHL_B = 0x615 - SYS___ACOSF_B = 0x616 - SYS___ACOSL_B = 0x617 - SYS___ASINF_B = 0x618 - SYS___ASINL_B = 0x619 - SYS___LOGF_B = 0x620 - SYS___LOGL_B = 0x621 - SYS___LOG10F_B = 0x622 - SYS___LOG10L_B = 0x623 - SYS___POWF_B = 0x624 - SYS___POWL_B = 0x625 - SYS___SINHF_B = 0x626 - SYS___SINHL_B = 0x627 - SYS___SQRTF_B = 0x628 - SYS___SQRTL_B = 0x629 - SYS___MODFL_B = 0x630 - SYS_ABSF = 0x631 - SYS_ABSL = 0x632 - SYS_ACOSF = 0x633 - SYS_ACOSL = 0x634 - 
SYS_ASINF = 0x635 - SYS_ASINL = 0x636 - SYS_ATAN2F = 0x637 - SYS_ATAN2L = 0x638 - SYS_ATANF = 0x639 - SYS_COSHL = 0x640 - SYS_EXPF = 0x641 - SYS_EXPL = 0x642 - SYS_TANHF = 0x643 - SYS_TANHL = 0x644 - SYS_LOG10F = 0x645 - SYS_LOG10L = 0x646 - SYS_LOGF = 0x647 - SYS_LOGL = 0x648 - SYS_POWF = 0x649 - SYS_SINHL = 0x650 - SYS_TANF = 0x651 - SYS_TANL = 0x652 - SYS_FABSF = 0x653 - SYS_FABSL = 0x654 - SYS_FLOORF = 0x655 - SYS_FLOORL = 0x656 - SYS_FMODF = 0x657 - SYS_FMODL = 0x658 - SYS_FREXPF = 0x659 - SYS___CHATTR = 0x660 - SYS___FCHATTR = 0x661 - SYS___TOCCSID = 0x662 - SYS___CSNAMETYPE = 0x663 - SYS___TOCSNAME = 0x664 - SYS___CCSIDTYPE = 0x665 - SYS___AE_CORRESTBL_QUERY = 0x666 - SYS___AE_AUTOCONVERT_STATE = 0x667 - SYS_DN_FIND = 0x668 - SYS___GETHOSTBYADDR_A = 0x669 - SYS___MBLEN_SB_A = 0x670 - SYS___MBLEN_STD_A = 0x671 - SYS___MBLEN_UTF = 0x672 - SYS___MBSTOWCS_A = 0x673 - SYS___MBSTOWCS_STD_A = 0x674 - SYS___MBTOWC_A = 0x675 - SYS___MBTOWC_ISO1 = 0x676 - SYS___MBTOWC_SBCS = 0x677 - SYS___MBTOWC_MBCS = 0x678 - SYS___MBTOWC_UTF = 0x679 - SYS___CSID_A = 0x680 - SYS___CSID_STD_A = 0x681 - SYS___WCSID_A = 0x682 - SYS___WCSID_STD_A = 0x683 - SYS___WCTOMB_A = 0x684 - SYS___WCTOMB_ISO1 = 0x685 - SYS___WCTOMB_STD_A = 0x686 - SYS___WCTOMB_UTF = 0x687 - SYS___WCWIDTH_A = 0x688 - SYS___GETGRNAM_R_A = 0x689 - SYS___READDIR_R_A = 0x690 - SYS___E2A_S = 0x691 - SYS___FNMATCH_A = 0x692 - SYS___FNMATCH_C_A = 0x693 - SYS___EXECL_A = 0x694 - SYS___FNMATCH_STD_A = 0x695 - SYS___REGCOMP_A = 0x696 - SYS___REGCOMP_STD_A = 0x697 - SYS___REGERROR_A = 0x698 - SYS___REGERROR_STD_A = 0x699 - SYS___SWPRINTF_A = 0x700 - SYS___FSCANF_A = 0x701 - SYS___SCANF_A = 0x702 - SYS___SSCANF_A = 0x703 - SYS___SWSCANF_A = 0x704 - SYS___ATOF_A = 0x705 - SYS___ATOI_A = 0x706 - SYS___ATOL_A = 0x707 - SYS___STRTOD_A = 0x708 - SYS___STRTOL_A = 0x709 - SYS___L64A_A = 0x710 - SYS___STRERROR_A = 0x711 - SYS___PERROR_A = 0x712 - SYS___FETCH_A = 0x713 - SYS___GETENV_A = 0x714 - SYS___MKSTEMP_A = 0x717 - SYS___PTSNAME_A = 0x718 - SYS___PUTENV_A = 0x719 - SYS___CHDIR_A = 0x720 - SYS___CHOWN_A = 0x721 - SYS___CHROOT_A = 0x722 - SYS___GETCWD_A = 0x723 - SYS___GETWD_A = 0x724 - SYS___LCHOWN_A = 0x725 - SYS___LINK_A = 0x726 - SYS___PATHCONF_A = 0x727 - SYS___IF_NAMEINDEX_A = 0x728 - SYS___READLINK_A = 0x729 - SYS___EXTLINK_NP_A = 0x730 - SYS___ISALNUM_A = 0x731 - SYS___ISALPHA_A = 0x732 - SYS___A2E_S = 0x733 - SYS___ISCNTRL_A = 0x734 - SYS___ISDIGIT_A = 0x735 - SYS___ISGRAPH_A = 0x736 - SYS___ISLOWER_A = 0x737 - SYS___ISPRINT_A = 0x738 - SYS___ISPUNCT_A = 0x739 - SYS___ISWALPHA_A = 0x740 - SYS___A2E_L = 0x741 - SYS___ISWCNTRL_A = 0x742 - SYS___ISWDIGIT_A = 0x743 - SYS___ISWGRAPH_A = 0x744 - SYS___ISWLOWER_A = 0x745 - SYS___ISWPRINT_A = 0x746 - SYS___ISWPUNCT_A = 0x747 - SYS___ISWSPACE_A = 0x748 - SYS___ISWUPPER_A = 0x749 - SYS___REMOVE_A = 0x750 - SYS___RENAME_A = 0x751 - SYS___TMPNAM_A = 0x752 - SYS___FOPEN_A = 0x753 - SYS___FREOPEN_A = 0x754 - SYS___CUSERID_A = 0x755 - SYS___POPEN_A = 0x756 - SYS___TEMPNAM_A = 0x757 - SYS___FTW_A = 0x758 - SYS___GETGRENT_A = 0x759 - SYS___INET_NTOP_A = 0x760 - SYS___GETPASS_A = 0x761 - SYS___GETPWENT_A = 0x762 - SYS___GETPWNAM_A = 0x763 - SYS___GETPWUID_A = 0x764 - SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 - SYS___CHECKSCHENV_A = 0x766 - SYS___CONNECTSERVER_A = 0x767 - SYS___CONNECTWORKMGR_A = 0x768 - SYS_____CONSOLE_A = 0x769 - SYS___MSGSND_A = 0x770 - SYS___MSGXRCV_A = 0x771 - SYS___NFTW_A = 0x772 - SYS_____PASSWD_A = 0x773 - SYS___PTHREAD_SECURITY_NP_A = 0x774 - SYS___QUERYMETRICS_A = 0x775 - SYS___QUERYSCHENV = 
0x776 - SYS___READV_A = 0x777 - SYS_____SERVER_CLASSIFY_A = 0x778 - SYS_____SERVER_INIT_A = 0x779 - SYS___W_GETPSENT_A = 0x780 - SYS___WRITEV_A = 0x781 - SYS___W_STATFS_A = 0x782 - SYS___W_STATVFS_A = 0x783 - SYS___FPUTC_A = 0x784 - SYS___PUTCHAR_A = 0x785 - SYS___PUTS_A = 0x786 - SYS___FGETS_A = 0x787 - SYS___GETS_A = 0x788 - SYS___FPUTS_A = 0x789 - SYS___PUTC_A = 0x790 - SYS___AE_THREAD_SETMODE = 0x791 - SYS___AE_THREAD_SWAPMODE = 0x792 - SYS___GETNETBYADDR_A = 0x793 - SYS___GETNETBYNAME_A = 0x794 - SYS___GETNETENT_A = 0x795 - SYS___GETPROTOBYNAME_A = 0x796 - SYS___GETPROTOBYNUMBER_A = 0x797 - SYS___GETPROTOENT_A = 0x798 - SYS___GETSERVBYNAME_A = 0x799 - SYS_ACL_FIRST_ENTRY = 0x800 - SYS_ACL_GET_ENTRY = 0x801 - SYS_ACL_VALID = 0x802 - SYS_ACL_CREATE_ENTRY = 0x803 - SYS_ACL_DELETE_ENTRY = 0x804 - SYS_ACL_UPDATE_ENTRY = 0x805 - SYS_ACL_DELETE_FD = 0x806 - SYS_ACL_DELETE_FILE = 0x807 - SYS_ACL_GET_FD = 0x808 - SYS_ACL_GET_FILE = 0x809 - SYS___ERFL_B = 0x810 - SYS___ERFCL_B = 0x811 - SYS___LGAMMAL_B = 0x812 - SYS___SETHOOKEVENTS = 0x813 - SYS_IF_NAMETOINDEX = 0x814 - SYS_IF_INDEXTONAME = 0x815 - SYS_IF_NAMEINDEX = 0x816 - SYS_IF_FREENAMEINDEX = 0x817 - SYS_GETADDRINFO = 0x818 - SYS_GETNAMEINFO = 0x819 - SYS___DYNFREE_A = 0x820 - SYS___RES_QUERY_A = 0x821 - SYS___RES_SEARCH_A = 0x822 - SYS___RES_QUERYDOMAIN_A = 0x823 - SYS___RES_MKQUERY_A = 0x824 - SYS___RES_SEND_A = 0x825 - SYS___DN_EXPAND_A = 0x826 - SYS___DN_SKIPNAME_A = 0x827 - SYS___DN_COMP_A = 0x828 - SYS___DN_FIND_A = 0x829 - SYS___INET_NTOA_A = 0x830 - SYS___INET_NETWORK_A = 0x831 - SYS___ACCEPT_A = 0x832 - SYS___ACCEPT_AND_RECV_A = 0x833 - SYS___BIND_A = 0x834 - SYS___CONNECT_A = 0x835 - SYS___GETPEERNAME_A = 0x836 - SYS___GETSOCKNAME_A = 0x837 - SYS___RECVFROM_A = 0x838 - SYS___SENDTO_A = 0x839 - SYS___LCHATTR = 0x840 - SYS___WRITEDOWN = 0x841 - SYS_PTHREAD_MUTEX_INIT2 = 0x842 - SYS___ACOSHF_B = 0x843 - SYS___ACOSHL_B = 0x844 - SYS___ASINHF_B = 0x845 - SYS___ASINHL_B = 0x846 - SYS___ATANHF_B = 0x847 - SYS___ATANHL_B = 0x848 - SYS___CBRTF_B = 0x849 - SYS___EXP2F_B = 0x850 - SYS___EXP2L_B = 0x851 - SYS___EXPM1F_B = 0x852 - SYS___EXPM1L_B = 0x853 - SYS___FDIMF_B = 0x854 - SYS___FDIM_B = 0x855 - SYS___FDIML_B = 0x856 - SYS___HYPOTF_B = 0x857 - SYS___HYPOTL_B = 0x858 - SYS___LOG1PF_B = 0x859 - SYS___REMQUOF_B = 0x860 - SYS___REMQUO_B = 0x861 - SYS___REMQUOL_B = 0x862 - SYS___TGAMMAF_B = 0x863 - SYS___TGAMMA_B = 0x864 - SYS___TGAMMAL_B = 0x865 - SYS___TRUNCF_B = 0x866 - SYS___TRUNC_B = 0x867 - SYS___TRUNCL_B = 0x868 - SYS___LGAMMAF_B = 0x869 - SYS_ASINHF = 0x870 - SYS_ASINHL = 0x871 - SYS_ATANHF = 0x872 - SYS_ATANHL = 0x873 - SYS_CBRTF = 0x874 - SYS_CBRTL = 0x875 - SYS_COPYSIGNF = 0x876 - SYS_CPYSIGNF = 0x876 - SYS_COPYSIGNL = 0x877 - SYS_CPYSIGNL = 0x877 - SYS_COTANF = 0x878 - SYS___COTANF = 0x878 - SYS_COTAN = 0x879 - SYS___COTAN = 0x879 - SYS_FDIM = 0x881 - SYS_FDIML = 0x882 - SYS_HYPOTF = 0x883 - SYS_HYPOTL = 0x884 - SYS_LOG1PF = 0x885 - SYS_LOG1PL = 0x886 - SYS_LOG2F = 0x887 - SYS_LOG2 = 0x888 - SYS_LOG2L = 0x889 - SYS_TGAMMA = 0x890 - SYS_TGAMMAL = 0x891 - SYS_TRUNCF = 0x892 - SYS_TRUNC = 0x893 - SYS_TRUNCL = 0x894 - SYS_LGAMMAF = 0x895 - SYS_LGAMMAL = 0x896 - SYS_LROUNDF = 0x897 - SYS_LROUND = 0x898 - SYS_ERFF = 0x899 - SYS___COSHF_H = 0x900 - SYS___COSHL_H = 0x901 - SYS___COTAN_H = 0x902 - SYS___COTANF_H = 0x903 - SYS___COTANL_H = 0x904 - SYS___ERF_H = 0x905 - SYS___ERFF_H = 0x906 - SYS___ERFL_H = 0x907 - SYS___ERFC_H = 0x908 - SYS___ERFCF_H = 0x909 - SYS___FDIMF_H = 0x910 - SYS___FDIML_H = 0x911 - SYS___FMOD_H = 0x912 - 
SYS___FMODF_H = 0x913 - SYS___FMODL_H = 0x914 - SYS___GAMMA_H = 0x915 - SYS___HYPOT_H = 0x916 - SYS___ILOGB_H = 0x917 - SYS___LGAMMA_H = 0x918 - SYS___LGAMMAF_H = 0x919 - SYS___LOG2L_H = 0x920 - SYS___LOG1P_H = 0x921 - SYS___LOG10_H = 0x922 - SYS___LOG10F_H = 0x923 - SYS___LOG10L_H = 0x924 - SYS___LROUND_H = 0x925 - SYS___LROUNDF_H = 0x926 - SYS___NEXTAFTER_H = 0x927 - SYS___POW_H = 0x928 - SYS___POWF_H = 0x929 - SYS___SINL_H = 0x930 - SYS___SINH_H = 0x931 - SYS___SINHF_H = 0x932 - SYS___SINHL_H = 0x933 - SYS___SQRT_H = 0x934 - SYS___SQRTF_H = 0x935 - SYS___SQRTL_H = 0x936 - SYS___TAN_H = 0x937 - SYS___TANF_H = 0x938 - SYS___TANL_H = 0x939 - SYS___TRUNCF_H = 0x940 - SYS___TRUNCL_H = 0x941 - SYS___COSH_H = 0x942 - SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 - SYS_VFSCANF = 0x944 - SYS_VSCANF = 0x946 - SYS_VSSCANF = 0x948 - SYS_IMAXABS = 0x950 - SYS_IMAXDIV = 0x951 - SYS_STRTOIMAX = 0x952 - SYS_STRTOUMAX = 0x953 - SYS_WCSTOIMAX = 0x954 - SYS_WCSTOUMAX = 0x955 - SYS_ATOLL = 0x956 - SYS_STRTOF = 0x957 - SYS_STRTOLD = 0x958 - SYS_WCSTOF = 0x959 - SYS_INET6_RTH_GETADDR = 0x960 - SYS_INET6_OPT_INIT = 0x961 - SYS_INET6_OPT_APPEND = 0x962 - SYS_INET6_OPT_FINISH = 0x963 - SYS_INET6_OPT_SET_VAL = 0x964 - SYS_INET6_OPT_NEXT = 0x965 - SYS_INET6_OPT_FIND = 0x966 - SYS_INET6_OPT_GET_VAL = 0x967 - SYS___POW_I = 0x987 - SYS___POW_I_B = 0x988 - SYS___POW_I_H = 0x989 - SYS___CABS_H = 0x990 - SYS_CABSF = 0x991 - SYS___CABSF_B = 0x992 - SYS___CABSF_H = 0x993 - SYS_CABSL = 0x994 - SYS___CABSL_B = 0x995 - SYS___CABSL_H = 0x996 - SYS_CACOS = 0x997 - SYS___CACOS_B = 0x998 - SYS___CACOS_H = 0x999 + SYS_LOG = 0x17 // 23 + SYS_COSH = 0x18 // 24 + SYS_TANH = 0x19 // 25 + SYS_EXP = 0x1A // 26 + SYS_MODF = 0x1B // 27 + SYS_LOG10 = 0x1C // 28 + SYS_FREXP = 0x1D // 29 + SYS_LDEXP = 0x1E // 30 + SYS_CEIL = 0x1F // 31 + SYS_POW = 0x20 // 32 + SYS_SQRT = 0x21 // 33 + SYS_FLOOR = 0x22 // 34 + SYS_J1 = 0x23 // 35 + SYS_FABS = 0x24 // 36 + SYS_FMOD = 0x25 // 37 + SYS_J0 = 0x26 // 38 + SYS_YN = 0x27 // 39 + SYS_JN = 0x28 // 40 + SYS_Y0 = 0x29 // 41 + SYS_Y1 = 0x2A // 42 + SYS_HYPOT = 0x2B // 43 + SYS_ERF = 0x2C // 44 + SYS_ERFC = 0x2D // 45 + SYS_GAMMA = 0x2E // 46 + SYS_ISALPHA = 0x30 // 48 + SYS_ISALNUM = 0x31 // 49 + SYS_ISLOWER = 0x32 // 50 + SYS_ISCNTRL = 0x33 // 51 + SYS_ISDIGIT = 0x34 // 52 + SYS_ISGRAPH = 0x35 // 53 + SYS_ISUPPER = 0x36 // 54 + SYS_ISPRINT = 0x37 // 55 + SYS_ISPUNCT = 0x38 // 56 + SYS_ISSPACE = 0x39 // 57 + SYS_SETLOCAL = 0x3A // 58 + SYS_SETLOCALE = 0x3A // 58 + SYS_ISXDIGIT = 0x3B // 59 + SYS_TOLOWER = 0x3C // 60 + SYS_TOUPPER = 0x3D // 61 + SYS_ASIN = 0x3E // 62 + SYS_SIN = 0x3F // 63 + SYS_COS = 0x40 // 64 + SYS_TAN = 0x41 // 65 + SYS_SINH = 0x42 // 66 + SYS_ACOS = 0x43 // 67 + SYS_ATAN = 0x44 // 68 + SYS_ATAN2 = 0x45 // 69 + SYS_FTELL = 0x46 // 70 + SYS_FGETPOS = 0x47 // 71 + SYS_FSEEK = 0x48 // 72 + SYS_FSETPOS = 0x49 // 73 + SYS_FERROR = 0x4A // 74 + SYS_REWIND = 0x4B // 75 + SYS_CLEARERR = 0x4C // 76 + SYS_FEOF = 0x4D // 77 + SYS_ATOL = 0x4E // 78 + SYS_PERROR = 0x4F // 79 + SYS_ATOF = 0x50 // 80 + SYS_ATOI = 0x51 // 81 + SYS_RAND = 0x52 // 82 + SYS_STRTOD = 0x53 // 83 + SYS_STRTOL = 0x54 // 84 + SYS_STRTOUL = 0x55 // 85 + SYS_MALLOC = 0x56 // 86 + SYS_SRAND = 0x57 // 87 + SYS_CALLOC = 0x58 // 88 + SYS_FREE = 0x59 // 89 + SYS_EXIT = 0x5A // 90 + SYS_REALLOC = 0x5B // 91 + SYS_ABORT = 0x5C // 92 + SYS___ABORT = 0x5C // 92 + SYS_ATEXIT = 0x5D // 93 + SYS_RAISE = 0x5E // 94 + SYS_SETJMP = 0x5F // 95 + SYS_LONGJMP = 0x60 // 96 + SYS_SIGNAL = 0x61 // 97 + SYS_TMPNAM = 0x62 // 98 + SYS_REMOVE = 0x63 // 99 + 
SYS_RENAME = 0x64 // 100 + SYS_TMPFILE = 0x65 // 101 + SYS_FREOPEN = 0x66 // 102 + SYS_FCLOSE = 0x67 // 103 + SYS_FFLUSH = 0x68 // 104 + SYS_FOPEN = 0x69 // 105 + SYS_FSCANF = 0x6A // 106 + SYS_SETBUF = 0x6B // 107 + SYS_SETVBUF = 0x6C // 108 + SYS_FPRINTF = 0x6D // 109 + SYS_SSCANF = 0x6E // 110 + SYS_PRINTF = 0x6F // 111 + SYS_SCANF = 0x70 // 112 + SYS_SPRINTF = 0x71 // 113 + SYS_FGETC = 0x72 // 114 + SYS_VFPRINTF = 0x73 // 115 + SYS_VPRINTF = 0x74 // 116 + SYS_VSPRINTF = 0x75 // 117 + SYS_GETC = 0x76 // 118 + SYS_FGETS = 0x77 // 119 + SYS_FPUTC = 0x78 // 120 + SYS_FPUTS = 0x79 // 121 + SYS_PUTCHAR = 0x7A // 122 + SYS_GETCHAR = 0x7B // 123 + SYS_GETS = 0x7C // 124 + SYS_PUTC = 0x7D // 125 + SYS_FWRITE = 0x7E // 126 + SYS_PUTS = 0x7F // 127 + SYS_UNGETC = 0x80 // 128 + SYS_FREAD = 0x81 // 129 + SYS_WCSTOMBS = 0x82 // 130 + SYS_MBTOWC = 0x83 // 131 + SYS_WCTOMB = 0x84 // 132 + SYS_MBSTOWCS = 0x85 // 133 + SYS_WCSCPY = 0x86 // 134 + SYS_WCSCAT = 0x87 // 135 + SYS_WCSCHR = 0x88 // 136 + SYS_WCSCMP = 0x89 // 137 + SYS_WCSNCMP = 0x8A // 138 + SYS_WCSCSPN = 0x8B // 139 + SYS_WCSLEN = 0x8C // 140 + SYS_WCSNCAT = 0x8D // 141 + SYS_WCSSPN = 0x8E // 142 + SYS_WCSNCPY = 0x8F // 143 + SYS_ABS = 0x90 // 144 + SYS_DIV = 0x91 // 145 + SYS_LABS = 0x92 // 146 + SYS_STRNCPY = 0x93 // 147 + SYS_MEMCPY = 0x94 // 148 + SYS_MEMMOVE = 0x95 // 149 + SYS_STRCPY = 0x96 // 150 + SYS_STRCMP = 0x97 // 151 + SYS_STRCAT = 0x98 // 152 + SYS_STRNCAT = 0x99 // 153 + SYS_MEMCMP = 0x9A // 154 + SYS_MEMCHR = 0x9B // 155 + SYS_STRCOLL = 0x9C // 156 + SYS_STRNCMP = 0x9D // 157 + SYS_STRXFRM = 0x9E // 158 + SYS_STRRCHR = 0x9F // 159 + SYS_STRCHR = 0xA0 // 160 + SYS_STRCSPN = 0xA1 // 161 + SYS_STRPBRK = 0xA2 // 162 + SYS_MEMSET = 0xA3 // 163 + SYS_STRSPN = 0xA4 // 164 + SYS_STRSTR = 0xA5 // 165 + SYS_STRTOK = 0xA6 // 166 + SYS_DIFFTIME = 0xA7 // 167 + SYS_STRERROR = 0xA8 // 168 + SYS_STRLEN = 0xA9 // 169 + SYS_CLOCK = 0xAA // 170 + SYS_CTIME = 0xAB // 171 + SYS_MKTIME = 0xAC // 172 + SYS_TIME = 0xAD // 173 + SYS_ASCTIME = 0xAE // 174 + SYS_MBLEN = 0xAF // 175 + SYS_GMTIME = 0xB0 // 176 + SYS_LOCALTIM = 0xB1 // 177 + SYS_LOCALTIME = 0xB1 // 177 + SYS_STRFTIME = 0xB2 // 178 + SYS___GETCB = 0xB4 // 180 + SYS_FUPDATE = 0xB5 // 181 + SYS___FUPDT = 0xB5 // 181 + SYS_CLRMEMF = 0xBD // 189 + SYS___CLRMF = 0xBD // 189 + SYS_FETCHEP = 0xBF // 191 + SYS___FTCHEP = 0xBF // 191 + SYS_FLDATA = 0xC1 // 193 + SYS___FLDATA = 0xC1 // 193 + SYS_DYNFREE = 0xC2 // 194 + SYS___DYNFRE = 0xC2 // 194 + SYS_DYNALLOC = 0xC3 // 195 + SYS___DYNALL = 0xC3 // 195 + SYS___CDUMP = 0xC4 // 196 + SYS_CSNAP = 0xC5 // 197 + SYS___CSNAP = 0xC5 // 197 + SYS_CTRACE = 0xC6 // 198 + SYS___CTRACE = 0xC6 // 198 + SYS___CTEST = 0xC7 // 199 + SYS_SETENV = 0xC8 // 200 + SYS___SETENV = 0xC8 // 200 + SYS_CLEARENV = 0xC9 // 201 + SYS___CLRENV = 0xC9 // 201 + SYS___REGCOMP_STD = 0xEA // 234 + SYS_NL_LANGINFO = 0xFC // 252 + SYS_GETSYNTX = 0xFD // 253 + SYS_ISBLANK = 0xFE // 254 + SYS___ISBLNK = 0xFE // 254 + SYS_ISWALNUM = 0xFF // 255 + SYS_ISWALPHA = 0x100 // 256 + SYS_ISWBLANK = 0x101 // 257 + SYS___ISWBLK = 0x101 // 257 + SYS_ISWCNTRL = 0x102 // 258 + SYS_ISWDIGIT = 0x103 // 259 + SYS_ISWGRAPH = 0x104 // 260 + SYS_ISWLOWER = 0x105 // 261 + SYS_ISWPRINT = 0x106 // 262 + SYS_ISWPUNCT = 0x107 // 263 + SYS_ISWSPACE = 0x108 // 264 + SYS_ISWUPPER = 0x109 // 265 + SYS_ISWXDIGI = 0x10A // 266 + SYS_ISWXDIGIT = 0x10A // 266 + SYS_WCTYPE = 0x10B // 267 + SYS_ISWCTYPE = 0x10C // 268 + SYS_TOWLOWER = 0x10D // 269 + SYS_TOWUPPER = 0x10E // 270 + SYS_MBSINIT = 0x10F // 271 + SYS_WCTOB = 
0x110 // 272 + SYS_MBRLEN = 0x111 // 273 + SYS_MBRTOWC = 0x112 // 274 + SYS_MBSRTOWC = 0x113 // 275 + SYS_MBSRTOWCS = 0x113 // 275 + SYS_WCRTOMB = 0x114 // 276 + SYS_WCSRTOMB = 0x115 // 277 + SYS_WCSRTOMBS = 0x115 // 277 + SYS___CSID = 0x116 // 278 + SYS___WCSID = 0x117 // 279 + SYS_STRPTIME = 0x118 // 280 + SYS___STRPTM = 0x118 // 280 + SYS_STRFMON = 0x119 // 281 + SYS___RPMTCH = 0x11A // 282 + SYS_WCSSTR = 0x11B // 283 + SYS_WCSTOK = 0x12C // 300 + SYS_WCSTOL = 0x12D // 301 + SYS_WCSTOD = 0x12E // 302 + SYS_WCSTOUL = 0x12F // 303 + SYS_WCSCOLL = 0x130 // 304 + SYS_WCSXFRM = 0x131 // 305 + SYS_WCSWIDTH = 0x132 // 306 + SYS_WCWIDTH = 0x133 // 307 + SYS_WCSFTIME = 0x134 // 308 + SYS_SWPRINTF = 0x135 // 309 + SYS_VSWPRINT = 0x136 // 310 + SYS_VSWPRINTF = 0x136 // 310 + SYS_SWSCANF = 0x137 // 311 + SYS_REGCOMP = 0x138 // 312 + SYS_REGEXEC = 0x139 // 313 + SYS_REGFREE = 0x13A // 314 + SYS_REGERROR = 0x13B // 315 + SYS_FGETWC = 0x13C // 316 + SYS_FGETWS = 0x13D // 317 + SYS_FPUTWC = 0x13E // 318 + SYS_FPUTWS = 0x13F // 319 + SYS_GETWC = 0x140 // 320 + SYS_GETWCHAR = 0x141 // 321 + SYS_PUTWC = 0x142 // 322 + SYS_PUTWCHAR = 0x143 // 323 + SYS_UNGETWC = 0x144 // 324 + SYS_ICONV_OPEN = 0x145 // 325 + SYS_ICONV = 0x146 // 326 + SYS_ICONV_CLOSE = 0x147 // 327 + SYS_ISMCCOLLEL = 0x14C // 332 + SYS_STRTOCOLL = 0x14D // 333 + SYS_COLLTOSTR = 0x14E // 334 + SYS_COLLEQUIV = 0x14F // 335 + SYS_COLLRANGE = 0x150 // 336 + SYS_CCLASS = 0x151 // 337 + SYS_COLLORDER = 0x152 // 338 + SYS___DEMANGLE = 0x154 // 340 + SYS_FDOPEN = 0x155 // 341 + SYS___ERRNO = 0x156 // 342 + SYS___ERRNO2 = 0x157 // 343 + SYS___TERROR = 0x158 // 344 + SYS_MAXCOLL = 0x169 // 361 + SYS_GETMCCOLL = 0x16A // 362 + SYS_GETWMCCOLL = 0x16B // 363 + SYS___ERR2AD = 0x16C // 364 + SYS_DLLQUERYFN = 0x16D // 365 + SYS_DLLQUERYVAR = 0x16E // 366 + SYS_DLLFREE = 0x16F // 367 + SYS_DLLLOAD = 0x170 // 368 + SYS__EXIT = 0x174 // 372 + SYS_ACCESS = 0x175 // 373 + SYS_ALARM = 0x176 // 374 + SYS_CFGETISPEED = 0x177 // 375 + SYS_CFGETOSPEED = 0x178 // 376 + SYS_CFSETISPEED = 0x179 // 377 + SYS_CFSETOSPEED = 0x17A // 378 + SYS_CHDIR = 0x17B // 379 + SYS_CHMOD = 0x17C // 380 + SYS_CHOWN = 0x17D // 381 + SYS_CLOSE = 0x17E // 382 + SYS_CLOSEDIR = 0x17F // 383 + SYS_CREAT = 0x180 // 384 + SYS_CTERMID = 0x181 // 385 + SYS_DUP = 0x182 // 386 + SYS_DUP2 = 0x183 // 387 + SYS_EXECL = 0x184 // 388 + SYS_EXECLE = 0x185 // 389 + SYS_EXECLP = 0x186 // 390 + SYS_EXECV = 0x187 // 391 + SYS_EXECVE = 0x188 // 392 + SYS_EXECVP = 0x189 // 393 + SYS_FCHMOD = 0x18A // 394 + SYS_FCHOWN = 0x18B // 395 + SYS_FCNTL = 0x18C // 396 + SYS_FILENO = 0x18D // 397 + SYS_FORK = 0x18E // 398 + SYS_FPATHCONF = 0x18F // 399 + SYS_FSTAT = 0x190 // 400 + SYS_FSYNC = 0x191 // 401 + SYS_FTRUNCATE = 0x192 // 402 + SYS_GETCWD = 0x193 // 403 + SYS_GETEGID = 0x194 // 404 + SYS_GETEUID = 0x195 // 405 + SYS_GETGID = 0x196 // 406 + SYS_GETGRGID = 0x197 // 407 + SYS_GETGRNAM = 0x198 // 408 + SYS_GETGROUPS = 0x199 // 409 + SYS_GETLOGIN = 0x19A // 410 + SYS_W_GETMNTENT = 0x19B // 411 + SYS_GETPGRP = 0x19C // 412 + SYS_GETPID = 0x19D // 413 + SYS_GETPPID = 0x19E // 414 + SYS_GETPWNAM = 0x19F // 415 + SYS_GETPWUID = 0x1A0 // 416 + SYS_GETUID = 0x1A1 // 417 + SYS_W_IOCTL = 0x1A2 // 418 + SYS_ISATTY = 0x1A3 // 419 + SYS_KILL = 0x1A4 // 420 + SYS_LINK = 0x1A5 // 421 + SYS_LSEEK = 0x1A6 // 422 + SYS_LSTAT = 0x1A7 // 423 + SYS_MKDIR = 0x1A8 // 424 + SYS_MKFIFO = 0x1A9 // 425 + SYS_MKNOD = 0x1AA // 426 + SYS_MOUNT = 0x1AB // 427 + SYS_OPEN = 0x1AC // 428 + SYS_OPENDIR = 0x1AD // 429 + SYS_PATHCONF = 0x1AE // 430 
+ SYS_PAUSE = 0x1AF // 431 + SYS_PIPE = 0x1B0 // 432 + SYS_W_GETPSENT = 0x1B1 // 433 + SYS_READ = 0x1B2 // 434 + SYS_READDIR = 0x1B3 // 435 + SYS_READLINK = 0x1B4 // 436 + SYS_REWINDDIR = 0x1B5 // 437 + SYS_RMDIR = 0x1B6 // 438 + SYS_SETEGID = 0x1B7 // 439 + SYS_SETEUID = 0x1B8 // 440 + SYS_SETGID = 0x1B9 // 441 + SYS_SETPGID = 0x1BA // 442 + SYS_SETSID = 0x1BB // 443 + SYS_SETUID = 0x1BC // 444 + SYS_SIGACTION = 0x1BD // 445 + SYS_SIGADDSET = 0x1BE // 446 + SYS_SIGDELSET = 0x1BF // 447 + SYS_SIGEMPTYSET = 0x1C0 // 448 + SYS_SIGFILLSET = 0x1C1 // 449 + SYS_SIGISMEMBER = 0x1C2 // 450 + SYS_SIGLONGJMP = 0x1C3 // 451 + SYS_SIGPENDING = 0x1C4 // 452 + SYS_SIGPROCMASK = 0x1C5 // 453 + SYS_SIGSETJMP = 0x1C6 // 454 + SYS_SIGSUSPEND = 0x1C7 // 455 + SYS_SLEEP = 0x1C8 // 456 + SYS_STAT = 0x1C9 // 457 + SYS_W_STATFS = 0x1CA // 458 + SYS_SYMLINK = 0x1CB // 459 + SYS_SYSCONF = 0x1CC // 460 + SYS_TCDRAIN = 0x1CD // 461 + SYS_TCFLOW = 0x1CE // 462 + SYS_TCFLUSH = 0x1CF // 463 + SYS_TCGETATTR = 0x1D0 // 464 + SYS_TCGETPGRP = 0x1D1 // 465 + SYS_TCSENDBREAK = 0x1D2 // 466 + SYS_TCSETATTR = 0x1D3 // 467 + SYS_TCSETPGRP = 0x1D4 // 468 + SYS_TIMES = 0x1D5 // 469 + SYS_TTYNAME = 0x1D6 // 470 + SYS_TZSET = 0x1D7 // 471 + SYS_UMASK = 0x1D8 // 472 + SYS_UMOUNT = 0x1D9 // 473 + SYS_UNAME = 0x1DA // 474 + SYS_UNLINK = 0x1DB // 475 + SYS_UTIME = 0x1DC // 476 + SYS_WAIT = 0x1DD // 477 + SYS_WAITPID = 0x1DE // 478 + SYS_WRITE = 0x1DF // 479 + SYS_CHAUDIT = 0x1E0 // 480 + SYS_FCHAUDIT = 0x1E1 // 481 + SYS_GETGROUPSBYNAME = 0x1E2 // 482 + SYS_SIGWAIT = 0x1E3 // 483 + SYS_PTHREAD_EXIT = 0x1E4 // 484 + SYS_PTHREAD_KILL = 0x1E5 // 485 + SYS_PTHREAD_ATTR_INIT = 0x1E6 // 486 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 // 487 + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 // 488 + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 // 489 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA // 490 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB // 491 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC // 492 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED // 493 + SYS_PTHREAD_CANCEL = 0x1EE // 494 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF // 495 + SYS_PTHREAD_CLEANUP_POP = 0x1F0 // 496 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 // 497 + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 // 498 + SYS_PTHREAD_COND_INIT = 0x1F3 // 499 + SYS_PTHREAD_COND_DESTROY = 0x1F4 // 500 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 // 501 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 // 502 + SYS_PTHREAD_COND_WAIT = 0x1F7 // 503 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 // 504 + SYS_PTHREAD_CREATE = 0x1F9 // 505 + SYS_PTHREAD_DETACH = 0x1FA // 506 + SYS_PTHREAD_EQUAL = 0x1FB // 507 + SYS_PTHREAD_GETSPECIFIC = 0x1FC // 508 + SYS_PTHREAD_JOIN = 0x1FD // 509 + SYS_PTHREAD_KEY_CREATE = 0x1FE // 510 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF // 511 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 // 512 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 // 513 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 // 514 + SYS_PTHREAD_MUTEX_INIT = 0x203 // 515 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 // 516 + SYS_PTHREAD_MUTEX_LOCK = 0x205 // 517 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 // 518 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 // 519 + SYS_PTHREAD_ONCE = 0x209 // 521 + SYS_PTHREAD_SELF = 0x20A // 522 + SYS_PTHREAD_SETINTR = 0x20B // 523 + SYS_PTHREAD_SETINTRTYPE = 0x20C // 524 + SYS_PTHREAD_SETSPECIFIC = 0x20D // 525 + SYS_PTHREAD_TESTINTR = 0x20E // 526 + SYS_PTHREAD_YIELD = 0x20F // 527 + SYS_TW_OPEN = 0x210 // 528 + SYS_TW_FCNTL = 0x211 // 529 + SYS_PTHREAD_JOIN_D4_NP = 0x212 // 530 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 // 531 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 // 532 + SYS_EXTLINK_NP = 
0x215 // 533 + SYS___PASSWD = 0x216 // 534 + SYS_SETGROUPS = 0x217 // 535 + SYS_INITGROUPS = 0x218 // 536 + SYS_WCSPBRK = 0x23F // 575 + SYS_WCSRCHR = 0x240 // 576 + SYS_SVC99 = 0x241 // 577 + SYS___SVC99 = 0x241 // 577 + SYS_WCSWCS = 0x242 // 578 + SYS_LOCALECO = 0x243 // 579 + SYS_LOCALECONV = 0x243 // 579 + SYS___LIBREL = 0x244 // 580 + SYS_RELEASE = 0x245 // 581 + SYS___RLSE = 0x245 // 581 + SYS_FLOCATE = 0x246 // 582 + SYS___FLOCT = 0x246 // 582 + SYS_FDELREC = 0x247 // 583 + SYS___FDLREC = 0x247 // 583 + SYS_FETCH = 0x248 // 584 + SYS___FETCH = 0x248 // 584 + SYS_QSORT = 0x249 // 585 + SYS_GETENV = 0x24A // 586 + SYS_SYSTEM = 0x24B // 587 + SYS_BSEARCH = 0x24C // 588 + SYS_LDIV = 0x24D // 589 + SYS___THROW = 0x25E // 606 + SYS___RETHROW = 0x25F // 607 + SYS___CLEANUPCATCH = 0x260 // 608 + SYS___CATCHMATCH = 0x261 // 609 + SYS___CLEAN2UPCATCH = 0x262 // 610 + SYS_PUTENV = 0x26A // 618 + SYS___GETENV = 0x26F // 623 + SYS_GETPRIORITY = 0x270 // 624 + SYS_NICE = 0x271 // 625 + SYS_SETPRIORITY = 0x272 // 626 + SYS_GETITIMER = 0x273 // 627 + SYS_SETITIMER = 0x274 // 628 + SYS_MSGCTL = 0x275 // 629 + SYS_MSGGET = 0x276 // 630 + SYS_MSGRCV = 0x277 // 631 + SYS_MSGSND = 0x278 // 632 + SYS_MSGXRCV = 0x279 // 633 + SYS___MSGXR = 0x279 // 633 + SYS_SEMCTL = 0x27A // 634 + SYS_SEMGET = 0x27B // 635 + SYS_SEMOP = 0x27C // 636 + SYS_SHMAT = 0x27D // 637 + SYS_SHMCTL = 0x27E // 638 + SYS_SHMDT = 0x27F // 639 + SYS_SHMGET = 0x280 // 640 + SYS___GETIPC = 0x281 // 641 + SYS_SETGRENT = 0x282 // 642 + SYS_GETGRENT = 0x283 // 643 + SYS_ENDGRENT = 0x284 // 644 + SYS_SETPWENT = 0x285 // 645 + SYS_GETPWENT = 0x286 // 646 + SYS_ENDPWENT = 0x287 // 647 + SYS_BSD_SIGNAL = 0x288 // 648 + SYS_KILLPG = 0x289 // 649 + SYS_SIGALTSTACK = 0x28A // 650 + SYS_SIGHOLD = 0x28B // 651 + SYS_SIGIGNORE = 0x28C // 652 + SYS_SIGINTERRUPT = 0x28D // 653 + SYS_SIGPAUSE = 0x28E // 654 + SYS_SIGRELSE = 0x28F // 655 + SYS_SIGSET = 0x290 // 656 + SYS_SIGSTACK = 0x291 // 657 + SYS_GETRLIMIT = 0x292 // 658 + SYS_SETRLIMIT = 0x293 // 659 + SYS_GETRUSAGE = 0x294 // 660 + SYS_MMAP = 0x295 // 661 + SYS_MPROTECT = 0x296 // 662 + SYS_MSYNC = 0x297 // 663 + SYS_MUNMAP = 0x298 // 664 + SYS_CONFSTR = 0x299 // 665 + SYS_GETOPT = 0x29A // 666 + SYS_LCHOWN = 0x29B // 667 + SYS_TRUNCATE = 0x29C // 668 + SYS_GETSUBOPT = 0x29D // 669 + SYS_SETPGRP = 0x29E // 670 + SYS___GDERR = 0x29F // 671 + SYS___TZONE = 0x2A0 // 672 + SYS___DLGHT = 0x2A1 // 673 + SYS___OPARGF = 0x2A2 // 674 + SYS___OPOPTF = 0x2A3 // 675 + SYS___OPINDF = 0x2A4 // 676 + SYS___OPERRF = 0x2A5 // 677 + SYS_GETDATE = 0x2A6 // 678 + SYS_WAIT3 = 0x2A7 // 679 + SYS_WAITID = 0x2A8 // 680 + SYS___CATTRM = 0x2A9 // 681 + SYS___GDTRM = 0x2AA // 682 + SYS___RNDTRM = 0x2AB // 683 + SYS_CRYPT = 0x2AC // 684 + SYS_ENCRYPT = 0x2AD // 685 + SYS_SETKEY = 0x2AE // 686 + SYS___CNVBLK = 0x2AF // 687 + SYS___CRYTRM = 0x2B0 // 688 + SYS___ECRTRM = 0x2B1 // 689 + SYS_DRAND48 = 0x2B2 // 690 + SYS_ERAND48 = 0x2B3 // 691 + SYS_FSTATVFS = 0x2B4 // 692 + SYS_STATVFS = 0x2B5 // 693 + SYS_CATCLOSE = 0x2B6 // 694 + SYS_CATGETS = 0x2B7 // 695 + SYS_CATOPEN = 0x2B8 // 696 + SYS_BCMP = 0x2B9 // 697 + SYS_BCOPY = 0x2BA // 698 + SYS_BZERO = 0x2BB // 699 + SYS_FFS = 0x2BC // 700 + SYS_INDEX = 0x2BD // 701 + SYS_RINDEX = 0x2BE // 702 + SYS_STRCASECMP = 0x2BF // 703 + SYS_STRDUP = 0x2C0 // 704 + SYS_STRNCASECMP = 0x2C1 // 705 + SYS_INITSTATE = 0x2C2 // 706 + SYS_SETSTATE = 0x2C3 // 707 + SYS_RANDOM = 0x2C4 // 708 + SYS_SRANDOM = 0x2C5 // 709 + SYS_HCREATE = 0x2C6 // 710 + SYS_HDESTROY = 0x2C7 // 711 + SYS_HSEARCH = 0x2C8 
// 712 + SYS_LFIND = 0x2C9 // 713 + SYS_LSEARCH = 0x2CA // 714 + SYS_TDELETE = 0x2CB // 715 + SYS_TFIND = 0x2CC // 716 + SYS_TSEARCH = 0x2CD // 717 + SYS_TWALK = 0x2CE // 718 + SYS_INSQUE = 0x2CF // 719 + SYS_REMQUE = 0x2D0 // 720 + SYS_POPEN = 0x2D1 // 721 + SYS_PCLOSE = 0x2D2 // 722 + SYS_SWAB = 0x2D3 // 723 + SYS_MEMCCPY = 0x2D4 // 724 + SYS_GETPAGESIZE = 0x2D8 // 728 + SYS_FCHDIR = 0x2D9 // 729 + SYS___OCLCK = 0x2DA // 730 + SYS___ATOE = 0x2DB // 731 + SYS___ATOE_L = 0x2DC // 732 + SYS___ETOA = 0x2DD // 733 + SYS___ETOA_L = 0x2DE // 734 + SYS_SETUTXENT = 0x2DF // 735 + SYS_GETUTXENT = 0x2E0 // 736 + SYS_ENDUTXENT = 0x2E1 // 737 + SYS_GETUTXID = 0x2E2 // 738 + SYS_GETUTXLINE = 0x2E3 // 739 + SYS_PUTUTXLINE = 0x2E4 // 740 + SYS_FMTMSG = 0x2E5 // 741 + SYS_JRAND48 = 0x2E6 // 742 + SYS_LRAND48 = 0x2E7 // 743 + SYS_MRAND48 = 0x2E8 // 744 + SYS_NRAND48 = 0x2E9 // 745 + SYS_LCONG48 = 0x2EA // 746 + SYS_SRAND48 = 0x2EB // 747 + SYS_SEED48 = 0x2EC // 748 + SYS_ISASCII = 0x2ED // 749 + SYS_TOASCII = 0x2EE // 750 + SYS_A64L = 0x2EF // 751 + SYS_L64A = 0x2F0 // 752 + SYS_UALARM = 0x2F1 // 753 + SYS_USLEEP = 0x2F2 // 754 + SYS___UTXTRM = 0x2F3 // 755 + SYS___SRCTRM = 0x2F4 // 756 + SYS_FTIME = 0x2F5 // 757 + SYS_GETTIMEOFDAY = 0x2F6 // 758 + SYS_DBM_CLEARERR = 0x2F7 // 759 + SYS_DBM_CLOSE = 0x2F8 // 760 + SYS_DBM_DELETE = 0x2F9 // 761 + SYS_DBM_ERROR = 0x2FA // 762 + SYS_DBM_FETCH = 0x2FB // 763 + SYS_DBM_FIRSTKEY = 0x2FC // 764 + SYS_DBM_NEXTKEY = 0x2FD // 765 + SYS_DBM_OPEN = 0x2FE // 766 + SYS_DBM_STORE = 0x2FF // 767 + SYS___NDMTRM = 0x300 // 768 + SYS_FTOK = 0x301 // 769 + SYS_BASENAME = 0x302 // 770 + SYS_DIRNAME = 0x303 // 771 + SYS_GETDTABLESIZE = 0x304 // 772 + SYS_MKSTEMP = 0x305 // 773 + SYS_MKTEMP = 0x306 // 774 + SYS_NFTW = 0x307 // 775 + SYS_GETWD = 0x308 // 776 + SYS_LOCKF = 0x309 // 777 + SYS__LONGJMP = 0x30D // 781 + SYS__SETJMP = 0x30E // 782 + SYS_VFORK = 0x30F // 783 + SYS_WORDEXP = 0x310 // 784 + SYS_WORDFREE = 0x311 // 785 + SYS_GETPGID = 0x312 // 786 + SYS_GETSID = 0x313 // 787 + SYS___UTMPXNAME = 0x314 // 788 + SYS_CUSERID = 0x315 // 789 + SYS_GETPASS = 0x316 // 790 + SYS_FNMATCH = 0x317 // 791 + SYS_FTW = 0x318 // 792 + SYS_GETW = 0x319 // 793 + SYS_GLOB = 0x31A // 794 + SYS_GLOBFREE = 0x31B // 795 + SYS_PUTW = 0x31C // 796 + SYS_SEEKDIR = 0x31D // 797 + SYS_TELLDIR = 0x31E // 798 + SYS_TEMPNAM = 0x31F // 799 + SYS_ACOSH = 0x320 // 800 + SYS_ASINH = 0x321 // 801 + SYS_ATANH = 0x322 // 802 + SYS_CBRT = 0x323 // 803 + SYS_EXPM1 = 0x324 // 804 + SYS_ILOGB = 0x325 // 805 + SYS_LOGB = 0x326 // 806 + SYS_LOG1P = 0x327 // 807 + SYS_NEXTAFTER = 0x328 // 808 + SYS_RINT = 0x329 // 809 + SYS_REMAINDER = 0x32A // 810 + SYS_SCALB = 0x32B // 811 + SYS_LGAMMA = 0x32C // 812 + SYS_TTYSLOT = 0x32D // 813 + SYS_GETTIMEOFDAY_R = 0x32E // 814 + SYS_SYNC = 0x32F // 815 + SYS_SPAWN = 0x330 // 816 + SYS_SPAWNP = 0x331 // 817 + SYS_GETLOGIN_UU = 0x332 // 818 + SYS_ECVT = 0x333 // 819 + SYS_FCVT = 0x334 // 820 + SYS_GCVT = 0x335 // 821 + SYS_ACCEPT = 0x336 // 822 + SYS_BIND = 0x337 // 823 + SYS_CONNECT = 0x338 // 824 + SYS_ENDHOSTENT = 0x339 // 825 + SYS_ENDPROTOENT = 0x33A // 826 + SYS_ENDSERVENT = 0x33B // 827 + SYS_GETHOSTBYADDR_R = 0x33C // 828 + SYS_GETHOSTBYADDR = 0x33D // 829 + SYS_GETHOSTBYNAME_R = 0x33E // 830 + SYS_GETHOSTBYNAME = 0x33F // 831 + SYS_GETHOSTENT = 0x340 // 832 + SYS_GETHOSTID = 0x341 // 833 + SYS_GETHOSTNAME = 0x342 // 834 + SYS_GETNETBYADDR = 0x343 // 835 + SYS_GETNETBYNAME = 0x344 // 836 + SYS_GETNETENT = 0x345 // 837 + SYS_GETPEERNAME = 0x346 // 838 + SYS_GETPROTOBYNAME 
= 0x347 // 839 + SYS_GETPROTOBYNUMBER = 0x348 // 840 + SYS_GETPROTOENT = 0x349 // 841 + SYS_GETSERVBYNAME = 0x34A // 842 + SYS_GETSERVBYPORT = 0x34B // 843 + SYS_GETSERVENT = 0x34C // 844 + SYS_GETSOCKNAME = 0x34D // 845 + SYS_GETSOCKOPT = 0x34E // 846 + SYS_INET_ADDR = 0x34F // 847 + SYS_INET_LNAOF = 0x350 // 848 + SYS_INET_MAKEADDR = 0x351 // 849 + SYS_INET_NETOF = 0x352 // 850 + SYS_INET_NETWORK = 0x353 // 851 + SYS_INET_NTOA = 0x354 // 852 + SYS_IOCTL = 0x355 // 853 + SYS_LISTEN = 0x356 // 854 + SYS_READV = 0x357 // 855 + SYS_RECV = 0x358 // 856 + SYS_RECVFROM = 0x359 // 857 + SYS_SELECT = 0x35B // 859 + SYS_SELECTEX = 0x35C // 860 + SYS_SEND = 0x35D // 861 + SYS_SENDTO = 0x35F // 863 + SYS_SETHOSTENT = 0x360 // 864 + SYS_SETNETENT = 0x361 // 865 + SYS_SETPEER = 0x362 // 866 + SYS_SETPROTOENT = 0x363 // 867 + SYS_SETSERVENT = 0x364 // 868 + SYS_SETSOCKOPT = 0x365 // 869 + SYS_SHUTDOWN = 0x366 // 870 + SYS_SOCKET = 0x367 // 871 + SYS_SOCKETPAIR = 0x368 // 872 + SYS_WRITEV = 0x369 // 873 + SYS_CHROOT = 0x36A // 874 + SYS_W_STATVFS = 0x36B // 875 + SYS_ULIMIT = 0x36C // 876 + SYS_ISNAN = 0x36D // 877 + SYS_UTIMES = 0x36E // 878 + SYS___H_ERRNO = 0x36F // 879 + SYS_ENDNETENT = 0x370 // 880 + SYS_CLOSELOG = 0x371 // 881 + SYS_OPENLOG = 0x372 // 882 + SYS_SETLOGMASK = 0x373 // 883 + SYS_SYSLOG = 0x374 // 884 + SYS_PTSNAME = 0x375 // 885 + SYS_SETREUID = 0x376 // 886 + SYS_SETREGID = 0x377 // 887 + SYS_REALPATH = 0x378 // 888 + SYS___SIGNGAM = 0x379 // 889 + SYS_GRANTPT = 0x37A // 890 + SYS_UNLOCKPT = 0x37B // 891 + SYS_TCGETSID = 0x37C // 892 + SYS___TCGETCP = 0x37D // 893 + SYS___TCSETCP = 0x37E // 894 + SYS___TCSETTABLES = 0x37F // 895 + SYS_POLL = 0x380 // 896 + SYS_REXEC = 0x381 // 897 + SYS___ISASCII2 = 0x382 // 898 + SYS___TOASCII2 = 0x383 // 899 + SYS_CHPRIORITY = 0x384 // 900 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 // 901 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 // 902 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 // 903 + SYS___STNETENT = 0x388 // 904 + SYS___STPROTOENT = 0x389 // 905 + SYS___STSERVENT = 0x38A // 906 + SYS___STHOSTENT = 0x38B // 907 + SYS_NLIST = 0x38C // 908 + SYS___IPDBCS = 0x38D // 909 + SYS___IPDSPX = 0x38E // 910 + SYS___IPMSGC = 0x38F // 911 + SYS___SELECT1 = 0x390 // 912 + SYS_PTHREAD_SECURITY_NP = 0x391 // 913 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 // 914 + SYS___CONVERT_ID_NP = 0x393 // 915 + SYS___OPENVMREL = 0x394 // 916 + SYS_WMEMCHR = 0x395 // 917 + SYS_WMEMCMP = 0x396 // 918 + SYS_WMEMCPY = 0x397 // 919 + SYS_WMEMMOVE = 0x398 // 920 + SYS_WMEMSET = 0x399 // 921 + SYS___FPUTWC = 0x400 // 1024 + SYS___PUTWC = 0x401 // 1025 + SYS___PWCHAR = 0x402 // 1026 + SYS___WCSFTM = 0x403 // 1027 + SYS___WCSTOK = 0x404 // 1028 + SYS___WCWDTH = 0x405 // 1029 + SYS_T_ACCEPT = 0x409 // 1033 + SYS_T_ALLOC = 0x40A // 1034 + SYS_T_BIND = 0x40B // 1035 + SYS_T_CLOSE = 0x40C // 1036 + SYS_T_CONNECT = 0x40D // 1037 + SYS_T_ERROR = 0x40E // 1038 + SYS_T_FREE = 0x40F // 1039 + SYS_T_GETINFO = 0x410 // 1040 + SYS_T_GETPROTADDR = 0x411 // 1041 + SYS_T_GETSTATE = 0x412 // 1042 + SYS_T_LISTEN = 0x413 // 1043 + SYS_T_LOOK = 0x414 // 1044 + SYS_T_OPEN = 0x415 // 1045 + SYS_T_OPTMGMT = 0x416 // 1046 + SYS_T_RCV = 0x417 // 1047 + SYS_T_RCVCONNECT = 0x418 // 1048 + SYS_T_RCVDIS = 0x419 // 1049 + SYS_T_RCVREL = 0x41A // 1050 + SYS_T_RCVUDATA = 0x41B // 1051 + SYS_T_RCVUDERR = 0x41C // 1052 + SYS_T_SND = 0x41D // 1053 + SYS_T_SNDDIS = 0x41E // 1054 + SYS_T_SNDREL = 0x41F // 1055 + SYS_T_SNDUDATA = 0x420 // 1056 + SYS_T_STRERROR = 0x421 // 1057 + SYS_T_SYNC = 0x422 // 1058 + SYS_T_UNBIND = 0x423 
// 1059 + SYS___T_ERRNO = 0x424 // 1060 + SYS___RECVMSG2 = 0x425 // 1061 + SYS___SENDMSG2 = 0x426 // 1062 + SYS_FATTACH = 0x427 // 1063 + SYS_FDETACH = 0x428 // 1064 + SYS_GETMSG = 0x429 // 1065 + SYS_GETPMSG = 0x42A // 1066 + SYS_ISASTREAM = 0x42B // 1067 + SYS_PUTMSG = 0x42C // 1068 + SYS_PUTPMSG = 0x42D // 1069 + SYS___ISPOSIXON = 0x42E // 1070 + SYS___OPENMVSREL = 0x42F // 1071 + SYS_GETCONTEXT = 0x430 // 1072 + SYS_SETCONTEXT = 0x431 // 1073 + SYS_MAKECONTEXT = 0x432 // 1074 + SYS_SWAPCONTEXT = 0x433 // 1075 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 // 1076 + SYS_GETCLIENTID = 0x470 // 1136 + SYS___GETCLIENTID = 0x471 // 1137 + SYS_GETSTABLESIZE = 0x472 // 1138 + SYS_GETIBMOPT = 0x473 // 1139 + SYS_GETIBMSOCKOPT = 0x474 // 1140 + SYS_GIVESOCKET = 0x475 // 1141 + SYS_IBMSFLUSH = 0x476 // 1142 + SYS_MAXDESC = 0x477 // 1143 + SYS_SETIBMOPT = 0x478 // 1144 + SYS_SETIBMSOCKOPT = 0x479 // 1145 + SYS_SOCK_DEBUG = 0x47A // 1146 + SYS_SOCK_DO_TESTSTOR = 0x47D // 1149 + SYS_TAKESOCKET = 0x47E // 1150 + SYS___SERVER_INIT = 0x47F // 1151 + SYS___SERVER_PWU = 0x480 // 1152 + SYS_PTHREAD_TAG_NP = 0x481 // 1153 + SYS___CONSOLE = 0x482 // 1154 + SYS___WSINIT = 0x483 // 1155 + SYS___IPTCPN = 0x489 // 1161 + SYS___SMF_RECORD = 0x48A // 1162 + SYS___IPHOST = 0x48B // 1163 + SYS___IPNODE = 0x48C // 1164 + SYS___SERVER_CLASSIFY_CREATE = 0x48D // 1165 + SYS___SERVER_CLASSIFY_DESTROY = 0x48E // 1166 + SYS___SERVER_CLASSIFY_RESET = 0x48F // 1167 + SYS___SERVER_CLASSIFY = 0x490 // 1168 + SYS___HEAPRPT = 0x496 // 1174 + SYS___FNWSA = 0x49B // 1179 + SYS___SPAWN2 = 0x49D // 1181 + SYS___SPAWNP2 = 0x49E // 1182 + SYS___GDRR = 0x4A1 // 1185 + SYS___HRRNO = 0x4A2 // 1186 + SYS___OPRG = 0x4A3 // 1187 + SYS___OPRR = 0x4A4 // 1188 + SYS___OPND = 0x4A5 // 1189 + SYS___OPPT = 0x4A6 // 1190 + SYS___SIGGM = 0x4A7 // 1191 + SYS___DGHT = 0x4A8 // 1192 + SYS___TZNE = 0x4A9 // 1193 + SYS___TZZN = 0x4AA // 1194 + SYS___TRRNO = 0x4AF // 1199 + SYS___ENVN = 0x4B0 // 1200 + SYS___MLOCKALL = 0x4B1 // 1201 + SYS_CREATEWO = 0x4B2 // 1202 + SYS_CREATEWORKUNIT = 0x4B2 // 1202 + SYS_CONTINUE = 0x4B3 // 1203 + SYS_CONTINUEWORKUNIT = 0x4B3 // 1203 + SYS_CONNECTW = 0x4B4 // 1204 + SYS_CONNECTWORKMGR = 0x4B4 // 1204 + SYS_CONNECTS = 0x4B5 // 1205 + SYS_CONNECTSERVER = 0x4B5 // 1205 + SYS_DISCONNE = 0x4B6 // 1206 + SYS_DISCONNECTSERVER = 0x4B6 // 1206 + SYS_JOINWORK = 0x4B7 // 1207 + SYS_JOINWORKUNIT = 0x4B7 // 1207 + SYS_LEAVEWOR = 0x4B8 // 1208 + SYS_LEAVEWORKUNIT = 0x4B8 // 1208 + SYS_DELETEWO = 0x4B9 // 1209 + SYS_DELETEWORKUNIT = 0x4B9 // 1209 + SYS_QUERYMET = 0x4BA // 1210 + SYS_QUERYMETRICS = 0x4BA // 1210 + SYS_QUERYSCH = 0x4BB // 1211 + SYS_QUERYSCHENV = 0x4BB // 1211 + SYS_CHECKSCH = 0x4BC // 1212 + SYS_CHECKSCHENV = 0x4BC // 1212 + SYS___PID_AFFINITY = 0x4BD // 1213 + SYS___ASINH_B = 0x4BE // 1214 + SYS___ATAN_B = 0x4BF // 1215 + SYS___CBRT_B = 0x4C0 // 1216 + SYS___CEIL_B = 0x4C1 // 1217 + SYS_COPYSIGN = 0x4C2 // 1218 + SYS___COS_B = 0x4C3 // 1219 + SYS___ERF_B = 0x4C4 // 1220 + SYS___ERFC_B = 0x4C5 // 1221 + SYS___EXPM1_B = 0x4C6 // 1222 + SYS___FABS_B = 0x4C7 // 1223 + SYS_FINITE = 0x4C8 // 1224 + SYS___FLOOR_B = 0x4C9 // 1225 + SYS___FREXP_B = 0x4CA // 1226 + SYS___ILOGB_B = 0x4CB // 1227 + SYS___ISNAN_B = 0x4CC // 1228 + SYS___LDEXP_B = 0x4CD // 1229 + SYS___LOG1P_B = 0x4CE // 1230 + SYS___LOGB_B = 0x4CF // 1231 + SYS_MATHERR = 0x4D0 // 1232 + SYS___MODF_B = 0x4D1 // 1233 + SYS___NEXTAFTER_B = 0x4D2 // 1234 + SYS___RINT_B = 0x4D3 // 1235 + SYS_SCALBN = 0x4D4 // 1236 + SYS_SIGNIFIC = 0x4D5 // 1237 + SYS_SIGNIFICAND = 0x4D5 // 
1237 + SYS___SIN_B = 0x4D6 // 1238 + SYS___TAN_B = 0x4D7 // 1239 + SYS___TANH_B = 0x4D8 // 1240 + SYS___ACOS_B = 0x4D9 // 1241 + SYS___ACOSH_B = 0x4DA // 1242 + SYS___ASIN_B = 0x4DB // 1243 + SYS___ATAN2_B = 0x4DC // 1244 + SYS___ATANH_B = 0x4DD // 1245 + SYS___COSH_B = 0x4DE // 1246 + SYS___EXP_B = 0x4DF // 1247 + SYS___FMOD_B = 0x4E0 // 1248 + SYS___GAMMA_B = 0x4E1 // 1249 + SYS_GAMMA_R = 0x4E2 // 1250 + SYS___HYPOT_B = 0x4E3 // 1251 + SYS___J0_B = 0x4E4 // 1252 + SYS___Y0_B = 0x4E5 // 1253 + SYS___J1_B = 0x4E6 // 1254 + SYS___Y1_B = 0x4E7 // 1255 + SYS___JN_B = 0x4E8 // 1256 + SYS___YN_B = 0x4E9 // 1257 + SYS___LGAMMA_B = 0x4EA // 1258 + SYS_LGAMMA_R = 0x4EB // 1259 + SYS___LOG_B = 0x4EC // 1260 + SYS___LOG10_B = 0x4ED // 1261 + SYS___POW_B = 0x4EE // 1262 + SYS___REMAINDER_B = 0x4EF // 1263 + SYS___SCALB_B = 0x4F0 // 1264 + SYS___SINH_B = 0x4F1 // 1265 + SYS___SQRT_B = 0x4F2 // 1266 + SYS___OPENDIR2 = 0x4F3 // 1267 + SYS___READDIR2 = 0x4F4 // 1268 + SYS___LOGIN = 0x4F5 // 1269 + SYS___OPEN_STAT = 0x4F6 // 1270 + SYS_ACCEPT_AND_RECV = 0x4F7 // 1271 + SYS___FP_SETMODE = 0x4F8 // 1272 + SYS___SIGACTIONSET = 0x4FB // 1275 + SYS___UCREATE = 0x4FC // 1276 + SYS___UMALLOC = 0x4FD // 1277 + SYS___UFREE = 0x4FE // 1278 + SYS___UHEAPREPORT = 0x4FF // 1279 + SYS___ISBFP = 0x500 // 1280 + SYS___FP_CAST = 0x501 // 1281 + SYS___CERTIFICATE = 0x502 // 1282 + SYS_SEND_FILE = 0x503 // 1283 + SYS_AIO_CANCEL = 0x504 // 1284 + SYS_AIO_ERROR = 0x505 // 1285 + SYS_AIO_READ = 0x506 // 1286 + SYS_AIO_RETURN = 0x507 // 1287 + SYS_AIO_SUSPEND = 0x508 // 1288 + SYS_AIO_WRITE = 0x509 // 1289 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A // 1290 + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B // 1291 + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C // 1292 + SYS_PTHREAD_RWLOCK_INIT = 0x50D // 1293 + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E // 1294 + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F // 1295 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 // 1296 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 // 1297 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 // 1298 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 // 1299 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 // 1300 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 // 1301 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 // 1302 + SYS___CTTBL = 0x517 // 1303 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 // 1304 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 // 1305 + SYS___FP_CLR_FLAG = 0x51A // 1306 + SYS___FP_READ_FLAG = 0x51B // 1307 + SYS___FP_RAISE_XCP = 0x51C // 1308 + SYS___FP_CLASS = 0x51D // 1309 + SYS___FP_FINITE = 0x51E // 1310 + SYS___FP_ISNAN = 0x51F // 1311 + SYS___FP_UNORDERED = 0x520 // 1312 + SYS___FP_READ_RND = 0x521 // 1313 + SYS___FP_READ_RND_B = 0x522 // 1314 + SYS___FP_SWAP_RND = 0x523 // 1315 + SYS___FP_SWAP_RND_B = 0x524 // 1316 + SYS___FP_LEVEL = 0x525 // 1317 + SYS___FP_BTOH = 0x526 // 1318 + SYS___FP_HTOB = 0x527 // 1319 + SYS___FPC_RD = 0x528 // 1320 + SYS___FPC_WR = 0x529 // 1321 + SYS___FPC_RW = 0x52A // 1322 + SYS___FPC_SM = 0x52B // 1323 + SYS___FPC_RS = 0x52C // 1324 + SYS_SIGTIMEDWAIT = 0x52D // 1325 + SYS_SIGWAITINFO = 0x52E // 1326 + SYS___CHKBFP = 0x52F // 1327 + SYS___W_PIOCTL = 0x59E // 1438 + SYS___OSENV = 0x59F // 1439 + SYS_EXPORTWO = 0x5A1 // 1441 + SYS_EXPORTWORKUNIT = 0x5A1 // 1441 + SYS_UNDOEXPO = 0x5A2 // 1442 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 // 1442 + SYS_IMPORTWO = 0x5A3 // 1443 + SYS_IMPORTWORKUNIT = 0x5A3 // 1443 + SYS_UNDOIMPO = 0x5A4 // 1444 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 // 1444 + SYS_EXTRACTW = 0x5A5 // 1445 + SYS_EXTRACTWORKUNIT = 0x5A5 // 1445 + SYS___CPL = 0x5A6 // 1446 + SYS___MAP_INIT = 0x5A7 // 1447 
+ SYS___MAP_SERVICE = 0x5A8 // 1448 + SYS_SIGQUEUE = 0x5A9 // 1449 + SYS___MOUNT = 0x5AA // 1450 + SYS___GETUSERID = 0x5AB // 1451 + SYS___IPDOMAINNAME = 0x5AC // 1452 + SYS_QUERYENC = 0x5AD // 1453 + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD // 1453 + SYS_CONNECTE = 0x5AE // 1454 + SYS_CONNECTEXPORTIMPORT = 0x5AE // 1454 + SYS___FP_SWAPMODE = 0x5AF // 1455 + SYS_STRTOLL = 0x5B0 // 1456 + SYS_STRTOULL = 0x5B1 // 1457 + SYS___DSA_PREV = 0x5B2 // 1458 + SYS___EP_FIND = 0x5B3 // 1459 + SYS___SERVER_THREADS_QUERY = 0x5B4 // 1460 + SYS___MSGRCV_TIMED = 0x5B7 // 1463 + SYS___SEMOP_TIMED = 0x5B8 // 1464 + SYS___GET_CPUID = 0x5B9 // 1465 + SYS___GET_SYSTEM_SETTINGS = 0x5BA // 1466 + SYS_FTELLO = 0x5C8 // 1480 + SYS_FSEEKO = 0x5C9 // 1481 + SYS_LLDIV = 0x5CB // 1483 + SYS_WCSTOLL = 0x5CC // 1484 + SYS_WCSTOULL = 0x5CD // 1485 + SYS_LLABS = 0x5CE // 1486 + SYS___CONSOLE2 = 0x5D2 // 1490 + SYS_INET_NTOP = 0x5D3 // 1491 + SYS_INET_PTON = 0x5D4 // 1492 + SYS___RES = 0x5D6 // 1494 + SYS_RES_MKQUERY = 0x5D7 // 1495 + SYS_RES_INIT = 0x5D8 // 1496 + SYS_RES_QUERY = 0x5D9 // 1497 + SYS_RES_SEARCH = 0x5DA // 1498 + SYS_RES_SEND = 0x5DB // 1499 + SYS_RES_QUERYDOMAIN = 0x5DC // 1500 + SYS_DN_EXPAND = 0x5DD // 1501 + SYS_DN_SKIPNAME = 0x5DE // 1502 + SYS_DN_COMP = 0x5DF // 1503 + SYS_ASCTIME_R = 0x5E0 // 1504 + SYS_CTIME_R = 0x5E1 // 1505 + SYS_GMTIME_R = 0x5E2 // 1506 + SYS_LOCALTIME_R = 0x5E3 // 1507 + SYS_RAND_R = 0x5E4 // 1508 + SYS_STRTOK_R = 0x5E5 // 1509 + SYS_READDIR_R = 0x5E6 // 1510 + SYS_GETGRGID_R = 0x5E7 // 1511 + SYS_GETGRNAM_R = 0x5E8 // 1512 + SYS_GETLOGIN_R = 0x5E9 // 1513 + SYS_GETPWNAM_R = 0x5EA // 1514 + SYS_GETPWUID_R = 0x5EB // 1515 + SYS_TTYNAME_R = 0x5EC // 1516 + SYS_PTHREAD_ATFORK = 0x5ED // 1517 + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE // 1518 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF // 1519 + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 // 1520 + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 // 1521 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 // 1522 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 // 1523 + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 // 1524 + SYS_PTHREAD_KEY_DELETE = 0x5F5 // 1525 + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 // 1526 + SYS_PTHREAD_SIGMASK = 0x5F7 // 1527 + SYS___DISCARDDATA = 0x5F8 // 1528 + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 // 1529 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA // 1530 + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB // 1531 + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC // 1532 + SYS_PTHREAD_DETACH_U98 = 0x5FD // 1533 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE // 1534 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF // 1535 + SYS_PTHREAD_SETCANCELTYPE = 0x600 // 1536 + SYS_PTHREAD_TESTCANCEL = 0x601 // 1537 + SYS___ATANF_B = 0x602 // 1538 + SYS___ATANL_B = 0x603 // 1539 + SYS___CEILF_B = 0x604 // 1540 + SYS___CEILL_B = 0x605 // 1541 + SYS___COSF_B = 0x606 // 1542 + SYS___COSL_B = 0x607 // 1543 + SYS___FABSF_B = 0x608 // 1544 + SYS___FABSL_B = 0x609 // 1545 + SYS___FLOORF_B = 0x60A // 1546 + SYS___FLOORL_B = 0x60B // 1547 + SYS___FREXPF_B = 0x60C // 1548 + SYS___FREXPL_B = 0x60D // 1549 + SYS___LDEXPF_B = 0x60E // 1550 + SYS___LDEXPL_B = 0x60F // 1551 + SYS___SINF_B = 0x610 // 1552 + SYS___SINL_B = 0x611 // 1553 + SYS___TANF_B = 0x612 // 1554 + SYS___TANL_B = 0x613 // 1555 + SYS___TANHF_B = 0x614 // 1556 + SYS___TANHL_B = 0x615 // 1557 + SYS___ACOSF_B = 0x616 // 1558 + SYS___ACOSL_B = 0x617 // 1559 + SYS___ASINF_B = 0x618 // 1560 + SYS___ASINL_B = 0x619 // 1561 + SYS___ATAN2F_B = 0x61A // 1562 + SYS___ATAN2L_B = 0x61B // 1563 + SYS___COSHF_B = 0x61C // 1564 + SYS___COSHL_B = 0x61D // 1565 + 
SYS___EXPF_B = 0x61E // 1566 + SYS___EXPL_B = 0x61F // 1567 + SYS___LOGF_B = 0x620 // 1568 + SYS___LOGL_B = 0x621 // 1569 + SYS___LOG10F_B = 0x622 // 1570 + SYS___LOG10L_B = 0x623 // 1571 + SYS___POWF_B = 0x624 // 1572 + SYS___POWL_B = 0x625 // 1573 + SYS___SINHF_B = 0x626 // 1574 + SYS___SINHL_B = 0x627 // 1575 + SYS___SQRTF_B = 0x628 // 1576 + SYS___SQRTL_B = 0x629 // 1577 + SYS___ABSF_B = 0x62A // 1578 + SYS___ABS_B = 0x62B // 1579 + SYS___ABSL_B = 0x62C // 1580 + SYS___FMODF_B = 0x62D // 1581 + SYS___FMODL_B = 0x62E // 1582 + SYS___MODFF_B = 0x62F // 1583 + SYS___MODFL_B = 0x630 // 1584 + SYS_ABSF = 0x631 // 1585 + SYS_ABSL = 0x632 // 1586 + SYS_ACOSF = 0x633 // 1587 + SYS_ACOSL = 0x634 // 1588 + SYS_ASINF = 0x635 // 1589 + SYS_ASINL = 0x636 // 1590 + SYS_ATAN2F = 0x637 // 1591 + SYS_ATAN2L = 0x638 // 1592 + SYS_ATANF = 0x639 // 1593 + SYS_ATANL = 0x63A // 1594 + SYS_CEILF = 0x63B // 1595 + SYS_CEILL = 0x63C // 1596 + SYS_COSF = 0x63D // 1597 + SYS_COSL = 0x63E // 1598 + SYS_COSHF = 0x63F // 1599 + SYS_COSHL = 0x640 // 1600 + SYS_EXPF = 0x641 // 1601 + SYS_EXPL = 0x642 // 1602 + SYS_TANHF = 0x643 // 1603 + SYS_TANHL = 0x644 // 1604 + SYS_LOG10F = 0x645 // 1605 + SYS_LOG10L = 0x646 // 1606 + SYS_LOGF = 0x647 // 1607 + SYS_LOGL = 0x648 // 1608 + SYS_POWF = 0x649 // 1609 + SYS_POWL = 0x64A // 1610 + SYS_SINF = 0x64B // 1611 + SYS_SINL = 0x64C // 1612 + SYS_SQRTF = 0x64D // 1613 + SYS_SQRTL = 0x64E // 1614 + SYS_SINHF = 0x64F // 1615 + SYS_SINHL = 0x650 // 1616 + SYS_TANF = 0x651 // 1617 + SYS_TANL = 0x652 // 1618 + SYS_FABSF = 0x653 // 1619 + SYS_FABSL = 0x654 // 1620 + SYS_FLOORF = 0x655 // 1621 + SYS_FLOORL = 0x656 // 1622 + SYS_FMODF = 0x657 // 1623 + SYS_FMODL = 0x658 // 1624 + SYS_FREXPF = 0x659 // 1625 + SYS_FREXPL = 0x65A // 1626 + SYS_LDEXPF = 0x65B // 1627 + SYS_LDEXPL = 0x65C // 1628 + SYS_MODFF = 0x65D // 1629 + SYS_MODFL = 0x65E // 1630 + SYS_BTOWC = 0x65F // 1631 + SYS___CHATTR = 0x660 // 1632 + SYS___FCHATTR = 0x661 // 1633 + SYS___TOCCSID = 0x662 // 1634 + SYS___CSNAMETYPE = 0x663 // 1635 + SYS___TOCSNAME = 0x664 // 1636 + SYS___CCSIDTYPE = 0x665 // 1637 + SYS___AE_CORRESTBL_QUERY = 0x666 // 1638 + SYS___AE_AUTOCONVERT_STATE = 0x667 // 1639 + SYS_DN_FIND = 0x668 // 1640 + SYS___GETHOSTBYADDR_A = 0x669 // 1641 + SYS___GETHOSTBYNAME_A = 0x66A // 1642 + SYS___RES_INIT_A = 0x66B // 1643 + SYS___GETHOSTBYADDR_R_A = 0x66C // 1644 + SYS___GETHOSTBYNAME_R_A = 0x66D // 1645 + SYS___CHARMAP_INIT_A = 0x66E // 1646 + SYS___MBLEN_A = 0x66F // 1647 + SYS___MBLEN_SB_A = 0x670 // 1648 + SYS___MBLEN_STD_A = 0x671 // 1649 + SYS___MBLEN_UTF = 0x672 // 1650 + SYS___MBSTOWCS_A = 0x673 // 1651 + SYS___MBSTOWCS_STD_A = 0x674 // 1652 + SYS___MBTOWC_A = 0x675 // 1653 + SYS___MBTOWC_ISO1 = 0x676 // 1654 + SYS___MBTOWC_SBCS = 0x677 // 1655 + SYS___MBTOWC_MBCS = 0x678 // 1656 + SYS___MBTOWC_UTF = 0x679 // 1657 + SYS___WCSTOMBS_A = 0x67A // 1658 + SYS___WCSTOMBS_STD_A = 0x67B // 1659 + SYS___WCSWIDTH_A = 0x67C // 1660 + SYS___GETGRGID_R_A = 0x67D // 1661 + SYS___WCSWIDTH_STD_A = 0x67E // 1662 + SYS___WCSWIDTH_ASIA = 0x67F // 1663 + SYS___CSID_A = 0x680 // 1664 + SYS___CSID_STD_A = 0x681 // 1665 + SYS___WCSID_A = 0x682 // 1666 + SYS___WCSID_STD_A = 0x683 // 1667 + SYS___WCTOMB_A = 0x684 // 1668 + SYS___WCTOMB_ISO1 = 0x685 // 1669 + SYS___WCTOMB_STD_A = 0x686 // 1670 + SYS___WCTOMB_UTF = 0x687 // 1671 + SYS___WCWIDTH_A = 0x688 // 1672 + SYS___GETGRNAM_R_A = 0x689 // 1673 + SYS___WCWIDTH_STD_A = 0x68A // 1674 + SYS___WCWIDTH_ASIA = 0x68B // 1675 + SYS___GETPWNAM_R_A = 0x68C // 1676 + SYS___GETPWUID_R_A = 
0x68D // 1677 + SYS___GETLOGIN_R_A = 0x68E // 1678 + SYS___TTYNAME_R_A = 0x68F // 1679 + SYS___READDIR_R_A = 0x690 // 1680 + SYS___E2A_S = 0x691 // 1681 + SYS___FNMATCH_A = 0x692 // 1682 + SYS___FNMATCH_C_A = 0x693 // 1683 + SYS___EXECL_A = 0x694 // 1684 + SYS___FNMATCH_STD_A = 0x695 // 1685 + SYS___REGCOMP_A = 0x696 // 1686 + SYS___REGCOMP_STD_A = 0x697 // 1687 + SYS___REGERROR_A = 0x698 // 1688 + SYS___REGERROR_STD_A = 0x699 // 1689 + SYS___REGEXEC_A = 0x69A // 1690 + SYS___REGEXEC_STD_A = 0x69B // 1691 + SYS___REGFREE_A = 0x69C // 1692 + SYS___REGFREE_STD_A = 0x69D // 1693 + SYS___STRCOLL_A = 0x69E // 1694 + SYS___STRCOLL_C_A = 0x69F // 1695 + SYS___EXECLE_A = 0x6A0 // 1696 + SYS___STRCOLL_STD_A = 0x6A1 // 1697 + SYS___STRXFRM_A = 0x6A2 // 1698 + SYS___STRXFRM_C_A = 0x6A3 // 1699 + SYS___EXECLP_A = 0x6A4 // 1700 + SYS___STRXFRM_STD_A = 0x6A5 // 1701 + SYS___WCSCOLL_A = 0x6A6 // 1702 + SYS___WCSCOLL_C_A = 0x6A7 // 1703 + SYS___WCSCOLL_STD_A = 0x6A8 // 1704 + SYS___WCSXFRM_A = 0x6A9 // 1705 + SYS___WCSXFRM_C_A = 0x6AA // 1706 + SYS___WCSXFRM_STD_A = 0x6AB // 1707 + SYS___COLLATE_INIT_A = 0x6AC // 1708 + SYS___WCTYPE_A = 0x6AD // 1709 + SYS___GET_WCTYPE_STD_A = 0x6AE // 1710 + SYS___CTYPE_INIT_A = 0x6AF // 1711 + SYS___ISWCTYPE_A = 0x6B0 // 1712 + SYS___EXECV_A = 0x6B1 // 1713 + SYS___IS_WCTYPE_STD_A = 0x6B2 // 1714 + SYS___TOWLOWER_A = 0x6B3 // 1715 + SYS___TOWLOWER_STD_A = 0x6B4 // 1716 + SYS___TOWUPPER_A = 0x6B5 // 1717 + SYS___TOWUPPER_STD_A = 0x6B6 // 1718 + SYS___LOCALE_INIT_A = 0x6B7 // 1719 + SYS___LOCALECONV_A = 0x6B8 // 1720 + SYS___LOCALECONV_STD_A = 0x6B9 // 1721 + SYS___NL_LANGINFO_A = 0x6BA // 1722 + SYS___NL_LNAGINFO_STD_A = 0x6BB // 1723 + SYS___MONETARY_INIT_A = 0x6BC // 1724 + SYS___STRFMON_A = 0x6BD // 1725 + SYS___STRFMON_STD_A = 0x6BE // 1726 + SYS___GETADDRINFO_A = 0x6BF // 1727 + SYS___CATGETS_A = 0x6C0 // 1728 + SYS___EXECVE_A = 0x6C1 // 1729 + SYS___EXECVP_A = 0x6C2 // 1730 + SYS___SPAWN_A = 0x6C3 // 1731 + SYS___GETNAMEINFO_A = 0x6C4 // 1732 + SYS___SPAWNP_A = 0x6C5 // 1733 + SYS___NUMERIC_INIT_A = 0x6C6 // 1734 + SYS___RESP_INIT_A = 0x6C7 // 1735 + SYS___RPMATCH_A = 0x6C8 // 1736 + SYS___RPMATCH_C_A = 0x6C9 // 1737 + SYS___RPMATCH_STD_A = 0x6CA // 1738 + SYS___TIME_INIT_A = 0x6CB // 1739 + SYS___STRFTIME_A = 0x6CC // 1740 + SYS___STRFTIME_STD_A = 0x6CD // 1741 + SYS___STRPTIME_A = 0x6CE // 1742 + SYS___STRPTIME_STD_A = 0x6CF // 1743 + SYS___WCSFTIME_A = 0x6D0 // 1744 + SYS___WCSFTIME_STD_A = 0x6D1 // 1745 + SYS_____SPAWN2_A = 0x6D2 // 1746 + SYS_____SPAWNP2_A = 0x6D3 // 1747 + SYS___SYNTAX_INIT_A = 0x6D4 // 1748 + SYS___TOD_INIT_A = 0x6D5 // 1749 + SYS___NL_CSINFO_A = 0x6D6 // 1750 + SYS___NL_MONINFO_A = 0x6D7 // 1751 + SYS___NL_NUMINFO_A = 0x6D8 // 1752 + SYS___NL_RESPINFO_A = 0x6D9 // 1753 + SYS___NL_TIMINFO_A = 0x6DA // 1754 + SYS___IF_NAMETOINDEX_A = 0x6DB // 1755 + SYS___IF_INDEXTONAME_A = 0x6DC // 1756 + SYS___PRINTF_A = 0x6DD // 1757 + SYS___ICONV_OPEN_A = 0x6DE // 1758 + SYS___DLLLOAD_A = 0x6DF // 1759 + SYS___DLLQUERYFN_A = 0x6E0 // 1760 + SYS___DLLQUERYVAR_A = 0x6E1 // 1761 + SYS_____CHATTR_A = 0x6E2 // 1762 + SYS___E2A_L = 0x6E3 // 1763 + SYS_____TOCCSID_A = 0x6E4 // 1764 + SYS_____TOCSNAME_A = 0x6E5 // 1765 + SYS_____CCSIDTYPE_A = 0x6E6 // 1766 + SYS_____CSNAMETYPE_A = 0x6E7 // 1767 + SYS___CHMOD_A = 0x6E8 // 1768 + SYS___MKDIR_A = 0x6E9 // 1769 + SYS___STAT_A = 0x6EA // 1770 + SYS___STAT_O_A = 0x6EB // 1771 + SYS___MKFIFO_A = 0x6EC // 1772 + SYS_____OPEN_STAT_A = 0x6ED // 1773 + SYS___LSTAT_A = 0x6EE // 1774 + SYS___LSTAT_O_A = 0x6EF // 1775 + 
SYS___MKNOD_A = 0x6F0 // 1776 + SYS___MOUNT_A = 0x6F1 // 1777 + SYS___UMOUNT_A = 0x6F2 // 1778 + SYS___CHAUDIT_A = 0x6F4 // 1780 + SYS___W_GETMNTENT_A = 0x6F5 // 1781 + SYS___CREAT_A = 0x6F6 // 1782 + SYS___OPEN_A = 0x6F7 // 1783 + SYS___SETLOCALE_A = 0x6F9 // 1785 + SYS___FPRINTF_A = 0x6FA // 1786 + SYS___SPRINTF_A = 0x6FB // 1787 + SYS___VFPRINTF_A = 0x6FC // 1788 + SYS___VPRINTF_A = 0x6FD // 1789 + SYS___VSPRINTF_A = 0x6FE // 1790 + SYS___VSWPRINTF_A = 0x6FF // 1791 + SYS___SWPRINTF_A = 0x700 // 1792 + SYS___FSCANF_A = 0x701 // 1793 + SYS___SCANF_A = 0x702 // 1794 + SYS___SSCANF_A = 0x703 // 1795 + SYS___SWSCANF_A = 0x704 // 1796 + SYS___ATOF_A = 0x705 // 1797 + SYS___ATOI_A = 0x706 // 1798 + SYS___ATOL_A = 0x707 // 1799 + SYS___STRTOD_A = 0x708 // 1800 + SYS___STRTOL_A = 0x709 // 1801 + SYS___STRTOUL_A = 0x70A // 1802 + SYS_____AE_CORRESTBL_QUERY_A = 0x70B // 1803 + SYS___A64L_A = 0x70C // 1804 + SYS___ECVT_A = 0x70D // 1805 + SYS___FCVT_A = 0x70E // 1806 + SYS___GCVT_A = 0x70F // 1807 + SYS___L64A_A = 0x710 // 1808 + SYS___STRERROR_A = 0x711 // 1809 + SYS___PERROR_A = 0x712 // 1810 + SYS___FETCH_A = 0x713 // 1811 + SYS___GETENV_A = 0x714 // 1812 + SYS___MKSTEMP_A = 0x717 // 1815 + SYS___PTSNAME_A = 0x718 // 1816 + SYS___PUTENV_A = 0x719 // 1817 + SYS___REALPATH_A = 0x71A // 1818 + SYS___SETENV_A = 0x71B // 1819 + SYS___SYSTEM_A = 0x71C // 1820 + SYS___GETOPT_A = 0x71D // 1821 + SYS___CATOPEN_A = 0x71E // 1822 + SYS___ACCESS_A = 0x71F // 1823 + SYS___CHDIR_A = 0x720 // 1824 + SYS___CHOWN_A = 0x721 // 1825 + SYS___CHROOT_A = 0x722 // 1826 + SYS___GETCWD_A = 0x723 // 1827 + SYS___GETWD_A = 0x724 // 1828 + SYS___LCHOWN_A = 0x725 // 1829 + SYS___LINK_A = 0x726 // 1830 + SYS___PATHCONF_A = 0x727 // 1831 + SYS___IF_NAMEINDEX_A = 0x728 // 1832 + SYS___READLINK_A = 0x729 // 1833 + SYS___RMDIR_A = 0x72A // 1834 + SYS___STATVFS_A = 0x72B // 1835 + SYS___SYMLINK_A = 0x72C // 1836 + SYS___TRUNCATE_A = 0x72D // 1837 + SYS___UNLINK_A = 0x72E // 1838 + SYS___GAI_STRERROR_A = 0x72F // 1839 + SYS___EXTLINK_NP_A = 0x730 // 1840 + SYS___ISALNUM_A = 0x731 // 1841 + SYS___ISALPHA_A = 0x732 // 1842 + SYS___A2E_S = 0x733 // 1843 + SYS___ISCNTRL_A = 0x734 // 1844 + SYS___ISDIGIT_A = 0x735 // 1845 + SYS___ISGRAPH_A = 0x736 // 1846 + SYS___ISLOWER_A = 0x737 // 1847 + SYS___ISPRINT_A = 0x738 // 1848 + SYS___ISPUNCT_A = 0x739 // 1849 + SYS___ISSPACE_A = 0x73A // 1850 + SYS___ISUPPER_A = 0x73B // 1851 + SYS___ISXDIGIT_A = 0x73C // 1852 + SYS___TOLOWER_A = 0x73D // 1853 + SYS___TOUPPER_A = 0x73E // 1854 + SYS___ISWALNUM_A = 0x73F // 1855 + SYS___ISWALPHA_A = 0x740 // 1856 + SYS___A2E_L = 0x741 // 1857 + SYS___ISWCNTRL_A = 0x742 // 1858 + SYS___ISWDIGIT_A = 0x743 // 1859 + SYS___ISWGRAPH_A = 0x744 // 1860 + SYS___ISWLOWER_A = 0x745 // 1861 + SYS___ISWPRINT_A = 0x746 // 1862 + SYS___ISWPUNCT_A = 0x747 // 1863 + SYS___ISWSPACE_A = 0x748 // 1864 + SYS___ISWUPPER_A = 0x749 // 1865 + SYS___ISWXDIGIT_A = 0x74A // 1866 + SYS___CONFSTR_A = 0x74B // 1867 + SYS___FTOK_A = 0x74C // 1868 + SYS___MKTEMP_A = 0x74D // 1869 + SYS___FDOPEN_A = 0x74E // 1870 + SYS___FLDATA_A = 0x74F // 1871 + SYS___REMOVE_A = 0x750 // 1872 + SYS___RENAME_A = 0x751 // 1873 + SYS___TMPNAM_A = 0x752 // 1874 + SYS___FOPEN_A = 0x753 // 1875 + SYS___FREOPEN_A = 0x754 // 1876 + SYS___CUSERID_A = 0x755 // 1877 + SYS___POPEN_A = 0x756 // 1878 + SYS___TEMPNAM_A = 0x757 // 1879 + SYS___FTW_A = 0x758 // 1880 + SYS___GETGRENT_A = 0x759 // 1881 + SYS___GETGRGID_A = 0x75A // 1882 + SYS___GETGRNAM_A = 0x75B // 1883 + SYS___GETGROUPSBYNAME_A = 0x75C // 1884 + 
SYS___GETHOSTENT_A = 0x75D // 1885 + SYS___GETHOSTNAME_A = 0x75E // 1886 + SYS___GETLOGIN_A = 0x75F // 1887 + SYS___INET_NTOP_A = 0x760 // 1888 + SYS___GETPASS_A = 0x761 // 1889 + SYS___GETPWENT_A = 0x762 // 1890 + SYS___GETPWNAM_A = 0x763 // 1891 + SYS___GETPWUID_A = 0x764 // 1892 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 // 1893 + SYS___CHECKSCHENV_A = 0x766 // 1894 + SYS___CONNECTSERVER_A = 0x767 // 1895 + SYS___CONNECTWORKMGR_A = 0x768 // 1896 + SYS_____CONSOLE_A = 0x769 // 1897 + SYS___CREATEWORKUNIT_A = 0x76A // 1898 + SYS___CTERMID_A = 0x76B // 1899 + SYS___FMTMSG_A = 0x76C // 1900 + SYS___INITGROUPS_A = 0x76D // 1901 + SYS_____LOGIN_A = 0x76E // 1902 + SYS___MSGRCV_A = 0x76F // 1903 + SYS___MSGSND_A = 0x770 // 1904 + SYS___MSGXRCV_A = 0x771 // 1905 + SYS___NFTW_A = 0x772 // 1906 + SYS_____PASSWD_A = 0x773 // 1907 + SYS___PTHREAD_SECURITY_NP_A = 0x774 // 1908 + SYS___QUERYMETRICS_A = 0x775 // 1909 + SYS___QUERYSCHENV = 0x776 // 1910 + SYS___READV_A = 0x777 // 1911 + SYS_____SERVER_CLASSIFY_A = 0x778 // 1912 + SYS_____SERVER_INIT_A = 0x779 // 1913 + SYS_____SERVER_PWU_A = 0x77A // 1914 + SYS___STRCASECMP_A = 0x77B // 1915 + SYS___STRNCASECMP_A = 0x77C // 1916 + SYS___TTYNAME_A = 0x77D // 1917 + SYS___UNAME_A = 0x77E // 1918 + SYS___UTIMES_A = 0x77F // 1919 + SYS___W_GETPSENT_A = 0x780 // 1920 + SYS___WRITEV_A = 0x781 // 1921 + SYS___W_STATFS_A = 0x782 // 1922 + SYS___W_STATVFS_A = 0x783 // 1923 + SYS___FPUTC_A = 0x784 // 1924 + SYS___PUTCHAR_A = 0x785 // 1925 + SYS___PUTS_A = 0x786 // 1926 + SYS___FGETS_A = 0x787 // 1927 + SYS___GETS_A = 0x788 // 1928 + SYS___FPUTS_A = 0x789 // 1929 + SYS___FREAD_A = 0x78A // 1930 + SYS___FWRITE_A = 0x78B // 1931 + SYS___OPEN_O_A = 0x78C // 1932 + SYS___ISASCII = 0x78D // 1933 + SYS___CREAT_O_A = 0x78E // 1934 + SYS___ENVNA = 0x78F // 1935 + SYS___PUTC_A = 0x790 // 1936 + SYS___AE_THREAD_SETMODE = 0x791 // 1937 + SYS___AE_THREAD_SWAPMODE = 0x792 // 1938 + SYS___GETNETBYADDR_A = 0x793 // 1939 + SYS___GETNETBYNAME_A = 0x794 // 1940 + SYS___GETNETENT_A = 0x795 // 1941 + SYS___GETPROTOBYNAME_A = 0x796 // 1942 + SYS___GETPROTOBYNUMBER_A = 0x797 // 1943 + SYS___GETPROTOENT_A = 0x798 // 1944 + SYS___GETSERVBYNAME_A = 0x799 // 1945 + SYS___GETSERVBYPORT_A = 0x79A // 1946 + SYS___GETSERVENT_A = 0x79B // 1947 + SYS___ASCTIME_A = 0x79C // 1948 + SYS___CTIME_A = 0x79D // 1949 + SYS___GETDATE_A = 0x79E // 1950 + SYS___TZSET_A = 0x79F // 1951 + SYS___UTIME_A = 0x7A0 // 1952 + SYS___ASCTIME_R_A = 0x7A1 // 1953 + SYS___CTIME_R_A = 0x7A2 // 1954 + SYS___STRTOLL_A = 0x7A3 // 1955 + SYS___STRTOULL_A = 0x7A4 // 1956 + SYS___FPUTWC_A = 0x7A5 // 1957 + SYS___PUTWC_A = 0x7A6 // 1958 + SYS___PUTWCHAR_A = 0x7A7 // 1959 + SYS___FPUTWS_A = 0x7A8 // 1960 + SYS___UNGETWC_A = 0x7A9 // 1961 + SYS___FGETWC_A = 0x7AA // 1962 + SYS___GETWC_A = 0x7AB // 1963 + SYS___GETWCHAR_A = 0x7AC // 1964 + SYS___FGETWS_A = 0x7AD // 1965 + SYS___GETTIMEOFDAY_A = 0x7AE // 1966 + SYS___GMTIME_A = 0x7AF // 1967 + SYS___GMTIME_R_A = 0x7B0 // 1968 + SYS___LOCALTIME_A = 0x7B1 // 1969 + SYS___LOCALTIME_R_A = 0x7B2 // 1970 + SYS___MKTIME_A = 0x7B3 // 1971 + SYS___TZZNA = 0x7B4 // 1972 + SYS_UNATEXIT = 0x7B5 // 1973 + SYS___CEE3DMP_A = 0x7B6 // 1974 + SYS___CDUMP_A = 0x7B7 // 1975 + SYS___CSNAP_A = 0x7B8 // 1976 + SYS___CTEST_A = 0x7B9 // 1977 + SYS___CTRACE_A = 0x7BA // 1978 + SYS___VSWPRNTF2_A = 0x7BB // 1979 + SYS___INET_PTON_A = 0x7BC // 1980 + SYS___SYSLOG_A = 0x7BD // 1981 + SYS___CRYPT_A = 0x7BE // 1982 + SYS_____OPENDIR2_A = 0x7BF // 1983 + SYS_____READDIR2_A = 0x7C0 // 1984 + SYS___OPENDIR_A = 
0x7C2 // 1986 + SYS___READDIR_A = 0x7C3 // 1987 + SYS_PREAD = 0x7C7 // 1991 + SYS_PWRITE = 0x7C8 // 1992 + SYS_M_CREATE_LAYOUT = 0x7C9 // 1993 + SYS_M_DESTROY_LAYOUT = 0x7CA // 1994 + SYS_M_GETVALUES_LAYOUT = 0x7CB // 1995 + SYS_M_SETVALUES_LAYOUT = 0x7CC // 1996 + SYS_M_TRANSFORM_LAYOUT = 0x7CD // 1997 + SYS_M_WTRANSFORM_LAYOUT = 0x7CE // 1998 + SYS_FWPRINTF = 0x7D1 // 2001 + SYS_WPRINTF = 0x7D2 // 2002 + SYS_VFWPRINT = 0x7D3 // 2003 + SYS_VFWPRINTF = 0x7D3 // 2003 + SYS_VWPRINTF = 0x7D4 // 2004 + SYS_FWSCANF = 0x7D5 // 2005 + SYS_WSCANF = 0x7D6 // 2006 + SYS_WCTRANS = 0x7D7 // 2007 + SYS_TOWCTRAN = 0x7D8 // 2008 + SYS_TOWCTRANS = 0x7D8 // 2008 + SYS___WCSTOD_A = 0x7D9 // 2009 + SYS___WCSTOL_A = 0x7DA // 2010 + SYS___WCSTOUL_A = 0x7DB // 2011 + SYS___BASENAME_A = 0x7DC // 2012 + SYS___DIRNAME_A = 0x7DD // 2013 + SYS___GLOB_A = 0x7DE // 2014 + SYS_FWIDE = 0x7DF // 2015 + SYS___OSNAME = 0x7E0 // 2016 + SYS_____OSNAME_A = 0x7E1 // 2017 + SYS___BTOWC_A = 0x7E4 // 2020 + SYS___WCTOB_A = 0x7E5 // 2021 + SYS___DBM_OPEN_A = 0x7E6 // 2022 + SYS___VFPRINTF2_A = 0x7E7 // 2023 + SYS___VPRINTF2_A = 0x7E8 // 2024 + SYS___VSPRINTF2_A = 0x7E9 // 2025 + SYS___CEIL_H = 0x7EA // 2026 + SYS___FLOOR_H = 0x7EB // 2027 + SYS___MODF_H = 0x7EC // 2028 + SYS___FABS_H = 0x7ED // 2029 + SYS___J0_H = 0x7EE // 2030 + SYS___J1_H = 0x7EF // 2031 + SYS___JN_H = 0x7F0 // 2032 + SYS___Y0_H = 0x7F1 // 2033 + SYS___Y1_H = 0x7F2 // 2034 + SYS___YN_H = 0x7F3 // 2035 + SYS___CEILF_H = 0x7F4 // 2036 + SYS___CEILL_H = 0x7F5 // 2037 + SYS___FLOORF_H = 0x7F6 // 2038 + SYS___FLOORL_H = 0x7F7 // 2039 + SYS___MODFF_H = 0x7F8 // 2040 + SYS___MODFL_H = 0x7F9 // 2041 + SYS___FABSF_H = 0x7FA // 2042 + SYS___FABSL_H = 0x7FB // 2043 + SYS___MALLOC24 = 0x7FC // 2044 + SYS___MALLOC31 = 0x7FD // 2045 + SYS_ACL_INIT = 0x7FE // 2046 + SYS_ACL_FREE = 0x7FF // 2047 + SYS_ACL_FIRST_ENTRY = 0x800 // 2048 + SYS_ACL_GET_ENTRY = 0x801 // 2049 + SYS_ACL_VALID = 0x802 // 2050 + SYS_ACL_CREATE_ENTRY = 0x803 // 2051 + SYS_ACL_DELETE_ENTRY = 0x804 // 2052 + SYS_ACL_UPDATE_ENTRY = 0x805 // 2053 + SYS_ACL_DELETE_FD = 0x806 // 2054 + SYS_ACL_DELETE_FILE = 0x807 // 2055 + SYS_ACL_GET_FD = 0x808 // 2056 + SYS_ACL_GET_FILE = 0x809 // 2057 + SYS_ACL_SET_FD = 0x80A // 2058 + SYS_ACL_SET_FILE = 0x80B // 2059 + SYS_ACL_FROM_TEXT = 0x80C // 2060 + SYS_ACL_TO_TEXT = 0x80D // 2061 + SYS_ACL_SORT = 0x80E // 2062 + SYS___SHUTDOWN_REGISTRATION = 0x80F // 2063 + SYS___ERFL_B = 0x810 // 2064 + SYS___ERFCL_B = 0x811 // 2065 + SYS___LGAMMAL_B = 0x812 // 2066 + SYS___SETHOOKEVENTS = 0x813 // 2067 + SYS_IF_NAMETOINDEX = 0x814 // 2068 + SYS_IF_INDEXTONAME = 0x815 // 2069 + SYS_IF_NAMEINDEX = 0x816 // 2070 + SYS_IF_FREENAMEINDEX = 0x817 // 2071 + SYS_GETADDRINFO = 0x818 // 2072 + SYS_GETNAMEINFO = 0x819 // 2073 + SYS_FREEADDRINFO = 0x81A // 2074 + SYS_GAI_STRERROR = 0x81B // 2075 + SYS_REXEC_AF = 0x81C // 2076 + SYS___POE = 0x81D // 2077 + SYS___DYNALLOC_A = 0x81F // 2079 + SYS___DYNFREE_A = 0x820 // 2080 + SYS___RES_QUERY_A = 0x821 // 2081 + SYS___RES_SEARCH_A = 0x822 // 2082 + SYS___RES_QUERYDOMAIN_A = 0x823 // 2083 + SYS___RES_MKQUERY_A = 0x824 // 2084 + SYS___RES_SEND_A = 0x825 // 2085 + SYS___DN_EXPAND_A = 0x826 // 2086 + SYS___DN_SKIPNAME_A = 0x827 // 2087 + SYS___DN_COMP_A = 0x828 // 2088 + SYS___DN_FIND_A = 0x829 // 2089 + SYS___NLIST_A = 0x82A // 2090 + SYS_____TCGETCP_A = 0x82B // 2091 + SYS_____TCSETCP_A = 0x82C // 2092 + SYS_____W_PIOCTL_A = 0x82E // 2094 + SYS___INET_ADDR_A = 0x82F // 2095 + SYS___INET_NTOA_A = 0x830 // 2096 + SYS___INET_NETWORK_A = 0x831 // 2097 + 
SYS___ACCEPT_A = 0x832 // 2098 + SYS___ACCEPT_AND_RECV_A = 0x833 // 2099 + SYS___BIND_A = 0x834 // 2100 + SYS___CONNECT_A = 0x835 // 2101 + SYS___GETPEERNAME_A = 0x836 // 2102 + SYS___GETSOCKNAME_A = 0x837 // 2103 + SYS___RECVFROM_A = 0x838 // 2104 + SYS___SENDTO_A = 0x839 // 2105 + SYS___SENDMSG_A = 0x83A // 2106 + SYS___RECVMSG_A = 0x83B // 2107 + SYS_____LCHATTR_A = 0x83C // 2108 + SYS___CABEND = 0x83D // 2109 + SYS___LE_CIB_GET = 0x83E // 2110 + SYS___SET_LAA_FOR_JIT = 0x83F // 2111 + SYS___LCHATTR = 0x840 // 2112 + SYS___WRITEDOWN = 0x841 // 2113 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 // 2114 + SYS___ACOSHF_B = 0x843 // 2115 + SYS___ACOSHL_B = 0x844 // 2116 + SYS___ASINHF_B = 0x845 // 2117 + SYS___ASINHL_B = 0x846 // 2118 + SYS___ATANHF_B = 0x847 // 2119 + SYS___ATANHL_B = 0x848 // 2120 + SYS___CBRTF_B = 0x849 // 2121 + SYS___CBRTL_B = 0x84A // 2122 + SYS___COPYSIGNF_B = 0x84B // 2123 + SYS___COPYSIGNL_B = 0x84C // 2124 + SYS___COTANF_B = 0x84D // 2125 + SYS___COTAN_B = 0x84E // 2126 + SYS___COTANL_B = 0x84F // 2127 + SYS___EXP2F_B = 0x850 // 2128 + SYS___EXP2L_B = 0x851 // 2129 + SYS___EXPM1F_B = 0x852 // 2130 + SYS___EXPM1L_B = 0x853 // 2131 + SYS___FDIMF_B = 0x854 // 2132 + SYS___FDIM_B = 0x855 // 2133 + SYS___FDIML_B = 0x856 // 2134 + SYS___HYPOTF_B = 0x857 // 2135 + SYS___HYPOTL_B = 0x858 // 2136 + SYS___LOG1PF_B = 0x859 // 2137 + SYS___LOG1PL_B = 0x85A // 2138 + SYS___LOG2F_B = 0x85B // 2139 + SYS___LOG2_B = 0x85C // 2140 + SYS___LOG2L_B = 0x85D // 2141 + SYS___REMAINDERF_B = 0x85E // 2142 + SYS___REMAINDERL_B = 0x85F // 2143 + SYS___REMQUOF_B = 0x860 // 2144 + SYS___REMQUO_B = 0x861 // 2145 + SYS___REMQUOL_B = 0x862 // 2146 + SYS___TGAMMAF_B = 0x863 // 2147 + SYS___TGAMMA_B = 0x864 // 2148 + SYS___TGAMMAL_B = 0x865 // 2149 + SYS___TRUNCF_B = 0x866 // 2150 + SYS___TRUNC_B = 0x867 // 2151 + SYS___TRUNCL_B = 0x868 // 2152 + SYS___LGAMMAF_B = 0x869 // 2153 + SYS___LROUNDF_B = 0x86A // 2154 + SYS___LROUND_B = 0x86B // 2155 + SYS___ERFF_B = 0x86C // 2156 + SYS___ERFCF_B = 0x86D // 2157 + SYS_ACOSHF = 0x86E // 2158 + SYS_ACOSHL = 0x86F // 2159 + SYS_ASINHF = 0x870 // 2160 + SYS_ASINHL = 0x871 // 2161 + SYS_ATANHF = 0x872 // 2162 + SYS_ATANHL = 0x873 // 2163 + SYS_CBRTF = 0x874 // 2164 + SYS_CBRTL = 0x875 // 2165 + SYS_COPYSIGNF = 0x876 // 2166 + SYS_CPYSIGNF = 0x876 // 2166 + SYS_COPYSIGNL = 0x877 // 2167 + SYS_CPYSIGNL = 0x877 // 2167 + SYS_COTANF = 0x878 // 2168 + SYS___COTANF = 0x878 // 2168 + SYS_COTAN = 0x879 // 2169 + SYS___COTAN = 0x879 // 2169 + SYS_COTANL = 0x87A // 2170 + SYS___COTANL = 0x87A // 2170 + SYS_EXP2F = 0x87B // 2171 + SYS_EXP2L = 0x87C // 2172 + SYS_EXPM1F = 0x87D // 2173 + SYS_EXPM1L = 0x87E // 2174 + SYS_FDIMF = 0x87F // 2175 + SYS_FDIM = 0x881 // 2177 + SYS_FDIML = 0x882 // 2178 + SYS_HYPOTF = 0x883 // 2179 + SYS_HYPOTL = 0x884 // 2180 + SYS_LOG1PF = 0x885 // 2181 + SYS_LOG1PL = 0x886 // 2182 + SYS_LOG2F = 0x887 // 2183 + SYS_LOG2 = 0x888 // 2184 + SYS_LOG2L = 0x889 // 2185 + SYS_REMAINDERF = 0x88A // 2186 + SYS_REMAINDF = 0x88A // 2186 + SYS_REMAINDERL = 0x88B // 2187 + SYS_REMAINDL = 0x88B // 2187 + SYS_REMQUOF = 0x88C // 2188 + SYS_REMQUO = 0x88D // 2189 + SYS_REMQUOL = 0x88E // 2190 + SYS_TGAMMAF = 0x88F // 2191 + SYS_TGAMMA = 0x890 // 2192 + SYS_TGAMMAL = 0x891 // 2193 + SYS_TRUNCF = 0x892 // 2194 + SYS_TRUNC = 0x893 // 2195 + SYS_TRUNCL = 0x894 // 2196 + SYS_LGAMMAF = 0x895 // 2197 + SYS_LGAMMAL = 0x896 // 2198 + SYS_LROUNDF = 0x897 // 2199 + SYS_LROUND = 0x898 // 2200 + SYS_ERFF = 0x899 // 2201 + SYS_ERFL = 0x89A // 2202 + SYS_ERFCF = 0x89B // 2203 + 
SYS_ERFCL = 0x89C // 2204 + SYS___EXP2_B = 0x89D // 2205 + SYS_EXP2 = 0x89E // 2206 + SYS___FAR_JUMP = 0x89F // 2207 + SYS___TCGETATTR_A = 0x8A1 // 2209 + SYS___TCSETATTR_A = 0x8A2 // 2210 + SYS___SUPERKILL = 0x8A4 // 2212 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 // 2213 + SYS___LE_MSG_ADD_INSERT = 0x8A6 // 2214 + SYS___LE_MSG_GET = 0x8A7 // 2215 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 // 2216 + SYS___LE_MSG_WRITE = 0x8A9 // 2217 + SYS___ITOA = 0x8AA // 2218 + SYS___UTOA = 0x8AB // 2219 + SYS___LTOA = 0x8AC // 2220 + SYS___ULTOA = 0x8AD // 2221 + SYS___LLTOA = 0x8AE // 2222 + SYS___ULLTOA = 0x8AF // 2223 + SYS___ITOA_A = 0x8B0 // 2224 + SYS___UTOA_A = 0x8B1 // 2225 + SYS___LTOA_A = 0x8B2 // 2226 + SYS___ULTOA_A = 0x8B3 // 2227 + SYS___LLTOA_A = 0x8B4 // 2228 + SYS___ULLTOA_A = 0x8B5 // 2229 + SYS_____GETENV_A = 0x8C3 // 2243 + SYS___REXEC_A = 0x8C4 // 2244 + SYS___REXEC_AF_A = 0x8C5 // 2245 + SYS___GETUTXENT_A = 0x8C6 // 2246 + SYS___GETUTXID_A = 0x8C7 // 2247 + SYS___GETUTXLINE_A = 0x8C8 // 2248 + SYS___PUTUTXLINE_A = 0x8C9 // 2249 + SYS_____UTMPXNAME_A = 0x8CA // 2250 + SYS___PUTC_UNLOCKED_A = 0x8CB // 2251 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC // 2252 + SYS___SNPRINTF_A = 0x8CD // 2253 + SYS___VSNPRINTF_A = 0x8CE // 2254 + SYS___DLOPEN_A = 0x8D0 // 2256 + SYS___DLSYM_A = 0x8D1 // 2257 + SYS___DLERROR_A = 0x8D2 // 2258 + SYS_FLOCKFILE = 0x8D3 // 2259 + SYS_FTRYLOCKFILE = 0x8D4 // 2260 + SYS_FUNLOCKFILE = 0x8D5 // 2261 + SYS_GETC_UNLOCKED = 0x8D6 // 2262 + SYS_GETCHAR_UNLOCKED = 0x8D7 // 2263 + SYS_PUTC_UNLOCKED = 0x8D8 // 2264 + SYS_PUTCHAR_UNLOCKED = 0x8D9 // 2265 + SYS_SNPRINTF = 0x8DA // 2266 + SYS_VSNPRINTF = 0x8DB // 2267 + SYS_DLOPEN = 0x8DD // 2269 + SYS_DLSYM = 0x8DE // 2270 + SYS_DLCLOSE = 0x8DF // 2271 + SYS_DLERROR = 0x8E0 // 2272 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 // 2274 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 // 2275 + SYS___VHM_EVENT = 0x8E4 // 2276 + SYS___ABS_H = 0x8E6 // 2278 + SYS___ABSF_H = 0x8E7 // 2279 + SYS___ABSL_H = 0x8E8 // 2280 + SYS___ACOS_H = 0x8E9 // 2281 + SYS___ACOSF_H = 0x8EA // 2282 + SYS___ACOSL_H = 0x8EB // 2283 + SYS___ACOSH_H = 0x8EC // 2284 + SYS___ASIN_H = 0x8ED // 2285 + SYS___ASINF_H = 0x8EE // 2286 + SYS___ASINL_H = 0x8EF // 2287 + SYS___ASINH_H = 0x8F0 // 2288 + SYS___ATAN_H = 0x8F1 // 2289 + SYS___ATANF_H = 0x8F2 // 2290 + SYS___ATANL_H = 0x8F3 // 2291 + SYS___ATANH_H = 0x8F4 // 2292 + SYS___ATANHF_H = 0x8F5 // 2293 + SYS___ATANHL_H = 0x8F6 // 2294 + SYS___ATAN2_H = 0x8F7 // 2295 + SYS___ATAN2F_H = 0x8F8 // 2296 + SYS___ATAN2L_H = 0x8F9 // 2297 + SYS___CBRT_H = 0x8FA // 2298 + SYS___COPYSIGNF_H = 0x8FB // 2299 + SYS___COPYSIGNL_H = 0x8FC // 2300 + SYS___COS_H = 0x8FD // 2301 + SYS___COSF_H = 0x8FE // 2302 + SYS___COSL_H = 0x8FF // 2303 + SYS___COSHF_H = 0x900 // 2304 + SYS___COSHL_H = 0x901 // 2305 + SYS___COTAN_H = 0x902 // 2306 + SYS___COTANF_H = 0x903 // 2307 + SYS___COTANL_H = 0x904 // 2308 + SYS___ERF_H = 0x905 // 2309 + SYS___ERFF_H = 0x906 // 2310 + SYS___ERFL_H = 0x907 // 2311 + SYS___ERFC_H = 0x908 // 2312 + SYS___ERFCF_H = 0x909 // 2313 + SYS___ERFCL_H = 0x90A // 2314 + SYS___EXP_H = 0x90B // 2315 + SYS___EXPF_H = 0x90C // 2316 + SYS___EXPL_H = 0x90D // 2317 + SYS___EXPM1_H = 0x90E // 2318 + SYS___FDIM_H = 0x90F // 2319 + SYS___FDIMF_H = 0x910 // 2320 + SYS___FDIML_H = 0x911 // 2321 + SYS___FMOD_H = 0x912 // 2322 + SYS___FMODF_H = 0x913 // 2323 + SYS___FMODL_H = 0x914 // 2324 + SYS___GAMMA_H = 0x915 // 2325 + SYS___HYPOT_H = 0x916 // 2326 + SYS___ILOGB_H = 0x917 // 2327 + SYS___LGAMMA_H = 0x918 // 2328 + SYS___LGAMMAF_H = 0x919 // 2329 
+ SYS___LOG_H = 0x91A // 2330 + SYS___LOGF_H = 0x91B // 2331 + SYS___LOGL_H = 0x91C // 2332 + SYS___LOGB_H = 0x91D // 2333 + SYS___LOG2_H = 0x91E // 2334 + SYS___LOG2F_H = 0x91F // 2335 + SYS___LOG2L_H = 0x920 // 2336 + SYS___LOG1P_H = 0x921 // 2337 + SYS___LOG10_H = 0x922 // 2338 + SYS___LOG10F_H = 0x923 // 2339 + SYS___LOG10L_H = 0x924 // 2340 + SYS___LROUND_H = 0x925 // 2341 + SYS___LROUNDF_H = 0x926 // 2342 + SYS___NEXTAFTER_H = 0x927 // 2343 + SYS___POW_H = 0x928 // 2344 + SYS___POWF_H = 0x929 // 2345 + SYS___POWL_H = 0x92A // 2346 + SYS___REMAINDER_H = 0x92B // 2347 + SYS___RINT_H = 0x92C // 2348 + SYS___SCALB_H = 0x92D // 2349 + SYS___SIN_H = 0x92E // 2350 + SYS___SINF_H = 0x92F // 2351 + SYS___SINL_H = 0x930 // 2352 + SYS___SINH_H = 0x931 // 2353 + SYS___SINHF_H = 0x932 // 2354 + SYS___SINHL_H = 0x933 // 2355 + SYS___SQRT_H = 0x934 // 2356 + SYS___SQRTF_H = 0x935 // 2357 + SYS___SQRTL_H = 0x936 // 2358 + SYS___TAN_H = 0x937 // 2359 + SYS___TANF_H = 0x938 // 2360 + SYS___TANL_H = 0x939 // 2361 + SYS___TANH_H = 0x93A // 2362 + SYS___TANHF_H = 0x93B // 2363 + SYS___TANHL_H = 0x93C // 2364 + SYS___TGAMMA_H = 0x93D // 2365 + SYS___TGAMMAF_H = 0x93E // 2366 + SYS___TRUNC_H = 0x93F // 2367 + SYS___TRUNCF_H = 0x940 // 2368 + SYS___TRUNCL_H = 0x941 // 2369 + SYS___COSH_H = 0x942 // 2370 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 + SYS_VFSCANF = 0x944 // 2372 + SYS_VSCANF = 0x946 // 2374 + SYS_VSSCANF = 0x948 // 2376 + SYS_VFWSCANF = 0x94A // 2378 + SYS_VWSCANF = 0x94C // 2380 + SYS_VSWSCANF = 0x94E // 2382 + SYS_IMAXABS = 0x950 // 2384 + SYS_IMAXDIV = 0x951 // 2385 + SYS_STRTOIMAX = 0x952 // 2386 + SYS_STRTOUMAX = 0x953 // 2387 + SYS_WCSTOIMAX = 0x954 // 2388 + SYS_WCSTOUMAX = 0x955 // 2389 + SYS_ATOLL = 0x956 // 2390 + SYS_STRTOF = 0x957 // 2391 + SYS_STRTOLD = 0x958 // 2392 + SYS_WCSTOF = 0x959 // 2393 + SYS_WCSTOLD = 0x95A // 2394 + SYS_INET6_RTH_SPACE = 0x95B // 2395 + SYS_INET6_RTH_INIT = 0x95C // 2396 + SYS_INET6_RTH_ADD = 0x95D // 2397 + SYS_INET6_RTH_REVERSE = 0x95E // 2398 + SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 + SYS_INET6_RTH_GETADDR = 0x960 // 2400 + SYS_INET6_OPT_INIT = 0x961 // 2401 + SYS_INET6_OPT_APPEND = 0x962 // 2402 + SYS_INET6_OPT_FINISH = 0x963 // 2403 + SYS_INET6_OPT_SET_VAL = 0x964 // 2404 + SYS_INET6_OPT_NEXT = 0x965 // 2405 + SYS_INET6_OPT_FIND = 0x966 // 2406 + SYS_INET6_OPT_GET_VAL = 0x967 // 2407 + SYS___POW_I = 0x987 // 2439 + SYS___POW_I_B = 0x988 // 2440 + SYS___POW_I_H = 0x989 // 2441 + SYS___POW_II = 0x98A // 2442 + SYS___POW_II_B = 0x98B // 2443 + SYS___POW_II_H = 0x98C // 2444 + SYS_CABS = 0x98E // 2446 + SYS___CABS_B = 0x98F // 2447 + SYS___CABS_H = 0x990 // 2448 + SYS_CABSF = 0x991 // 2449 + SYS___CABSF_B = 0x992 // 2450 + SYS___CABSF_H = 0x993 // 2451 + SYS_CABSL = 0x994 // 2452 + SYS___CABSL_B = 0x995 // 2453 + SYS___CABSL_H = 0x996 // 2454 + SYS_CACOS = 0x997 // 2455 + SYS___CACOS_B = 0x998 // 2456 + SYS___CACOS_H = 0x999 // 2457 + SYS_CACOSF = 0x99A // 2458 + SYS___CACOSF_B = 0x99B // 2459 + SYS___CACOSF_H = 0x99C // 2460 + SYS_CACOSL = 0x99D // 2461 + SYS___CACOSL_B = 0x99E // 2462 + SYS___CACOSL_H = 0x99F // 2463 + SYS_CACOSH = 0x9A0 // 2464 + SYS___CACOSH_B = 0x9A1 // 2465 + SYS___CACOSH_H = 0x9A2 // 2466 + SYS_CACOSHF = 0x9A3 // 2467 + SYS___CACOSHF_B = 0x9A4 // 2468 + SYS___CACOSHF_H = 0x9A5 // 2469 + SYS_CACOSHL = 0x9A6 // 2470 + SYS___CACOSHL_B = 0x9A7 // 2471 + SYS___CACOSHL_H = 0x9A8 // 2472 + SYS_CARG = 0x9A9 // 2473 + SYS___CARG_B = 0x9AA // 2474 + SYS___CARG_H = 0x9AB // 2475 + SYS_CARGF = 0x9AC // 2476 + SYS___CARGF_B = 0x9AD // 
2477 + SYS___CARGF_H = 0x9AE // 2478 + SYS_CARGL = 0x9AF // 2479 + SYS___CARGL_B = 0x9B0 // 2480 + SYS___CARGL_H = 0x9B1 // 2481 + SYS_CASIN = 0x9B2 // 2482 + SYS___CASIN_B = 0x9B3 // 2483 + SYS___CASIN_H = 0x9B4 // 2484 + SYS_CASINF = 0x9B5 // 2485 + SYS___CASINF_B = 0x9B6 // 2486 + SYS___CASINF_H = 0x9B7 // 2487 + SYS_CASINL = 0x9B8 // 2488 + SYS___CASINL_B = 0x9B9 // 2489 + SYS___CASINL_H = 0x9BA // 2490 + SYS_CASINH = 0x9BB // 2491 + SYS___CASINH_B = 0x9BC // 2492 + SYS___CASINH_H = 0x9BD // 2493 + SYS_CASINHF = 0x9BE // 2494 + SYS___CASINHF_B = 0x9BF // 2495 + SYS___CASINHF_H = 0x9C0 // 2496 + SYS_CASINHL = 0x9C1 // 2497 + SYS___CASINHL_B = 0x9C2 // 2498 + SYS___CASINHL_H = 0x9C3 // 2499 + SYS_CATAN = 0x9C4 // 2500 + SYS___CATAN_B = 0x9C5 // 2501 + SYS___CATAN_H = 0x9C6 // 2502 + SYS_CATANF = 0x9C7 // 2503 + SYS___CATANF_B = 0x9C8 // 2504 + SYS___CATANF_H = 0x9C9 // 2505 + SYS_CATANL = 0x9CA // 2506 + SYS___CATANL_B = 0x9CB // 2507 + SYS___CATANL_H = 0x9CC // 2508 + SYS_CATANH = 0x9CD // 2509 + SYS___CATANH_B = 0x9CE // 2510 + SYS___CATANH_H = 0x9CF // 2511 + SYS_CATANHF = 0x9D0 // 2512 + SYS___CATANHF_B = 0x9D1 // 2513 + SYS___CATANHF_H = 0x9D2 // 2514 + SYS_CATANHL = 0x9D3 // 2515 + SYS___CATANHL_B = 0x9D4 // 2516 + SYS___CATANHL_H = 0x9D5 // 2517 + SYS_CCOS = 0x9D6 // 2518 + SYS___CCOS_B = 0x9D7 // 2519 + SYS___CCOS_H = 0x9D8 // 2520 + SYS_CCOSF = 0x9D9 // 2521 + SYS___CCOSF_B = 0x9DA // 2522 + SYS___CCOSF_H = 0x9DB // 2523 + SYS_CCOSL = 0x9DC // 2524 + SYS___CCOSL_B = 0x9DD // 2525 + SYS___CCOSL_H = 0x9DE // 2526 + SYS_CCOSH = 0x9DF // 2527 + SYS___CCOSH_B = 0x9E0 // 2528 + SYS___CCOSH_H = 0x9E1 // 2529 + SYS_CCOSHF = 0x9E2 // 2530 + SYS___CCOSHF_B = 0x9E3 // 2531 + SYS___CCOSHF_H = 0x9E4 // 2532 + SYS_CCOSHL = 0x9E5 // 2533 + SYS___CCOSHL_B = 0x9E6 // 2534 + SYS___CCOSHL_H = 0x9E7 // 2535 + SYS_CEXP = 0x9E8 // 2536 + SYS___CEXP_B = 0x9E9 // 2537 + SYS___CEXP_H = 0x9EA // 2538 + SYS_CEXPF = 0x9EB // 2539 + SYS___CEXPF_B = 0x9EC // 2540 + SYS___CEXPF_H = 0x9ED // 2541 + SYS_CEXPL = 0x9EE // 2542 + SYS___CEXPL_B = 0x9EF // 2543 + SYS___CEXPL_H = 0x9F0 // 2544 + SYS_CIMAG = 0x9F1 // 2545 + SYS___CIMAG_B = 0x9F2 // 2546 + SYS___CIMAG_H = 0x9F3 // 2547 + SYS_CIMAGF = 0x9F4 // 2548 + SYS___CIMAGF_B = 0x9F5 // 2549 + SYS___CIMAGF_H = 0x9F6 // 2550 + SYS_CIMAGL = 0x9F7 // 2551 + SYS___CIMAGL_B = 0x9F8 // 2552 + SYS___CIMAGL_H = 0x9F9 // 2553 + SYS___CLOG = 0x9FA // 2554 + SYS___CLOG_B = 0x9FB // 2555 + SYS___CLOG_H = 0x9FC // 2556 + SYS_CLOGF = 0x9FD // 2557 + SYS___CLOGF_B = 0x9FE // 2558 + SYS___CLOGF_H = 0x9FF // 2559 + SYS_CLOGL = 0xA00 // 2560 + SYS___CLOGL_B = 0xA01 // 2561 + SYS___CLOGL_H = 0xA02 // 2562 + SYS_CONJ = 0xA03 // 2563 + SYS___CONJ_B = 0xA04 // 2564 + SYS___CONJ_H = 0xA05 // 2565 + SYS_CONJF = 0xA06 // 2566 + SYS___CONJF_B = 0xA07 // 2567 + SYS___CONJF_H = 0xA08 // 2568 + SYS_CONJL = 0xA09 // 2569 + SYS___CONJL_B = 0xA0A // 2570 + SYS___CONJL_H = 0xA0B // 2571 + SYS_CPOW = 0xA0C // 2572 + SYS___CPOW_B = 0xA0D // 2573 + SYS___CPOW_H = 0xA0E // 2574 + SYS_CPOWF = 0xA0F // 2575 + SYS___CPOWF_B = 0xA10 // 2576 + SYS___CPOWF_H = 0xA11 // 2577 + SYS_CPOWL = 0xA12 // 2578 + SYS___CPOWL_B = 0xA13 // 2579 + SYS___CPOWL_H = 0xA14 // 2580 + SYS_CPROJ = 0xA15 // 2581 + SYS___CPROJ_B = 0xA16 // 2582 + SYS___CPROJ_H = 0xA17 // 2583 + SYS_CPROJF = 0xA18 // 2584 + SYS___CPROJF_B = 0xA19 // 2585 + SYS___CPROJF_H = 0xA1A // 2586 + SYS_CPROJL = 0xA1B // 2587 + SYS___CPROJL_B = 0xA1C // 2588 + SYS___CPROJL_H = 0xA1D // 2589 + SYS_CREAL = 0xA1E // 2590 + SYS___CREAL_B = 0xA1F // 2591 + 
SYS___CREAL_H = 0xA20 // 2592 + SYS_CREALF = 0xA21 // 2593 + SYS___CREALF_B = 0xA22 // 2594 + SYS___CREALF_H = 0xA23 // 2595 + SYS_CREALL = 0xA24 // 2596 + SYS___CREALL_B = 0xA25 // 2597 + SYS___CREALL_H = 0xA26 // 2598 + SYS_CSIN = 0xA27 // 2599 + SYS___CSIN_B = 0xA28 // 2600 + SYS___CSIN_H = 0xA29 // 2601 + SYS_CSINF = 0xA2A // 2602 + SYS___CSINF_B = 0xA2B // 2603 + SYS___CSINF_H = 0xA2C // 2604 + SYS_CSINL = 0xA2D // 2605 + SYS___CSINL_B = 0xA2E // 2606 + SYS___CSINL_H = 0xA2F // 2607 + SYS_CSINH = 0xA30 // 2608 + SYS___CSINH_B = 0xA31 // 2609 + SYS___CSINH_H = 0xA32 // 2610 + SYS_CSINHF = 0xA33 // 2611 + SYS___CSINHF_B = 0xA34 // 2612 + SYS___CSINHF_H = 0xA35 // 2613 + SYS_CSINHL = 0xA36 // 2614 + SYS___CSINHL_B = 0xA37 // 2615 + SYS___CSINHL_H = 0xA38 // 2616 + SYS_CSQRT = 0xA39 // 2617 + SYS___CSQRT_B = 0xA3A // 2618 + SYS___CSQRT_H = 0xA3B // 2619 + SYS_CSQRTF = 0xA3C // 2620 + SYS___CSQRTF_B = 0xA3D // 2621 + SYS___CSQRTF_H = 0xA3E // 2622 + SYS_CSQRTL = 0xA3F // 2623 + SYS___CSQRTL_B = 0xA40 // 2624 + SYS___CSQRTL_H = 0xA41 // 2625 + SYS_CTAN = 0xA42 // 2626 + SYS___CTAN_B = 0xA43 // 2627 + SYS___CTAN_H = 0xA44 // 2628 + SYS_CTANF = 0xA45 // 2629 + SYS___CTANF_B = 0xA46 // 2630 + SYS___CTANF_H = 0xA47 // 2631 + SYS_CTANL = 0xA48 // 2632 + SYS___CTANL_B = 0xA49 // 2633 + SYS___CTANL_H = 0xA4A // 2634 + SYS_CTANH = 0xA4B // 2635 + SYS___CTANH_B = 0xA4C // 2636 + SYS___CTANH_H = 0xA4D // 2637 + SYS_CTANHF = 0xA4E // 2638 + SYS___CTANHF_B = 0xA4F // 2639 + SYS___CTANHF_H = 0xA50 // 2640 + SYS_CTANHL = 0xA51 // 2641 + SYS___CTANHL_B = 0xA52 // 2642 + SYS___CTANHL_H = 0xA53 // 2643 + SYS___ACOSHF_H = 0xA54 // 2644 + SYS___ACOSHL_H = 0xA55 // 2645 + SYS___ASINHF_H = 0xA56 // 2646 + SYS___ASINHL_H = 0xA57 // 2647 + SYS___CBRTF_H = 0xA58 // 2648 + SYS___CBRTL_H = 0xA59 // 2649 + SYS___COPYSIGN_B = 0xA5A // 2650 + SYS___EXPM1F_H = 0xA5B // 2651 + SYS___EXPM1L_H = 0xA5C // 2652 + SYS___EXP2_H = 0xA5D // 2653 + SYS___EXP2F_H = 0xA5E // 2654 + SYS___EXP2L_H = 0xA5F // 2655 + SYS___LOG1PF_H = 0xA60 // 2656 + SYS___LOG1PL_H = 0xA61 // 2657 + SYS___LGAMMAL_H = 0xA62 // 2658 + SYS_FMA = 0xA63 // 2659 + SYS___FMA_B = 0xA64 // 2660 + SYS___FMA_H = 0xA65 // 2661 + SYS_FMAF = 0xA66 // 2662 + SYS___FMAF_B = 0xA67 // 2663 + SYS___FMAF_H = 0xA68 // 2664 + SYS_FMAL = 0xA69 // 2665 + SYS___FMAL_B = 0xA6A // 2666 + SYS___FMAL_H = 0xA6B // 2667 + SYS_FMAX = 0xA6C // 2668 + SYS___FMAX_B = 0xA6D // 2669 + SYS___FMAX_H = 0xA6E // 2670 + SYS_FMAXF = 0xA6F // 2671 + SYS___FMAXF_B = 0xA70 // 2672 + SYS___FMAXF_H = 0xA71 // 2673 + SYS_FMAXL = 0xA72 // 2674 + SYS___FMAXL_B = 0xA73 // 2675 + SYS___FMAXL_H = 0xA74 // 2676 + SYS_FMIN = 0xA75 // 2677 + SYS___FMIN_B = 0xA76 // 2678 + SYS___FMIN_H = 0xA77 // 2679 + SYS_FMINF = 0xA78 // 2680 + SYS___FMINF_B = 0xA79 // 2681 + SYS___FMINF_H = 0xA7A // 2682 + SYS_FMINL = 0xA7B // 2683 + SYS___FMINL_B = 0xA7C // 2684 + SYS___FMINL_H = 0xA7D // 2685 + SYS_ILOGBF = 0xA7E // 2686 + SYS___ILOGBF_B = 0xA7F // 2687 + SYS___ILOGBF_H = 0xA80 // 2688 + SYS_ILOGBL = 0xA81 // 2689 + SYS___ILOGBL_B = 0xA82 // 2690 + SYS___ILOGBL_H = 0xA83 // 2691 + SYS_LLRINT = 0xA84 // 2692 + SYS___LLRINT_B = 0xA85 // 2693 + SYS___LLRINT_H = 0xA86 // 2694 + SYS_LLRINTF = 0xA87 // 2695 + SYS___LLRINTF_B = 0xA88 // 2696 + SYS___LLRINTF_H = 0xA89 // 2697 + SYS_LLRINTL = 0xA8A // 2698 + SYS___LLRINTL_B = 0xA8B // 2699 + SYS___LLRINTL_H = 0xA8C // 2700 + SYS_LLROUND = 0xA8D // 2701 + SYS___LLROUND_B = 0xA8E // 2702 + SYS___LLROUND_H = 0xA8F // 2703 + SYS_LLROUNDF = 0xA90 // 2704 + SYS___LLROUNDF_B = 0xA91 // 
2705 + SYS___LLROUNDF_H = 0xA92 // 2706 + SYS_LLROUNDL = 0xA93 // 2707 + SYS___LLROUNDL_B = 0xA94 // 2708 + SYS___LLROUNDL_H = 0xA95 // 2709 + SYS_LOGBF = 0xA96 // 2710 + SYS___LOGBF_B = 0xA97 // 2711 + SYS___LOGBF_H = 0xA98 // 2712 + SYS_LOGBL = 0xA99 // 2713 + SYS___LOGBL_B = 0xA9A // 2714 + SYS___LOGBL_H = 0xA9B // 2715 + SYS_LRINT = 0xA9C // 2716 + SYS___LRINT_B = 0xA9D // 2717 + SYS___LRINT_H = 0xA9E // 2718 + SYS_LRINTF = 0xA9F // 2719 + SYS___LRINTF_B = 0xAA0 // 2720 + SYS___LRINTF_H = 0xAA1 // 2721 + SYS_LRINTL = 0xAA2 // 2722 + SYS___LRINTL_B = 0xAA3 // 2723 + SYS___LRINTL_H = 0xAA4 // 2724 + SYS_LROUNDL = 0xAA5 // 2725 + SYS___LROUNDL_B = 0xAA6 // 2726 + SYS___LROUNDL_H = 0xAA7 // 2727 + SYS_NAN = 0xAA8 // 2728 + SYS___NAN_B = 0xAA9 // 2729 + SYS_NANF = 0xAAA // 2730 + SYS___NANF_B = 0xAAB // 2731 + SYS_NANL = 0xAAC // 2732 + SYS___NANL_B = 0xAAD // 2733 + SYS_NEARBYINT = 0xAAE // 2734 + SYS___NEARBYINT_B = 0xAAF // 2735 + SYS___NEARBYINT_H = 0xAB0 // 2736 + SYS_NEARBYINTF = 0xAB1 // 2737 + SYS___NEARBYINTF_B = 0xAB2 // 2738 + SYS___NEARBYINTF_H = 0xAB3 // 2739 + SYS_NEARBYINTL = 0xAB4 // 2740 + SYS___NEARBYINTL_B = 0xAB5 // 2741 + SYS___NEARBYINTL_H = 0xAB6 // 2742 + SYS_NEXTAFTERF = 0xAB7 // 2743 + SYS___NEXTAFTERF_B = 0xAB8 // 2744 + SYS___NEXTAFTERF_H = 0xAB9 // 2745 + SYS_NEXTAFTERL = 0xABA // 2746 + SYS___NEXTAFTERL_B = 0xABB // 2747 + SYS___NEXTAFTERL_H = 0xABC // 2748 + SYS_NEXTTOWARD = 0xABD // 2749 + SYS___NEXTTOWARD_B = 0xABE // 2750 + SYS___NEXTTOWARD_H = 0xABF // 2751 + SYS_NEXTTOWARDF = 0xAC0 // 2752 + SYS___NEXTTOWARDF_B = 0xAC1 // 2753 + SYS___NEXTTOWARDF_H = 0xAC2 // 2754 + SYS_NEXTTOWARDL = 0xAC3 // 2755 + SYS___NEXTTOWARDL_B = 0xAC4 // 2756 + SYS___NEXTTOWARDL_H = 0xAC5 // 2757 + SYS___REMAINDERF_H = 0xAC6 // 2758 + SYS___REMAINDERL_H = 0xAC7 // 2759 + SYS___REMQUO_H = 0xAC8 // 2760 + SYS___REMQUOF_H = 0xAC9 // 2761 + SYS___REMQUOL_H = 0xACA // 2762 + SYS_RINTF = 0xACB // 2763 + SYS___RINTF_B = 0xACC // 2764 + SYS_RINTL = 0xACD // 2765 + SYS___RINTL_B = 0xACE // 2766 + SYS_ROUND = 0xACF // 2767 + SYS___ROUND_B = 0xAD0 // 2768 + SYS___ROUND_H = 0xAD1 // 2769 + SYS_ROUNDF = 0xAD2 // 2770 + SYS___ROUNDF_B = 0xAD3 // 2771 + SYS___ROUNDF_H = 0xAD4 // 2772 + SYS_ROUNDL = 0xAD5 // 2773 + SYS___ROUNDL_B = 0xAD6 // 2774 + SYS___ROUNDL_H = 0xAD7 // 2775 + SYS_SCALBLN = 0xAD8 // 2776 + SYS___SCALBLN_B = 0xAD9 // 2777 + SYS___SCALBLN_H = 0xADA // 2778 + SYS_SCALBLNF = 0xADB // 2779 + SYS___SCALBLNF_B = 0xADC // 2780 + SYS___SCALBLNF_H = 0xADD // 2781 + SYS_SCALBLNL = 0xADE // 2782 + SYS___SCALBLNL_B = 0xADF // 2783 + SYS___SCALBLNL_H = 0xAE0 // 2784 + SYS___SCALBN_B = 0xAE1 // 2785 + SYS___SCALBN_H = 0xAE2 // 2786 + SYS_SCALBNF = 0xAE3 // 2787 + SYS___SCALBNF_B = 0xAE4 // 2788 + SYS___SCALBNF_H = 0xAE5 // 2789 + SYS_SCALBNL = 0xAE6 // 2790 + SYS___SCALBNL_B = 0xAE7 // 2791 + SYS___SCALBNL_H = 0xAE8 // 2792 + SYS___TGAMMAL_H = 0xAE9 // 2793 + SYS_FECLEAREXCEPT = 0xAEA // 2794 + SYS_FEGETENV = 0xAEB // 2795 + SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 + SYS_FEGETROUND = 0xAED // 2797 + SYS_FEHOLDEXCEPT = 0xAEE // 2798 + SYS_FERAISEEXCEPT = 0xAEF // 2799 + SYS_FESETENV = 0xAF0 // 2800 + SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 + SYS_FESETROUND = 0xAF2 // 2802 + SYS_FETESTEXCEPT = 0xAF3 // 2803 + SYS_FEUPDATEENV = 0xAF4 // 2804 + SYS___COPYSIGN_H = 0xAF5 // 2805 + SYS___HYPOTF_H = 0xAF6 // 2806 + SYS___HYPOTL_H = 0xAF7 // 2807 + SYS___CLASS = 0xAFA // 2810 + SYS___CLASS_B = 0xAFB // 2811 + SYS___CLASS_H = 0xAFC // 2812 + SYS___ISBLANK_A = 0xB2E // 2862 + SYS___ISWBLANK_A = 0xB2F // 2863 + 
SYS___LROUND_FIXUP = 0xB30 // 2864 + SYS___LROUNDF_FIXUP = 0xB31 // 2865 + SYS_SCHED_YIELD = 0xB32 // 2866 + SYS_STRERROR_R = 0xB33 // 2867 + SYS_UNSETENV = 0xB34 // 2868 + SYS___LGAMMA_H_C99 = 0xB38 // 2872 + SYS___LGAMMA_B_C99 = 0xB39 // 2873 + SYS___LGAMMA_R_C99 = 0xB3A // 2874 + SYS___FTELL2 = 0xB3B // 2875 + SYS___FSEEK2 = 0xB3C // 2876 + SYS___STATIC_REINIT = 0xB3D // 2877 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 + SYS___TGAMMA_H_C99 = 0xB78 // 2936 + SYS___TGAMMAF_H_C99 = 0xB79 // 2937 + SYS___LE_TRACEBACK = 0xB7A // 2938 + SYS___MUST_STAY_CLEAN = 0xB7C // 2940 + SYS___O_ENV = 0xB7D // 2941 + SYS_ACOSD32 = 0xB7E // 2942 + SYS_ACOSD64 = 0xB7F // 2943 + SYS_ACOSD128 = 0xB80 // 2944 + SYS_ACOSHD32 = 0xB81 // 2945 + SYS_ACOSHD64 = 0xB82 // 2946 + SYS_ACOSHD128 = 0xB83 // 2947 + SYS_ASIND32 = 0xB84 // 2948 + SYS_ASIND64 = 0xB85 // 2949 + SYS_ASIND128 = 0xB86 // 2950 + SYS_ASINHD32 = 0xB87 // 2951 + SYS_ASINHD64 = 0xB88 // 2952 + SYS_ASINHD128 = 0xB89 // 2953 + SYS_ATAND32 = 0xB8A // 2954 + SYS_ATAND64 = 0xB8B // 2955 + SYS_ATAND128 = 0xB8C // 2956 + SYS_ATAN2D32 = 0xB8D // 2957 + SYS_ATAN2D64 = 0xB8E // 2958 + SYS_ATAN2D128 = 0xB8F // 2959 + SYS_ATANHD32 = 0xB90 // 2960 + SYS_ATANHD64 = 0xB91 // 2961 + SYS_ATANHD128 = 0xB92 // 2962 + SYS_CBRTD32 = 0xB93 // 2963 + SYS_CBRTD64 = 0xB94 // 2964 + SYS_CBRTD128 = 0xB95 // 2965 + SYS_CEILD32 = 0xB96 // 2966 + SYS_CEILD64 = 0xB97 // 2967 + SYS_CEILD128 = 0xB98 // 2968 + SYS___CLASS2 = 0xB99 // 2969 + SYS___CLASS2_B = 0xB9A // 2970 + SYS___CLASS2_H = 0xB9B // 2971 + SYS_COPYSIGND32 = 0xB9C // 2972 + SYS_COPYSIGND64 = 0xB9D // 2973 + SYS_COPYSIGND128 = 0xB9E // 2974 + SYS_COSD32 = 0xB9F // 2975 + SYS_COSD64 = 0xBA0 // 2976 + SYS_COSD128 = 0xBA1 // 2977 + SYS_COSHD32 = 0xBA2 // 2978 + SYS_COSHD64 = 0xBA3 // 2979 + SYS_COSHD128 = 0xBA4 // 2980 + SYS_ERFD32 = 0xBA5 // 2981 + SYS_ERFD64 = 0xBA6 // 2982 + SYS_ERFD128 = 0xBA7 // 2983 + SYS_ERFCD32 = 0xBA8 // 2984 + SYS_ERFCD64 = 0xBA9 // 2985 + SYS_ERFCD128 = 0xBAA // 2986 + SYS_EXPD32 = 0xBAB // 2987 + SYS_EXPD64 = 0xBAC // 2988 + SYS_EXPD128 = 0xBAD // 2989 + SYS_EXP2D32 = 0xBAE // 2990 + SYS_EXP2D64 = 0xBAF // 2991 + SYS_EXP2D128 = 0xBB0 // 2992 + SYS_EXPM1D32 = 0xBB1 // 2993 + SYS_EXPM1D64 = 0xBB2 // 2994 + SYS_EXPM1D128 = 0xBB3 // 2995 + SYS_FABSD32 = 0xBB4 // 2996 + SYS_FABSD64 = 0xBB5 // 2997 + SYS_FABSD128 = 0xBB6 // 2998 + SYS_FDIMD32 = 0xBB7 // 2999 + SYS_FDIMD64 = 0xBB8 // 3000 + SYS_FDIMD128 = 0xBB9 // 3001 + SYS_FE_DEC_GETROUND = 0xBBA // 3002 + SYS_FE_DEC_SETROUND = 0xBBB // 3003 + SYS_FLOORD32 = 0xBBC // 3004 + SYS_FLOORD64 = 0xBBD // 3005 + SYS_FLOORD128 = 0xBBE // 3006 + SYS_FMAD32 = 0xBBF // 3007 + SYS_FMAD64 = 0xBC0 // 3008 + SYS_FMAD128 = 0xBC1 // 3009 + SYS_FMAXD32 = 0xBC2 // 3010 + SYS_FMAXD64 = 0xBC3 // 3011 + SYS_FMAXD128 = 0xBC4 // 3012 + SYS_FMIND32 = 0xBC5 // 3013 + SYS_FMIND64 = 0xBC6 // 3014 + SYS_FMIND128 = 0xBC7 // 3015 + SYS_FMODD32 = 0xBC8 // 3016 + SYS_FMODD64 = 0xBC9 // 3017 + SYS_FMODD128 = 0xBCA // 3018 + SYS___FP_CAST_D = 0xBCB // 3019 + SYS_FREXPD32 = 0xBCC // 3020 + SYS_FREXPD64 = 0xBCD // 3021 + SYS_FREXPD128 = 0xBCE // 3022 + SYS_HYPOTD32 = 0xBCF // 3023 + SYS_HYPOTD64 = 0xBD0 // 3024 + SYS_HYPOTD128 = 0xBD1 // 3025 + SYS_ILOGBD32 = 0xBD2 // 3026 + SYS_ILOGBD64 = 0xBD3 // 3027 + SYS_ILOGBD128 = 0xBD4 // 3028 + SYS_LDEXPD32 = 0xBD5 // 3029 + SYS_LDEXPD64 = 0xBD6 // 3030 + SYS_LDEXPD128 = 0xBD7 // 3031 + SYS_LGAMMAD32 = 0xBD8 // 3032 + SYS_LGAMMAD64 = 0xBD9 // 3033 + SYS_LGAMMAD128 = 0xBDA // 3034 + SYS_LLRINTD32 = 
0xBDB // 3035 + SYS_LLRINTD64 = 0xBDC // 3036 + SYS_LLRINTD128 = 0xBDD // 3037 + SYS_LLROUNDD32 = 0xBDE // 3038 + SYS_LLROUNDD64 = 0xBDF // 3039 + SYS_LLROUNDD128 = 0xBE0 // 3040 + SYS_LOGD32 = 0xBE1 // 3041 + SYS_LOGD64 = 0xBE2 // 3042 + SYS_LOGD128 = 0xBE3 // 3043 + SYS_LOG10D32 = 0xBE4 // 3044 + SYS_LOG10D64 = 0xBE5 // 3045 + SYS_LOG10D128 = 0xBE6 // 3046 + SYS_LOG1PD32 = 0xBE7 // 3047 + SYS_LOG1PD64 = 0xBE8 // 3048 + SYS_LOG1PD128 = 0xBE9 // 3049 + SYS_LOG2D32 = 0xBEA // 3050 + SYS_LOG2D64 = 0xBEB // 3051 + SYS_LOG2D128 = 0xBEC // 3052 + SYS_LOGBD32 = 0xBED // 3053 + SYS_LOGBD64 = 0xBEE // 3054 + SYS_LOGBD128 = 0xBEF // 3055 + SYS_LRINTD32 = 0xBF0 // 3056 + SYS_LRINTD64 = 0xBF1 // 3057 + SYS_LRINTD128 = 0xBF2 // 3058 + SYS_LROUNDD32 = 0xBF3 // 3059 + SYS_LROUNDD64 = 0xBF4 // 3060 + SYS_LROUNDD128 = 0xBF5 // 3061 + SYS_MODFD32 = 0xBF6 // 3062 + SYS_MODFD64 = 0xBF7 // 3063 + SYS_MODFD128 = 0xBF8 // 3064 + SYS_NAND32 = 0xBF9 // 3065 + SYS_NAND64 = 0xBFA // 3066 + SYS_NAND128 = 0xBFB // 3067 + SYS_NEARBYINTD32 = 0xBFC // 3068 + SYS_NEARBYINTD64 = 0xBFD // 3069 + SYS_NEARBYINTD128 = 0xBFE // 3070 + SYS_NEXTAFTERD32 = 0xBFF // 3071 + SYS_NEXTAFTERD64 = 0xC00 // 3072 + SYS_NEXTAFTERD128 = 0xC01 // 3073 + SYS_NEXTTOWARDD32 = 0xC02 // 3074 + SYS_NEXTTOWARDD64 = 0xC03 // 3075 + SYS_NEXTTOWARDD128 = 0xC04 // 3076 + SYS_POWD32 = 0xC05 // 3077 + SYS_POWD64 = 0xC06 // 3078 + SYS_POWD128 = 0xC07 // 3079 + SYS_QUANTIZED32 = 0xC08 // 3080 + SYS_QUANTIZED64 = 0xC09 // 3081 + SYS_QUANTIZED128 = 0xC0A // 3082 + SYS_REMAINDERD32 = 0xC0B // 3083 + SYS_REMAINDERD64 = 0xC0C // 3084 + SYS_REMAINDERD128 = 0xC0D // 3085 + SYS___REMQUOD32 = 0xC0E // 3086 + SYS___REMQUOD64 = 0xC0F // 3087 + SYS___REMQUOD128 = 0xC10 // 3088 + SYS_RINTD32 = 0xC11 // 3089 + SYS_RINTD64 = 0xC12 // 3090 + SYS_RINTD128 = 0xC13 // 3091 + SYS_ROUNDD32 = 0xC14 // 3092 + SYS_ROUNDD64 = 0xC15 // 3093 + SYS_ROUNDD128 = 0xC16 // 3094 + SYS_SAMEQUANTUMD32 = 0xC17 // 3095 + SYS_SAMEQUANTUMD64 = 0xC18 // 3096 + SYS_SAMEQUANTUMD128 = 0xC19 // 3097 + SYS_SCALBLND32 = 0xC1A // 3098 + SYS_SCALBLND64 = 0xC1B // 3099 + SYS_SCALBLND128 = 0xC1C // 3100 + SYS_SCALBND32 = 0xC1D // 3101 + SYS_SCALBND64 = 0xC1E // 3102 + SYS_SCALBND128 = 0xC1F // 3103 + SYS_SIND32 = 0xC20 // 3104 + SYS_SIND64 = 0xC21 // 3105 + SYS_SIND128 = 0xC22 // 3106 + SYS_SINHD32 = 0xC23 // 3107 + SYS_SINHD64 = 0xC24 // 3108 + SYS_SINHD128 = 0xC25 // 3109 + SYS_SQRTD32 = 0xC26 // 3110 + SYS_SQRTD64 = 0xC27 // 3111 + SYS_SQRTD128 = 0xC28 // 3112 + SYS_STRTOD32 = 0xC29 // 3113 + SYS_STRTOD64 = 0xC2A // 3114 + SYS_STRTOD128 = 0xC2B // 3115 + SYS_TAND32 = 0xC2C // 3116 + SYS_TAND64 = 0xC2D // 3117 + SYS_TAND128 = 0xC2E // 3118 + SYS_TANHD32 = 0xC2F // 3119 + SYS_TANHD64 = 0xC30 // 3120 + SYS_TANHD128 = 0xC31 // 3121 + SYS_TGAMMAD32 = 0xC32 // 3122 + SYS_TGAMMAD64 = 0xC33 // 3123 + SYS_TGAMMAD128 = 0xC34 // 3124 + SYS_TRUNCD32 = 0xC3E // 3134 + SYS_TRUNCD64 = 0xC3F // 3135 + SYS_TRUNCD128 = 0xC40 // 3136 + SYS_WCSTOD32 = 0xC41 // 3137 + SYS_WCSTOD64 = 0xC42 // 3138 + SYS_WCSTOD128 = 0xC43 // 3139 + SYS___CODEPAGE_INFO = 0xC64 // 3172 + SYS_POSIX_OPENPT = 0xC66 // 3174 + SYS_PSELECT = 0xC67 // 3175 + SYS_SOCKATMARK = 0xC68 // 3176 + SYS_AIO_FSYNC = 0xC69 // 3177 + SYS_LIO_LISTIO = 0xC6A // 3178 + SYS___ATANPID32 = 0xC6B // 3179 + SYS___ATANPID64 = 0xC6C // 3180 + SYS___ATANPID128 = 0xC6D // 3181 + SYS___COSPID32 = 0xC6E // 3182 + SYS___COSPID64 = 0xC6F // 3183 + SYS___COSPID128 = 0xC70 // 3184 + SYS___SINPID32 = 0xC71 // 3185 + SYS___SINPID64 = 0xC72 // 3186 + SYS___SINPID128 = 0xC73 // 3187 
+ SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 + SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 + SYS_SETSOURCEFILTER = 0xC78 // 3192 + SYS_GETSOURCEFILTER = 0xC79 // 3193 + SYS_FWRITE_UNLOCKED = 0xC7A // 3194 + SYS_FREAD_UNLOCKED = 0xC7B // 3195 + SYS_FGETS_UNLOCKED = 0xC7C // 3196 + SYS_GETS_UNLOCKED = 0xC7D // 3197 + SYS_FPUTS_UNLOCKED = 0xC7E // 3198 + SYS_PUTS_UNLOCKED = 0xC7F // 3199 + SYS_FGETC_UNLOCKED = 0xC80 // 3200 + SYS_FPUTC_UNLOCKED = 0xC81 // 3201 + SYS_DLADDR = 0xC82 // 3202 + SYS_SHM_OPEN = 0xC8C // 3212 + SYS_SHM_UNLINK = 0xC8D // 3213 + SYS___CLASS2F = 0xC91 // 3217 + SYS___CLASS2L = 0xC92 // 3218 + SYS___CLASS2F_B = 0xC93 // 3219 + SYS___CLASS2F_H = 0xC94 // 3220 + SYS___CLASS2L_B = 0xC95 // 3221 + SYS___CLASS2L_H = 0xC96 // 3222 + SYS___CLASS2D32 = 0xC97 // 3223 + SYS___CLASS2D64 = 0xC98 // 3224 + SYS___CLASS2D128 = 0xC99 // 3225 + SYS___TOCSNAME2 = 0xC9A // 3226 + SYS___D1TOP = 0xC9B // 3227 + SYS___D2TOP = 0xC9C // 3228 + SYS___D4TOP = 0xC9D // 3229 + SYS___PTOD1 = 0xC9E // 3230 + SYS___PTOD2 = 0xC9F // 3231 + SYS___PTOD4 = 0xCA0 // 3232 + SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 + SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 + SYS_FEOF_UNLOCKED = 0xCA3 // 3235 + SYS_FERROR_UNLOCKED = 0xCA4 // 3236 + SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 + SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 + SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 + SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 + SYS_FILENO_UNLOCKED = 0xCA9 // 3241 + SYS_FLDATA_UNLOCKED = 0xCAA // 3242 + SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 + SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 + SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 + SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 + SYS_FSCANF_UNLOCKED = 0xCAF // 3247 + SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 + SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 + SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 + SYS_FTELL_UNLOCKED = 0xCB4 // 3252 + SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 + SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 + SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 + SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 + SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 + SYS_GETWC_UNLOCKED = 0xCBB // 3259 + SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 + SYS_PERROR_UNLOCKED = 0xCBD // 3261 + SYS_PRINTF_UNLOCKED = 0xCBE // 3262 + SYS_PUTWC_UNLOCKED = 0xCBF // 3263 + SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 + SYS_REWIND_UNLOCKED = 0xCC1 // 3265 + SYS_SCANF_UNLOCKED = 0xCC2 // 3266 + SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 + SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 + SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 + SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 + SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 + SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 + SYS_VSCANF_UNLOCKED = 0xCCF // 3279 + SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 + SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 + SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 + SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 + SYS_ASCTIME64 = 0xCD7 // 3287 + SYS_ASCTIME64_R = 0xCD8 // 3288 + SYS_CTIME64 = 0xCD9 // 3289 + SYS_CTIME64_R = 0xCDA // 3290 + SYS_DIFFTIME64 = 0xCDB // 3291 + SYS_GMTIME64 = 0xCDC // 3292 + SYS_GMTIME64_R = 0xCDD // 3293 + SYS_LOCALTIME64 = 0xCDE // 3294 + SYS_LOCALTIME64_R = 0xCDF // 3295 + SYS_MKTIME64 = 0xCE0 // 3296 + SYS_TIME64 = 0xCE1 // 3297 + SYS___LOGIN_APPLID = 0xCE2 // 3298 + SYS___PASSWD_APPLID = 0xCE3 // 3299 + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 + SYS___GETTHENT = 0xCE5 // 3301 + SYS_FREEIFADDRS = 0xCE6 // 3302 + SYS_GETIFADDRS = 0xCE7 // 3303 + SYS_POSIX_FALLOCATE = 0xCE8 // 3304 + SYS_POSIX_MEMALIGN = 0xCE9 // 3305 + SYS_SIZEOF_ALLOC = 0xCEA // 3306 + SYS_RESIZE_ALLOC = 0xCEB // 3307 + SYS_FREAD_NOUPDATE = 0xCEC // 3308 + SYS_FREAD_NOUPDATE_UNLOCKED = 
0xCED // 3309 + SYS_FGETPOS64 = 0xCEE // 3310 + SYS_FSEEK64 = 0xCEF // 3311 + SYS_FSEEKO64 = 0xCF0 // 3312 + SYS_FSETPOS64 = 0xCF1 // 3313 + SYS_FTELL64 = 0xCF2 // 3314 + SYS_FTELLO64 = 0xCF3 // 3315 + SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 + SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 + SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 + SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 + SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 + SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 + SYS_FOPEN_UNLOCKED = 0xCFA // 3322 + SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 + SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 + SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 + SYS___MOSERVICES = 0xD3D // 3389 + SYS___GETTOD = 0xD3E // 3390 + SYS_C16RTOMB = 0xD40 // 3392 + SYS_C32RTOMB = 0xD41 // 3393 + SYS_MBRTOC16 = 0xD42 // 3394 + SYS_MBRTOC32 = 0xD43 // 3395 + SYS_QUANTEXPD32 = 0xD44 // 3396 + SYS_QUANTEXPD64 = 0xD45 // 3397 + SYS_QUANTEXPD128 = 0xD46 // 3398 + SYS___LOCALE_CTL = 0xD47 // 3399 + SYS___SMF_RECORD2 = 0xD48 // 3400 + SYS_FOPEN64 = 0xD49 // 3401 + SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 + SYS_FREOPEN64 = 0xD4B // 3403 + SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 + SYS_TMPFILE64 = 0xD4D // 3405 + SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 + SYS_GETDATE64 = 0xD4F // 3407 + SYS_GETTIMEOFDAY64 = 0xD50 // 3408 + SYS_BIND2ADDRSEL = 0xD59 // 3417 + SYS_INET6_IS_SRCADDR = 0xD5A // 3418 + SYS___GETGRGID1 = 0xD5B // 3419 + SYS___GETGRNAM1 = 0xD5C // 3420 + SYS___FBUFSIZE = 0xD60 // 3424 + SYS___FPENDING = 0xD61 // 3425 + SYS___FLBF = 0xD62 // 3426 + SYS___FREADABLE = 0xD63 // 3427 + SYS___FWRITABLE = 0xD64 // 3428 + SYS___FREADING = 0xD65 // 3429 + SYS___FWRITING = 0xD66 // 3430 + SYS___FSETLOCKING = 0xD67 // 3431 + SYS__FLUSHLBF = 0xD68 // 3432 + SYS___FPURGE = 0xD69 // 3433 + SYS___FREADAHEAD = 0xD6A // 3434 + SYS___FSETERR = 0xD6B // 3435 + SYS___FPENDING_UNLOCKED = 0xD6C // 3436 + SYS___FREADING_UNLOCKED = 0xD6D // 3437 + SYS___FWRITING_UNLOCKED = 0xD6E // 3438 + SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 + SYS___FPURGE_UNLOCKED = 0xD70 // 3440 + SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 + SYS___LE_CEEGTJS = 0xD72 // 3442 + SYS___LE_RECORD_DUMP = 0xD73 // 3443 + SYS_FSTAT64 = 0xD74 // 3444 + SYS_LSTAT64 = 0xD75 // 3445 + SYS_STAT64 = 0xD76 // 3446 + SYS___READDIR2_64 = 0xD77 // 3447 + SYS___OPEN_STAT64 = 0xD78 // 3448 + SYS_FTW64 = 0xD79 // 3449 + SYS_NFTW64 = 0xD7A // 3450 + SYS_UTIME64 = 0xD7B // 3451 + SYS_UTIMES64 = 0xD7C // 3452 + SYS___GETIPC64 = 0xD7D // 3453 + SYS_MSGCTL64 = 0xD7E // 3454 + SYS_SEMCTL64 = 0xD7F // 3455 + SYS_SHMCTL64 = 0xD80 // 3456 + SYS_MSGXRCV64 = 0xD81 // 3457 + SYS___MGXR64 = 0xD81 // 3457 + SYS_W_GETPSENT64 = 0xD82 // 3458 + SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 + SYS_FTIME64 = 0xD85 // 3461 + SYS_GETUTXENT64 = 0xD86 // 3462 + SYS_GETUTXID64 = 0xD87 // 3463 + SYS_GETUTXLINE64 = 0xD88 // 3464 + SYS_PUTUTXLINE64 = 0xD89 // 3465 + SYS_NEWLOCALE = 0xD8A // 3466 + SYS_FREELOCALE = 0xD8B // 3467 + SYS_USELOCALE = 0xD8C // 3468 + SYS_DUPLOCALE = 0xD8D // 3469 + SYS___CHATTR64 = 0xD9C // 3484 + SYS___LCHATTR64 = 0xD9D // 3485 + SYS___FCHATTR64 = 0xD9E // 3486 + SYS_____CHATTR64_A = 0xD9F // 3487 + SYS_____LCHATTR64_A = 0xDA0 // 3488 + SYS___LE_CEEUSGD = 0xDA1 // 3489 + SYS___LE_IFAM_CON = 0xDA2 // 3490 + SYS___LE_IFAM_DSC = 0xDA3 // 3491 + SYS___LE_IFAM_GET = 0xDA4 // 3492 + SYS___LE_IFAM_QRY = 0xDA5 // 3493 + SYS_ALIGNED_ALLOC = 0xDA6 // 3494 + SYS_ACCEPT4 = 0xDA7 // 3495 + SYS___ACCEPT4_A = 0xDA8 // 3496 + SYS_COPYFILERANGE = 0xDA9 // 3497 + SYS_GETLINE = 0xDAA // 3498 + SYS___GETLINE_A = 0xDAB // 3499 + SYS_DIRFD = 0xDAC // 3500 + SYS_CLOCK_GETTIME = 
0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 
0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index eff6bcdef81..0036746ea19 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1178,7 +1178,8 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1198,7 +1199,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x80000 + PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -2481,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2935,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -3211,7 +3221,7 @@ const ( DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x53 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -4595,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x146 + NL80211_ATTR_MAX = 0x149 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4861,7 +4871,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x16 + NL80211_BSS_MAX = 0x18 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4965,7 +4975,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9a + 
NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5199,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1c + NL80211_FREQUENCY_ATTR_MAX = 0x1f NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 438a30affad..fd402da43fc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -477,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index adceca3553b..eb7a5e1864a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -492,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index eeaa00a37d6..d78ac108b6c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -470,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6739aa91d4e..cd06d47f1f7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -471,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 9920ef6317d..2f28fe26c1a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -472,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2923b799a48..71d6cac2f1a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 
ce2750ee415..8596d453563 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 3038811d70b..cd60ea18662 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index efc6fed18c1..b0ae420c489 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 9a654b75a90..8359728759b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -482,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 40d358e33e3..69eb6a5c689 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 148c6ceb869..5f583cb62bf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 72ba81543ef..15adc04142f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -499,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 71e765508e2..cf3ce900377 100644 
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -495,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 4abbdb9de93..590b56739c5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 54f31be6373..d9a13af4684 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -25,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -69,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -325,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -336,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -412,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 [4]byte + Cm2R3 [4]byte + Cm2Routcde *uint32 + Cm2Descr *uint32 + Cm2Msgflag uint32 + Cm2Token uint32 + Cm2Msgid *uint32 + Cm2R4 [4]byte + Cm2DomToken uint32 + Cm2DomMsgid *uint32 + Cm2ModCartptr *byte + Cm2ModConsidptr *byte + Cm2MsgCart [8]byte + Cm2MsgConsid [4]byte + Cm2R5 [12]byte +} + +const ( + CC_modify = 1 + CC_stop = 2 + CONSOLE_FORMAT_2 = 2 + CONSOLE_FORMAT_3 = 3 + CONSOLE_HRDCPY = 0x80000000 +) + +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_CACHED = 0x20 + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + _ [44]byte +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode int32 +} + +type SysvShmDesc struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ uint8 + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime Time_t + Dtime Time_t + Ctime Time_t +} 
+ +type SysvShmDesc64 struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ byte + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 +} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index ce2d713d62e..16f90560a23 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 +//go:build windows package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index ba64caca5d3..00000000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031d45..6525c62f3c2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys 
SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6ace..d8cb71db0a6 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82c30..5c6035ddfa9 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func 
DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func 
SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go deleted file mode 100644 index 2b19c93e8ea..00000000000 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer -// interface. Given the name of a (signed or unsigned) integer type T that has constants -// defined, stringer will create a new self-contained Go source file implementing -// -// func (t T) String() string -// -// The file is created in the same package and directory as the package that defines T. -// It has helpful defaults designed for use with go generate. -// -// Stringer works best with constants that are consecutive values such as created using iota, -// but creates good code regardless. In the future it might also provide custom support for -// constant sets that are bit patterns. -// -// For example, given this snippet, -// -// package painkiller -// -// type Pill int -// -// const ( -// Placebo Pill = iota -// Aspirin -// Ibuprofen -// Paracetamol -// Acetaminophen = Paracetamol -// ) -// -// running this command -// -// stringer -type=Pill -// -// in the same directory will create the file pill_string.go, in package painkiller, -// containing a definition of -// -// func (Pill) String() string -// -// That method will translate the value of a Pill constant to the string representation -// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will -// print the string "Aspirin". -// -// Typically this process would be run using go generate, like this: -// -// //go:generate stringer -type=Pill -// -// If multiple constants have the same value, the lexically first matching name will -// be used (in the example, Acetaminophen will print as "Paracetamol"). -// -// With no arguments, it processes the package in the current directory. 
-// Otherwise, the arguments must name a single directory holding a Go package -// or a set of Go source files that represent a single Go package. -// -// The -type flag accepts a comma-separated list of types so a single run can -// generate methods for multiple types. The default output file is t_string.go, -// where t is the lower-cased name of the first type listed. It can be overridden -// with the -output flag. -// -// The -linecomment flag tells stringer to generate the text of any line comment, trimmed -// of leading spaces, instead of the constant name. For instance, if the constants above had a -// Pill prefix, one could write -// -// PillAspirin // Aspirin -// -// to suppress it in the output. -package main // import "golang.org/x/tools/cmd/stringer" - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/constant" - "go/format" - "go/token" - "go/types" - "log" - "os" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/go/packages" -) - -var ( - typeNames = flag.String("type", "", "comma-separated list of type names; must be set") - output = flag.String("output", "", "output file name; default srcdir/_string.go") - trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") - linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") - buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") -) - -// Usage is a replacement usage function for the flags package. -func Usage() { - fmt.Fprintf(os.Stderr, "Usage of stringer:\n") - fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") - fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n") - fmt.Fprintf(os.Stderr, "For more information, see:\n") - fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") - fmt.Fprintf(os.Stderr, "Flags:\n") - flag.PrintDefaults() -} - -func main() { - log.SetFlags(0) - log.SetPrefix("stringer: ") - flag.Usage = Usage - flag.Parse() - if len(*typeNames) == 0 { - flag.Usage() - os.Exit(2) - } - types := strings.Split(*typeNames, ",") - var tags []string - if len(*buildTags) > 0 { - tags = strings.Split(*buildTags, ",") - } - - // We accept either one directory or a list of files. Which do we have? - args := flag.Args() - if len(args) == 0 { - // Default: process whole package in current directory. - args = []string{"."} - } - - // Parse the package once. - var dir string - g := Generator{ - trimPrefix: *trimprefix, - lineComment: *linecomment, - } - // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). - if len(args) == 1 && isDirectory(args[0]) { - dir = args[0] - } else { - if len(tags) != 0 { - log.Fatal("-tags option applies only to directories, not when files are specified") - } - dir = filepath.Dir(args[0]) - } - - g.parsePackage(args, tags) - - // Print the header and package clause. - g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) - g.Printf("\n") - g.Printf("package %s", g.pkg.name) - g.Printf("\n") - g.Printf("import \"strconv\"\n") // Used by all methods. - - // Run generate for each type. - for _, typeName := range types { - g.generate(typeName) - } - - // Format the output. - src := g.format() - - // Write to file. 
- outputName := *output - if outputName == "" { - baseName := fmt.Sprintf("%s_string.go", types[0]) - outputName = filepath.Join(dir, strings.ToLower(baseName)) - } - err := os.WriteFile(outputName, src, 0644) - if err != nil { - log.Fatalf("writing output: %s", err) - } -} - -// isDirectory reports whether the named file is a directory. -func isDirectory(name string) bool { - info, err := os.Stat(name) - if err != nil { - log.Fatal(err) - } - return info.IsDir() -} - -// Generator holds the state of the analysis. Primarily used to buffer -// the output for format.Source. -type Generator struct { - buf bytes.Buffer // Accumulated output. - pkg *Package // Package we are scanning. - - trimPrefix string - lineComment bool - - logf func(format string, args ...interface{}) // test logging hook; nil when not testing -} - -func (g *Generator) Printf(format string, args ...interface{}) { - fmt.Fprintf(&g.buf, format, args...) -} - -// File holds a single parsed file and associated data. -type File struct { - pkg *Package // Package to which this file belongs. - file *ast.File // Parsed AST. - // These fields are reset for each type being generated. - typeName string // Name of the constant type. - values []Value // Accumulator for constant values of that type. - - trimPrefix string - lineComment bool -} - -type Package struct { - name string - defs map[*ast.Ident]types.Object - files []*File -} - -// parsePackage analyzes the single package constructed from the patterns and tags. -// parsePackage exits if there is an error. -func (g *Generator) parsePackage(patterns []string, tags []string) { - cfg := &packages.Config{ - Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, - // TODO: Need to think about constants in test files. Maybe write type_string_test.go - // in a separate pass? For later. - Tests: false, - BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, - Logf: g.logf, - } - pkgs, err := packages.Load(cfg, patterns...) - if err != nil { - log.Fatal(err) - } - if len(pkgs) != 1 { - log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) - } - g.addPackage(pkgs[0]) -} - -// addPackage adds a type checked Package and its syntax files to the generator. -func (g *Generator) addPackage(pkg *packages.Package) { - g.pkg = &Package{ - name: pkg.Name, - defs: pkg.TypesInfo.Defs, - files: make([]*File, len(pkg.Syntax)), - } - - for i, file := range pkg.Syntax { - g.pkg.files[i] = &File{ - file: file, - pkg: g.pkg, - trimPrefix: g.trimPrefix, - lineComment: g.lineComment, - } - } -} - -// generate produces the String method for the named type. -func (g *Generator) generate(typeName string) { - values := make([]Value, 0, 100) - for _, file := range g.pkg.files { - // Set the state for this run of the walker. - file.typeName = typeName - file.values = nil - if file.file != nil { - ast.Inspect(file.file, file.genDecl) - values = append(values, file.values...) - } - } - - if len(values) == 0 { - log.Fatalf("no values defined for type %s", typeName) - } - // Generate code that will fail if the constants change value. 
- g.Printf("func _() {\n") - g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") - g.Printf("\t// Re-run the stringer command to generate them again.\n") - g.Printf("\tvar x [1]struct{}\n") - for _, v := range values { - g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) - } - g.Printf("}\n") - runs := splitIntoRuns(values) - // The decision of which pattern to use depends on the number of - // runs in the numbers. If there's only one, it's easy. For more than - // one, there's a tradeoff between complexity and size of the data - // and code vs. the simplicity of a map. A map takes more space, - // but so does the code. The decision here (crossover at 10) is - // arbitrary, but considers that for large numbers of runs the cost - // of the linear scan in the switch might become important, and - // rather than use yet another algorithm such as binary search, - // we punt and use a map. In any case, the likelihood of a map - // being necessary for any realistic example other than bitmasks - // is very low. And bitmasks probably deserve their own analysis, - // to be done some other day. - switch { - case len(runs) == 1: - g.buildOneRun(runs, typeName) - case len(runs) <= 10: - g.buildMultipleRuns(runs, typeName) - default: - g.buildMap(runs, typeName) - } -} - -// splitIntoRuns breaks the values into runs of contiguous sequences. -// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. -// The input slice is known to be non-empty. -func splitIntoRuns(values []Value) [][]Value { - // We use stable sort so the lexically first name is chosen for equal elements. - sort.Stable(byValue(values)) - // Remove duplicates. Stable sort has put the one we want to print first, - // so use that one. The String method won't care about which named constant - // was the argument, so the first name for the given value is the only one to keep. - // We need to do this because identical values would cause the switch or map - // to fail to compile. - j := 1 - for i := 1; i < len(values); i++ { - if values[i].value != values[i-1].value { - values[j] = values[i] - j++ - } - } - values = values[:j] - runs := make([][]Value, 0, 10) - for len(values) > 0 { - // One contiguous sequence per outer loop. - i := 1 - for i < len(values) && values[i].value == values[i-1].value+1 { - i++ - } - runs = append(runs, values[:i]) - values = values[i:] - } - return runs -} - -// format returns the gofmt-ed contents of the Generator's buffer. -func (g *Generator) format() []byte { - src, err := format.Source(g.buf.Bytes()) - if err != nil { - // Should never happen, but can arise when developing this code. - // The user can compile the output to see the error. - log.Printf("warning: internal error: invalid Go generated: %s", err) - log.Printf("warning: compile the package to analyze the error") - return g.buf.Bytes() - } - return src -} - -// Value represents a declared constant. -type Value struct { - originalName string // The name of the constant. - name string // The name with trimmed prefix. - // The value is stored as a bit pattern alone. The boolean tells us - // whether to interpret it as an int64 or a uint64; the only place - // this matters is when sorting. - // Much of the time the str field is all we need; it is printed - // by Value.String. - value uint64 // Will be converted to int64 when needed. - signed bool // Whether the constant is a signed type. - str string // The string representation given by the "go/constant" package. 
-} - -func (v *Value) String() string { - return v.str -} - -// byValue lets us sort the constants into increasing order. -// We take care in the Less method to sort in signed or unsigned order, -// as appropriate. -type byValue []Value - -func (b byValue) Len() int { return len(b) } -func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byValue) Less(i, j int) bool { - if b[i].signed { - return int64(b[i].value) < int64(b[j].value) - } - return b[i].value < b[j].value -} - -// genDecl processes one declaration clause. -func (f *File) genDecl(node ast.Node) bool { - decl, ok := node.(*ast.GenDecl) - if !ok || decl.Tok != token.CONST { - // We only care about const declarations. - return true - } - // The name of the type of the constants we are declaring. - // Can change if this is a multi-element declaration. - typ := "" - // Loop over the elements of the declaration. Each element is a ValueSpec: - // a list of names possibly followed by a type, possibly followed by values. - // If the type and value are both missing, we carry down the type (and value, - // but the "go/types" package takes care of that). - for _, spec := range decl.Specs { - vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. - if vspec.Type == nil && len(vspec.Values) > 0 { - // "X = 1". With no type but a value. If the constant is untyped, - // skip this vspec and reset the remembered type. - typ = "" - - // If this is a simple type conversion, remember the type. - // We don't mind if this is actually a call; a qualified call won't - // be matched (that will be SelectorExpr, not Ident), and only unusual - // situations will result in a function call that appears to be - // a type conversion. - ce, ok := vspec.Values[0].(*ast.CallExpr) - if !ok { - continue - } - id, ok := ce.Fun.(*ast.Ident) - if !ok { - continue - } - typ = id.Name - } - if vspec.Type != nil { - // "X T". We have a type. Remember it. - ident, ok := vspec.Type.(*ast.Ident) - if !ok { - continue - } - typ = ident.Name - } - if typ != f.typeName { - // This is not the type we're looking for. - continue - } - // We now have a list of names (from one line of source code) all being - // declared with the desired type. - // Grab their names and actual values and store them in f.values. - for _, name := range vspec.Names { - if name.Name == "_" { - continue - } - // This dance lets the type checker find the values for us. It's a - // bit tricky: look up the object declared by the name, find its - // types.Const, and extract its value. - obj, ok := f.pkg.defs[name] - if !ok { - log.Fatalf("no value for constant %s", name) - } - info := obj.Type().Underlying().(*types.Basic).Info() - if info&types.IsInteger == 0 { - log.Fatalf("can't handle non-integer constant type %s", typ) - } - value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. 
- if value.Kind() != constant.Int { - log.Fatalf("can't happen: constant is not an integer %s", name) - } - i64, isInt := constant.Int64Val(value) - u64, isUint := constant.Uint64Val(value) - if !isInt && !isUint { - log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) - } - if !isInt { - u64 = uint64(i64) - } - v := Value{ - originalName: name.Name, - value: u64, - signed: info&types.IsUnsigned == 0, - str: value.String(), - } - if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { - v.name = strings.TrimSpace(c.Text()) - } else { - v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) - } - f.values = append(f.values, v) - } - } - return false -} - -// Helpers - -// usize returns the number of bits of the smallest unsigned integer -// type that will hold n. Used to create the smallest possible slice of -// integers to use as indexes into the concatenated strings. -func usize(n int) int { - switch { - case n < 1<<8: - return 8 - case n < 1<<16: - return 16 - default: - // 2^32 is enough constants for anyone. - return 32 - } -} - -// declareIndexAndNameVars declares the index slices and concatenated names -// strings representing the runs of values. -func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { - var indexes, names []string - for i, run := range runs { - index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) - if len(run) != 1 { - indexes = append(indexes, index) - } - names = append(names, name) - } - g.Printf("const (\n") - for _, name := range names { - g.Printf("\t%s\n", name) - } - g.Printf(")\n\n") - - if len(indexes) > 0 { - g.Printf("var (") - for _, index := range indexes { - g.Printf("\t%s\n", index) - } - g.Printf(")\n\n") - } -} - -// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars -func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { - index, name := g.createIndexAndNameDecl(run, typeName, "") - g.Printf("const %s\n", name) - g.Printf("var %s\n", index) -} - -// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". -func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { - b := new(bytes.Buffer) - indexes := make([]int, len(run)) - for i := range run { - b.WriteString(run[i].name) - indexes[i] = b.Len() - } - nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) - nameLen := b.Len() - b.Reset() - fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) - for i, v := range indexes { - if i > 0 { - fmt.Fprintf(b, ", ") - } - fmt.Fprintf(b, "%d", v) - } - fmt.Fprintf(b, "}") - return b.String(), nameConst -} - -// declareNameVars declares the concatenated names string representing all the values in the runs. -func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { - g.Printf("const _%s_name%s = \"", typeName, suffix) - for _, run := range runs { - for i := range run { - g.Printf("%s", run[i].name) - } - } - g.Printf("\"\n") -} - -// buildOneRun generates the variables and String method for a single run of contiguous values. -func (g *Generator) buildOneRun(runs [][]Value, typeName string) { - values := runs[0] - g.Printf("\n") - g.declareIndexAndNameVar(values, typeName) - // The generated code is simple enough to write as a Printf format. 
- lessThanZero := "" - if values[0].signed { - lessThanZero = "i < 0 || " - } - if values[0].value == 0 { // Signed or unsigned, 0 is still 0. - g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) - } else { - g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) - } -} - -// Arguments to format are: -// -// [1]: type name -// [2]: size of index element (8 for uint8 etc.) -// [3]: less than zero check (for signed types) -const stringOneRun = `func (i %[1]s) String() string { - if %[3]si >= %[1]s(len(_%[1]s_index)-1) { - return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] -} -` - -// Arguments to format are: -// [1]: type name -// [2]: lowest defined value for type, as a string -// [3]: size of index element (8 for uint8 etc.) -// [4]: less than zero check (for signed types) -/* - */ -const stringOneRunWithOffset = `func (i %[1]s) String() string { - i -= %[2]s - if %[4]si >= %[1]s(len(_%[1]s_index)-1) { - return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" - } - return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] -} -` - -// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. -// For this pattern, a single Printf format won't do. -func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { - g.Printf("\n") - g.declareIndexAndNameVars(runs, typeName) - g.Printf("func (i %s) String() string {\n", typeName) - g.Printf("\tswitch {\n") - for i, values := range runs { - if len(values) == 1 { - g.Printf("\tcase i == %s:\n", &values[0]) - g.Printf("\t\treturn _%s_name_%d\n", typeName, i) - continue - } - if values[0].value == 0 && !values[0].signed { - // For an unsigned lower bound of 0, "0 <= i" would be redundant. - g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) - } else { - g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) - } - if values[0].value != 0 { - g.Printf("\t\ti -= %s\n", &values[0]) - } - g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", - typeName, i, typeName, i, typeName, i) - } - g.Printf("\tdefault:\n") - g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) - g.Printf("\t}\n") - g.Printf("}\n") -} - -// buildMap handles the case where the space is so sparse a map is a reasonable fallback. -// It's a rare situation but has simple code. -func (g *Generator) buildMap(runs [][]Value, typeName string) { - g.Printf("\n") - g.declareNameVars(runs, typeName, "") - g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) - n := 0 - for _, values := range runs { - for _, value := range values { - g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) - n += len(value.name) - } - } - g.Printf("}\n\n") - g.Printf(stringMap, typeName) -} - -// Argument to format is the type name. -const stringMap = `func (i %[1]s) String() string { - if str, ok := _%[1]s_map[i]; ok { - return str - } - return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" -} -` diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 00000000000..47a9a541164 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. 
+// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. + for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? 
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go deleted file mode 100644 index 03543bd4bb8..00000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "go/token" - "go/types" - "io" - "os/exec" - - "golang.org/x/tools/internal/gcimporter" -) - -// Find returns the name of an object (.o) or archive (.a) file -// containing type information for the specified import path, -// using the go command. -// If no file was found, an empty filename is returned. -// -// A relative srcDir is interpreted relative to the current working directory. -// -// Find also returns the package's resolved (canonical) import path, -// reflecting the effects of srcDir and vendoring on importPath. -// -// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, -// which is more efficient. 
-func Find(importPath, srcDir string) (filename, path string) { - cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) - cmd.Dir = srcDir - out, err := cmd.CombinedOutput() - if err != nil { - return "", "" - } - var data struct { - ImportPath string - Export string - } - json.Unmarshal(out, &data) - return data.Export, data.ImportPath -} - -// NewReader returns a reader for the export data section of an object -// (.o) or archive (.a) file read from r. The new reader may provide -// additional trailing data beyond the end of the export data. -func NewReader(r io.Reader) (io.Reader, error) { - buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) - if err != nil { - return nil, err - } - - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. - return buf, nil - } -} - -// readAll works the same way as io.ReadAll, but avoids allocations and copies -// by preallocating a byte slice of the necessary size if the size is known up -// front. This is always possible when the input is an archive. In that case, -// NewReader will return the known size using an io.LimitedReader. -func readAll(r io.Reader) ([]byte, error) { - if lr, ok := r.(*io.LimitedReader); ok { - data := make([]byte, lr.N) - _, err := io.ReadFull(lr, data) - return data, err - } - return io.ReadAll(r) -} - -// Read reads export data from in, decodes it, and returns type -// information for the package. -// -// The package path (effectively its linker symbol prefix) is -// specified by path, since unlike the package name, this information -// may not be recorded in the export data. -// -// File position information is added to fset. -// -// Read may inspect and add to the imports map to ensure that references -// within the export data to other packages are consistent. The caller -// must ensure that imports[path] does not exist, or exists but is -// incomplete (see types.Package.Complete), and Read inserts the -// resulting package into this map entry. -// -// On return, the state of the reader is undefined. -func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - data, err := readAll(in) - if err != nil { - return nil, fmt.Errorf("reading export data for %q: %v", path, err) - } - - if bytes.HasPrefix(data, []byte("!")) { - return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) - } - - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) - } - } - return nil, fmt.Errorf("empty export data for %s", path) -} - -// Write writes encoded type information for the specified package to out. -// The FileSet provides file position information for named objects. -func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - if _, err := io.WriteString(out, "i"); err != nil { - return err - } - return gcimporter.IExportData(out, fset, pkg) -} - -// ReadBundle reads an export bundle from in, decodes it, and returns type -// information for the packages. -// File position information is added to fset. -// -// ReadBundle may inspect and add to the imports map to ensure that references -// within the export bundle to other packages are consistent. -// -// On return, the state of the reader is undefined. -// -// Experimental: This API is experimental and may change in the future. -func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { - data, err := readAll(in) - if err != nil { - return nil, fmt.Errorf("reading export bundle: %v", err) - } - return gcimporter.IImportBundle(fset, imports, data) -} - -// WriteBundle writes encoded type information for the specified packages to out. -// The FileSet provides file position information for named objects. -// -// Experimental: This API is experimental and may change in the future. -func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - return gcimporter.IExportBundle(out, fset, pkgs) -} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go deleted file mode 100644 index 37a7247e268..00000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcexportdata - -import ( - "fmt" - "go/token" - "go/types" - "os" -) - -// NewImporter returns a new instance of the types.Importer interface -// that reads type information from export data files written by gc. -// The Importer also satisfies types.ImporterFrom. -// -// Export data files are located using "go build" workspace conventions -// and the build.Default context. -// -// Use this importer instead of go/importer.For("gc", ...) to avoid the -// version-skew problems described in the documentation of this package, -// or to control the FileSet or access the imports map populated during -// package loading. -// -// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, -// which is more efficient. 
-func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { - return importer{fset, imports} -} - -type importer struct { - fset *token.FileSet - imports map[string]*types.Package -} - -func (imp importer) Import(importPath string) (*types.Package, error) { - return imp.ImportFrom(importPath, "", 0) -} - -func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { - filename, path := Find(importPath, srcDir) - if filename == "" { - if importPath == "unsafe" { - // Even for unsafe, call Find first in case - // the package was vendored. - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %s", importPath) - } - - if pkg, ok := imp.imports[path]; ok && pkg.Complete() { - return pkg, nil // cache hit - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - f.Close() - if err != nil { - // add file name to error - err = fmt.Errorf("reading export data: %s: %v", filename, err) - } - }() - - r, err := NewReader(f) - if err != nil { - return nil, err - } - - return Read(r, imp.fset, imp.imports, path) -} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index 333676b7cfc..00000000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. -package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. - return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go deleted file mode 100644 index b2a0b7c6a67..00000000000 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package packages loads Go packages for inspection and analysis. - -The [Load] function takes as input a list of patterns and returns a -list of [Package] values describing individual packages matched by those -patterns. -A [Config] specifies configuration options, the most important of which is -the [LoadMode], which controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool. -The default build tool is the go command. -Its supported patterns are described at -https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. - -Load may be used in Go projects that use alternative build systems, by -installing an appropriate "driver" program for the build system and -specifying its location in the GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. -The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -they identify. -(See driverRequest and driverResponse types for the JSON -schema used by the protocol. -Though the protocol is supported, these types are currently unexported; -see #64608 for a proposal to publish them.) - -Regardless of driver, all patterns with the prefix "query=", where query is a -non-empty string of letters from [a-z], are reserved and may be -interpreted as query operators. - -Two query operators are currently supported: "file" and "pattern". - -The query "file=path/to/file.go" matches the package or packages enclosing -the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" -might return the packages "fmt" and "fmt [fmt.test]". - -The query "pattern=string" causes "string" to be passed directly to -the underlying build tool. In most cases this is unnecessary, -but an application can use Load("pattern=" + x) as an escaping mechanism -to ensure that x is not interpreted as a query operator if it contains '='. - -All other query operators are reserved for future use and currently -cause Load to report an error. - -The Package struct provides basic information about the package, including - - - ID, a unique identifier for the package in the returned set; - - GoFiles, the names of the package's Go source files; - - Imports, a map from source import strings to the Packages they name; - - Types, the type information for the package's exported symbols; - - Syntax, the parsed syntax trees for the package's source code; and - - TypesInfo, the result of a complete type-check of the package syntax trees. - -(See the documentation for type Package for the complete list of fields -and more detailed descriptions.) - -For example, - - Load(nil, "bytes", "unicode...") - -returns four Package structs describing the standard library packages -bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern -can match multiple packages and that a package might be matched by -multiple patterns: in general it is not possible to determine which -packages correspond to which patterns. - -Note that the list returned by Load contains only the packages matched -by the patterns. Their dependencies can be found by walking the import -graph using the Imports fields. - -The Load function can be configured by passing a pointer to a Config as -the first argument. 
A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. -See the documentation for type Config for details. - -As noted earlier, the Config.Mode controls the amount of detail -reported about the loaded packages. See the documentation for type LoadMode -for details. - -Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them -according to the conventions of the underlying build system. -See the Example function for typical usage. -*/ -package packages // import "golang.org/x/tools/go/packages" - -/* - -Motivation and design considerations - -The new package's design solves problems addressed by two existing -packages: go/build, which locates and describes packages, and -golang.org/x/tools/go/loader, which loads, parses and type-checks them. -The go/build.Package structure encodes too much of the 'go build' way -of organizing projects, leaving us in need of a data type that describes a -package of Go source code independent of the underlying build system. -We wanted something that works equally well with go build and vgo, and -also other build systems such as Bazel and Blaze, making it possible to -construct analysis tools that work in all these environments. -Tools such as errcheck and staticcheck were essentially unavailable to -the Go community at Google, and some of Google's internal tools for Go -are unavailable externally. -This new package provides a uniform way to obtain package metadata by -querying each of these build systems, optionally supporting their -preferred command-line notations for packages, so that tools integrate -neatly with users' build environments. The Metadata query function -executes an external query tool appropriate to the current workspace. - -Loading packages always returns the complete import graph "all the way down", -even if all you want is information about a single package, because the query -mechanisms of all the build systems we currently support ({go,vgo} list, and -blaze/bazel aspect-based query) cannot provide detailed information -about one package without visiting all its dependencies too, so there is -no additional asymptotic cost to providing transitive information. -(This property might not be true of a hypothetical 5th build system.) - -In calls to TypeCheck, all initial packages, and any package that -transitively depends on one of them, must be loaded from source. -Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from -source; D may be loaded from export data, and E may not be loaded at all -(though it's possible that D's export data mentions it, so a -types.Package may be created for it and exposed.) - -The old loader had a feature to suppress type-checking of function -bodies on a per-package basis, primarily intended to reduce the work of -obtaining type information for imported packages. Now that imports are -satisfied by export data, the optimization no longer seems necessary. - -Despite some early attempts, the old loader did not exploit export data, -instead always using the equivalent of WholeProgram mode. This was due -to the complexity of mixing source and export data packages (now -resolved by the upward traversal mentioned above), and because export data -files were nearly always missing or stale. Now that 'go build' supports -caching, all the underlying build systems can guarantee to produce -export data in a reasonable (amortized) time. 
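The doc.go text removed above describes the go/packages entry point: Load takes a Config (whose Mode selects how much detail to compute) plus build-system patterns, and returns a slice of Package values whose Imports field links the dependency graph. A minimal, non-authoritative sketch of that usage, assuming only the exported API named in the deleted documentation (the patterns and mode bits are chosen purely for illustration):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		// NeedName|NeedFiles|NeedImports asks for package identity, file
		// lists and the import graph; richer bits (NeedTypes, NeedSyntax,
		// NeedTypesInfo, ...) cost correspondingly more to compute.
		cfg := &packages.Config{
			Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
		}
		pkgs, err := packages.Load(cfg, "bytes", "unicode...")
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range pkgs {
			// Dependencies are reachable through the Imports map, as the
			// deleted doc comment notes; only matched packages are returned.
			fmt.Println(p.ID, len(p.GoFiles), "files,", len(p.Imports), "direct imports")
		}
	}

Per-package problems are reported on each Package's Errors field rather than through Load's returned error, so tools typically also inspect pkgs[i].Errors (or call packages.PrintErrors) after a successful Load.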
- -Test "main" packages synthesized by the build system are now reported as -first-class packages, avoiding the need for clients (such as go/ssa) to -reinvent this generation logic. - -One way in which go/packages is simpler than the old loader is in its -treatment of in-package tests. In-package tests are packages that -consist of all the files of the library under test, plus the test files. -The old loader constructed in-package tests by a two-phase process of -mutation called "augmentation": first it would construct and type check -all the ordinary library packages and type-check the packages that -depend on them; then it would add more (test) files to the package and -type-check again. This two-phase approach had four major problems: -1) in processing the tests, the loader modified the library package, - leaving no way for a client application to see both the test - package and the library package; one would mutate into the other. -2) because test files can declare additional methods on types defined in - the library portion of the package, the dispatch of method calls in - the library portion was affected by the presence of the test files. - This should have been a clue that the packages were logically - different. -3) this model of "augmentation" assumed at most one in-package test - per library package, which is true of projects using 'go build', - but not other build systems. -4) because of the two-phase nature of test processing, all packages that - import the library package had to be processed before augmentation, - forcing a "one-shot" API and preventing the client from calling Load - in several times in sequence as is now possible in WholeProgram mode. - (TypeCheck mode has a similar one-shot restriction for a different reason.) - -Early drafts of this package supported "multi-shot" operation. -Although it allowed clients to make a sequence of calls (or concurrent -calls) to Load, building up the graph of Packages incrementally, -it was of marginal value: it complicated the API -(since it allowed some options to vary across calls but not others), -it complicated the implementation, -it cannot be made to work in Types mode, as explained above, -and it was less efficient than making one combined call (when this is possible). -Among the clients we have inspected, none made multiple calls to load -but could not be easily and satisfactorily modified to make only a single call. -However, applications changes may be required. -For example, the ssadump command loads the user-specified packages -and in addition the runtime package. It is tempting to simply append -"runtime" to the user-provided list, but that does not work if the user -specified an ad-hoc package such as [a.go b.go]. -Instead, ssadump no longer requests the runtime package, -but seeks it among the dependencies of the user-specified packages, -and emits an error if it is not found. - -Overlays: The Overlay field in the Config allows providing alternate contents -for Go source files, by providing a mapping from file path to contents. -go/packages will pull in new imports added in overlay files when go/packages -is run in LoadImports mode or greater. -Overlay support for the go list driver isn't complete yet: if the file doesn't -exist on disk, it will only be recognized in an overlay if it is a non-test file -and the package would be reported even without the overlay. - -Questions & Tasks - -- Add GOARCH/GOOS? - They are not portable concepts, but could be made portable. 
- Our goal has been to allow users to express themselves using the conventions - of the underlying build system: if the build system honors GOARCH - during a build and during a metadata query, then so should - applications built atop that query mechanism. - Conversely, if the target architecture of the build is determined by - command-line flags, the application can pass the relevant - flags through to the build system using a command such as: - myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" - However, this approach is low-level, unwieldy, and non-portable. - GOOS and GOARCH seem important enough to warrant a dedicated option. - -- How should we handle partial failures such as a mixture of good and - malformed patterns, existing and non-existent packages, successful and - failed builds, import failures, import cycles, and so on, in a call to - Load? - -- Support bazel, blaze, and go1.10 list, not just go1.11 list. - -- Handle (and test) various partial success cases, e.g. - a mixture of good packages and: - invalid patterns - nonexistent packages - empty packages - packages with malformed package or import declarations - unreadable files - import cycles - other parse errors - type errors - Make sure we record errors at the correct place in the graph. - -- Missing packages among initial arguments are not reported. - Return bogus packages for them, like golist does. - -- "undeclared name" errors (for example) are reported out of source file - order. I suspect this is due to the breadth-first resolution now used - by go/types. Is that a bug? Discuss with gri. - -*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go deleted file mode 100644 index 7db1d1293ab..00000000000 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - -package packages - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - "os/exec" - "strings" -) - -// The Driver Protocol -// -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { - Mode LoadMode `json:"mode"` - // Env specifies the environment the underlying build system should be run in. - Env []string `json:"env"` - // BuildFlags are flags that should be passed to the underlying build system. - BuildFlags []string `json:"build_flags"` - // Tests specifies whether the patterns should also return test packages. 
- Tests bool `json:"tests"` - // Overlay maps file paths (relative to the driver's working directory) to the byte contents - // of overlay files. - Overlay map[string][]byte `json:"overlay"` -} - -// findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." -// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its -// value, otherwise it searches for a binary named gopackagesdriver on the PATH. -func findExternalDriver(cfg *Config) driver { - const toolPrefix = "GOPACKAGESDRIVER=" - tool := "" - for _, env := range cfg.Env { - if val := strings.TrimPrefix(env, toolPrefix); val != env { - tool = val - } - } - if tool != "" && tool == "off" { - return nil - } - if tool == "" { - var err error - tool, err = exec.LookPath("gopackagesdriver") - if err != nil { - return nil - } - } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ - Mode: cfg.Mode, - Env: cfg.Env, - BuildFlags: cfg.BuildFlags, - Tests: cfg.Tests, - Overlay: cfg.Overlay, - }) - if err != nil { - return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) - } - - buf := new(bytes.Buffer) - stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) - cmd.Dir = cfg.Dir - cmd.Env = cfg.Env - cmd.Stdin = bytes.NewReader(req) - cmd.Stdout = buf - cmd.Stderr = stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) - } - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) - } - - var response driverResponse - if err := json.Unmarshal(buf.Bytes(), &response); err != nil { - return nil, err - } - return &response, nil - } -} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go deleted file mode 100644 index cd375fbc3c2..00000000000 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "unicode" - - "golang.org/x/tools/go/internal/packagesdriver" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/packagesinternal" -) - -// debug controls verbose logging. -var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) - -// A goTooOldError reports that the go command -// found by exec.LookPath is too old to use the new go list behavior. -type goTooOldError struct { - error -} - -// responseDeduper wraps a driverResponse, deduplicating its contents. -type responseDeduper struct { - seenRoots map[string]bool - seenPackages map[string]*Package - dr *driverResponse -} - -func newDeduper() *responseDeduper { - return &responseDeduper{ - dr: &driverResponse{}, - seenRoots: map[string]bool{}, - seenPackages: map[string]*Package{}, - } -} - -// addAll fills in r with a driverResponse. 
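The driver-protocol comment in the deleted external.go above is terse: a GOPACKAGESDRIVER binary receives the load patterns as argv, a JSON driverRequest on stdin, and must print a JSON driverResponse on stdout. A hypothetical, minimal driver sketch follows; the request fields mirror the deleted driverRequest type, while the response shape is deliberately simplified (the real driverResponse is defined in the deleted packages.go):

	package main

	import (
		"encoding/json"
		"os"
	)

	// driverRequest mirrors the struct removed above; LoadMode is an int
	// bitmask upstream, so a plain int stands in for it here.
	type driverRequest struct {
		Mode       int               `json:"mode"`
		Env        []string          `json:"env"`
		BuildFlags []string          `json:"build_flags"`
		Tests      bool              `json:"tests"`
		Overlay    map[string][]byte `json:"overlay"`
	}

	// driverResponse is a stripped-down stand-in: Roots lists the IDs of the
	// packages matching the patterns, Packages carries their metadata.
	type driverResponse struct {
		Roots    []string          `json:"Roots,omitempty"`
		Packages []json.RawMessage `json:"Packages"`
	}

	func main() {
		var req driverRequest
		if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
			os.Exit(1)
		}
		// A real driver would resolve the patterns in os.Args[1:] with its
		// build system; this sketch just emits an empty package set.
		_ = json.NewEncoder(os.Stdout).Encode(driverResponse{Packages: []json.RawMessage{}})
	}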
-func (r *responseDeduper) addAll(dr *driverResponse) { - for _, pkg := range dr.Packages { - r.addPackage(pkg) - } - for _, root := range dr.Roots { - r.addRoot(root) - } - r.dr.GoVersion = dr.GoVersion -} - -func (r *responseDeduper) addPackage(p *Package) { - if r.seenPackages[p.ID] != nil { - return - } - r.seenPackages[p.ID] = p - r.dr.Packages = append(r.dr.Packages, p) -} - -func (r *responseDeduper) addRoot(id string) { - if r.seenRoots[id] { - return - } - r.seenRoots[id] = true - r.dr.Roots = append(r.dr.Roots, id) -} - -type golistState struct { - cfg *Config - ctx context.Context - - envOnce sync.Once - goEnvError error - goEnv map[string]string - - rootsOnce sync.Once - rootDirsError error - rootDirs map[string]string - - goVersionOnce sync.Once - goVersionError error - goVersion int // The X in Go 1.X. - - // vendorDirs caches the (non)existence of vendor directories. - vendorDirs map[string]bool -} - -// getEnv returns Go environment variables. Only specific variables are -// populated -- computing all of them is slow. -func (state *golistState) getEnv() (map[string]string, error) { - state.envOnce.Do(func() { - var b *bytes.Buffer - b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") - if state.goEnvError != nil { - return - } - - state.goEnv = make(map[string]string) - decoder := json.NewDecoder(b) - if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { - return - } - }) - return state.goEnv, state.goEnvError -} - -// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. -func (state *golistState) mustGetEnv() map[string]string { - env, err := state.getEnv() - if err != nil { - panic(fmt.Sprintf("mustGetEnv: %v", err)) - } - return env -} - -// goListDriver uses the go list command to interpret the patterns and produce -// the build system package structure. -// See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - // Make sure that any asynchronous go commands are killed when we return. - parentCtx := cfg.Context - if parentCtx == nil { - parentCtx = context.Background() - } - ctx, cancel := context.WithCancel(parentCtx) - defer cancel() - - response := newDeduper() - - state := &golistState{ - cfg: cfg, - ctx: ctx, - vendorDirs: map[string]bool{}, - } - - // Fill in response.Sizes asynchronously if necessary. - var sizeserr error - var sizeswg sync.WaitGroup - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) - go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - sizeserr = err - response.dr.Compiler = compiler - response.dr.Arch = arch - sizeswg.Done() - }() - } - - // Determine files requested in contains patterns - var containFiles []string - restPatterns := make([]string, 0, len(patterns)) - // Extract file= and other [querytype]= patterns. Report an error if querytype - // doesn't exist. 
-extractQueries: - for _, pattern := range patterns { - eqidx := strings.Index(pattern, "=") - if eqidx < 0 { - restPatterns = append(restPatterns, pattern) - } else { - query, value := pattern[:eqidx], pattern[eqidx+len("="):] - switch query { - case "file": - containFiles = append(containFiles, value) - case "pattern": - restPatterns = append(restPatterns, value) - case "": // not a reserved query - restPatterns = append(restPatterns, pattern) - default: - for _, rune := range query { - if rune < 'a' || rune > 'z' { // not a reserved query - restPatterns = append(restPatterns, pattern) - continue extractQueries - } - } - // Reject all other patterns containing "=" - return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) - } - } - } - - // See if we have any patterns to pass through to go list. Zero initial - // patterns also requires a go list call, since it's the equivalent of - // ".". - if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := state.createDriverResponse(restPatterns...) - if err != nil { - return nil, err - } - response.addAll(dr) - } - - if len(containFiles) != 0 { - if err := state.runContainsQueries(response, containFiles); err != nil { - return nil, err - } - } - - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } - return response.dr, nil -} - -func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { - for _, query := range queries { - // TODO(matloob): Do only one query per directory. - fdir := filepath.Dir(query) - // Pass absolute path of directory to go list so that it knows to treat it as a directory, - // not a package path. - pattern, err := filepath.Abs(fdir) - if err != nil { - return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) - } - dirResponse, err := state.createDriverResponse(pattern) - - // If there was an error loading the package, or no packages are returned, - // or the package is returned with errors, try to load the file as an - // ad-hoc package. - // Usually the error will appear in a returned package, but may not if we're - // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && - len(dirResponse.Packages[0].Errors) == 1 { - var queryErr error - if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { - return err // return the original error - } - } - isRoot := make(map[string]bool, len(dirResponse.Roots)) - for _, root := range dirResponse.Roots { - isRoot[root] = true - } - for _, pkg := range dirResponse.Packages { - // Add any new packages to the main set - // We don't bother to filter packages that will be dropped by the changes of roots, - // that will happen anyway during graph construction outside this function. - // Over-reporting packages is not a problem. - response.addPackage(pkg) - // if the package was not a root one, it cannot have the file - if !isRoot[pkg.ID] { - continue - } - for _, pkgFile := range pkg.GoFiles { - if filepath.Base(query) == filepath.Base(pkgFile) { - response.addRoot(pkg.ID) - break - } - } - } - } - return nil -} - -// adhocPackage attempts to load or construct an ad-hoc package for a given -// query, if the original call to the driver produced inadequate results. 
-func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { - response, err := state.createDriverResponse(query) - if err != nil { - return nil, err - } - // If we get nothing back from `go list`, - // try to make this file into its own ad-hoc package. - // TODO(rstambler): Should this check against the original response? - if len(response.Packages) == 0 { - response.Packages = append(response.Packages, &Package{ - ID: "command-line-arguments", - PkgPath: query, - GoFiles: []string{query}, - CompiledGoFiles: []string{query}, - Imports: make(map[string]*Package), - }) - response.Roots = append(response.Roots, "command-line-arguments") - } - // Handle special cases. - if len(response.Packages) == 1 { - // golang/go#33482: If this is a file= query for ad-hoc packages where - // the file only exists on an overlay, and exists outside of a module, - // add the file to the package and remove the errors. - if response.Packages[0].ID == "command-line-arguments" || - filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { - if len(response.Packages[0].GoFiles) == 0 { - filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath - // TODO(matloob): check if the file is outside of a root dir? - for path := range state.cfg.Overlay { - if path == filename { - response.Packages[0].Errors = nil - response.Packages[0].GoFiles = []string{path} - response.Packages[0].CompiledGoFiles = []string{path} - } - } - } - } - } - return response, nil -} - -// Fields must match go list; -// see $GOROOT/src/cmd/go/internal/load/pkg.go. -type jsonPackage struct { - ImportPath string - Dir string - Name string - Export string - GoFiles []string - CompiledGoFiles []string - IgnoredGoFiles []string - IgnoredOtherFiles []string - EmbedPatterns []string - EmbedFiles []string - CFiles []string - CgoFiles []string - CXXFiles []string - MFiles []string - HFiles []string - FFiles []string - SFiles []string - SwigFiles []string - SwigCXXFiles []string - SysoFiles []string - Imports []string - ImportMap map[string]string - Deps []string - Module *Module - TestGoFiles []string - TestImports []string - XTestGoFiles []string - XTestImports []string - ForTest string // q in a "p [q.test]" package, else "" - DepOnly bool - - Error *packagesinternal.PackageError - DepsErrors []*packagesinternal.PackageError -} - -type jsonPackageError struct { - ImportStack []string - Pos string - Err string -} - -func otherFiles(p *jsonPackage) [][]string { - return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} -} - -// createDriverResponse uses the "go list" command to expand the pattern -// words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { - // go list uses the following identifiers in ImportPath and Imports: - // - // "p" -- importable package or main (command) - // "q.test" -- q's test executable - // "p [q.test]" -- variant of p as built for q's test executable - // "q_test [q.test]" -- q's external test package - // - // The packages p that are built differently for a test q.test - // are q itself, plus any helpers used by the external test q_test, - // typically including "testing" and all its dependencies. - - // Run "go list" for complete - // information on the specified packages. 
- goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) - if err != nil { - return nil, err - } - - seen := make(map[string]*jsonPackage) - pkgs := make(map[string]*Package) - additionalErrors := make(map[string][]Error) - // Decode the JSON and convert it to Package form. - response := &driverResponse{ - GoVersion: goVersion, - } - for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, fmt.Errorf("JSON decoding failed: %v", err) - } - - if p.ImportPath == "" { - // The documentation for go list says that “[e]rroneous packages will have - // a non-empty ImportPath”. If for some reason it comes back empty, we - // prefer to error out rather than silently discarding data or handing - // back a package without any way to refer to it. - if p.Error != nil { - return nil, Error{ - Pos: p.Error.Pos, - Msg: p.Error.Err, - } - } - return nil, fmt.Errorf("package missing import path: %+v", p) - } - - // Work around https://golang.org/issue/33157: - // go list -e, when given an absolute path, will find the package contained at - // that directory. But when no package exists there, it will return a fake package - // with an error and the ImportPath set to the absolute path provided to go list. - // Try to convert that absolute path to what its package path would be if it's - // contained in a known module or GOPATH entry. This will allow the package to be - // properly "reclaimed" when overlays are processed. - if filepath.IsAbs(p.ImportPath) && p.Error != nil { - pkgPath, ok, err := state.getPkgPath(p.ImportPath) - if err != nil { - return nil, err - } - if ok { - p.ImportPath = pkgPath - } - } - - if old, found := seen[p.ImportPath]; found { - // If one version of the package has an error, and the other doesn't, assume - // that this is a case where go list is reporting a fake dependency variant - // of the imported package: When a package tries to invalidly import another - // package, go list emits a variant of the imported package (with the same - // import path, but with an error on it, and the package will have a - // DepError set on it). An example of when this can happen is for imports of - // main packages: main packages can not be imported, but they may be - // separately matched and listed by another pattern. - // See golang.org/issue/36188 for more details. - - // The plan is that eventually, hopefully in Go 1.15, the error will be - // reported on the importing package rather than the duplicate "fake" - // version of the imported package. Once all supported versions of Go - // have the new behavior this logic can be deleted. - // TODO(matloob): delete the workaround logic once all supported versions of - // Go return the errors on the proper package. - - // There should be exactly one version of a package that doesn't have an - // error. - if old.Error == nil && p.Error == nil { - if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) - } - continue - } - - // Determine if this package's error needs to be bubbled up. - // This is a hack, and we expect for go list to eventually set the error - // on the package. 
- if old.Error != nil { - var errkind string - if strings.Contains(old.Error.Err, "not an importable package") { - errkind = "not an importable package" - } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { - errkind = "use of internal package not allowed" - } - if errkind != "" { - if len(old.Error.ImportStack) < 1 { - return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) - } - importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] - if importingPkg == old.ImportPath { - // Using an older version of Go which put this package itself on top of import - // stack, instead of the importer. Look for importer in second from top - // position. - if len(old.Error.ImportStack) < 2 { - return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) - } - importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] - } - additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ - Pos: old.Error.Pos, - Msg: old.Error.Err, - Kind: ListError, - }) - } - } - - // Make sure that if there's a version of the package without an error, - // that's the one reported to the user. - if old.Error == nil { - continue - } - - // This package will replace the old one at the end of the loop. - } - seen[p.ImportPath] = p - - pkg := &Package{ - Name: p.Name, - ID: p.ImportPath, - GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), - CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), - OtherFiles: absJoin(p.Dir, otherFiles(p)...), - EmbedFiles: absJoin(p.Dir, p.EmbedFiles), - EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), - IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, - depsErrors: p.DepsErrors, - Module: p.Module, - } - - if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { - if len(p.CompiledGoFiles) > len(p.GoFiles) { - // We need the cgo definitions, which are in the first - // CompiledGoFile after the non-cgo ones. This is a hack but there - // isn't currently a better way to find it. We also need the pure - // Go files and unprocessed cgo files, all of which are already - // in pkg.GoFiles. - cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] - pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) - } else { - // golang/go#38990: go list silently fails to do cgo processing - pkg.CompiledGoFiles = nil - pkg.Errors = append(pkg.Errors, Error{ - Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", - Kind: ListError, - }) - } - } - - // Work around https://golang.org/issue/28749: - // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. - // Remove files from CompiledGoFiles that are non-go files - // (or are not files that look like they are from the cache). - if len(pkg.CompiledGoFiles) > 0 { - out := pkg.CompiledGoFiles[:0] - for _, f := range pkg.CompiledGoFiles { - if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file - continue - } - out = append(out, f) - } - pkg.CompiledGoFiles = out - } - - // Extract the PkgPath from the package's ID. 
- if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { - pkg.PkgPath = pkg.ID[:i] - } else { - pkg.PkgPath = pkg.ID - } - - if pkg.PkgPath == "unsafe" { - pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) - } else if len(pkg.CompiledGoFiles) == 0 { - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - pkg.CompiledGoFiles = pkg.GoFiles - } - - // Assume go list emits only absolute paths for Dir. - if p.Dir != "" && !filepath.IsAbs(p.Dir) { - log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) - } - - if p.Export != "" && !filepath.IsAbs(p.Export) { - pkg.ExportFile = filepath.Join(p.Dir, p.Export) - } else { - pkg.ExportFile = p.Export - } - - // imports - // - // Imports contains the IDs of all imported packages. - // ImportsMap records (path, ID) only where they differ. - ids := make(map[string]bool) - for _, id := range p.Imports { - ids[id] = true - } - pkg.Imports = make(map[string]*Package) - for path, id := range p.ImportMap { - pkg.Imports[path] = &Package{ID: id} // non-identity import - delete(ids, id) - } - for id := range ids { - if id == "C" { - continue - } - - pkg.Imports[id] = &Package{ID: id} // identity import - } - if !p.DepOnly { - response.Roots = append(response.Roots, pkg.ID) - } - - // Temporary work-around for golang/go#39986. Parse filenames out of - // error messages. This happens if there are unrecoverable syntax - // errors in the source, so we can't match on a specific error message. - // - // TODO(rfindley): remove this heuristic, in favor of considering - // InvalidGoFiles from the list driver. - if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { - addFilenameFromPos := func(pos string) bool { - split := strings.Split(pos, ":") - if len(split) < 1 { - return false - } - filename := strings.TrimSpace(split[0]) - if filename == "" { - return false - } - if !filepath.IsAbs(filename) { - filename = filepath.Join(state.cfg.Dir, filename) - } - info, _ := os.Stat(filename) - if info == nil { - return false - } - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) - pkg.GoFiles = append(pkg.GoFiles, filename) - return true - } - found := addFilenameFromPos(err.Pos) - // In some cases, go list only reports the error position in the - // error text, not the error position. One such case is when the - // file's package name is a keyword (see golang.org/issue/39763). - if !found { - addFilenameFromPos(err.Err) - } - } - - if p.Error != nil { - msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. - // Address golang.org/issue/35964 by appending import stack to error message. - if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { - msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) - } - pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: msg, - Kind: ListError, - }) - } - - pkgs[pkg.ID] = pkg - } - - for id, errs := range additionalErrors { - if p, ok := pkgs[id]; ok { - p.Errors = append(p.Errors, errs...) 
- } - } - for _, pkg := range pkgs { - response.Packages = append(response.Packages, pkg) - } - sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) - - return response, nil -} - -func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { - if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { - return false - } - - goV, err := state.getGoVersion() - if err != nil { - return false - } - - // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. - // The import stack behaves differently for these versions than newer Go versions. - if goV < 15 { - return len(p.Error.ImportStack) == 0 - } - - // On Go 1.15 and later, only parse filenames out of error if there's no import stack, - // or the current package is at the top of the import stack. This is not guaranteed - // to work perfectly, but should avoid some cases where files in errors don't belong to this - // package. - return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath -} - -// getGoVersion returns the effective minor version of the go command. -func (state *golistState) getGoVersion() (int, error) { - state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) - }) - return state.goVersion, state.goVersionError -} - -// getPkgPath finds the package path of a directory if it's relative to a root -// directory. -func (state *golistState) getPkgPath(dir string) (string, bool, error) { - absDir, err := filepath.Abs(dir) - if err != nil { - return "", false, err - } - roots, err := state.determineRootDirs() - if err != nil { - return "", false, err - } - - for rdir, rpath := range roots { - // Make sure that the directory is in the module, - // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, rdir) { - continue - } - // TODO(matloob): This doesn't properly handle symlinks. - r, err := filepath.Rel(rdir, dir) - if err != nil { - continue - } - if rpath != "" { - // We choose only one root even though the directory even it can belong in multiple modules - // or GOPATH entries. This is okay because we only need to work with absolute dirs when a - // file is missing from disk, for instance when gopls calls go/packages in an overlay. - // Once the file is saved, gopls, or the next invocation of the tool will get the correct - // result straight from golist. - // TODO(matloob): Implement module tiebreaking? - return path.Join(rpath, filepath.ToSlash(r)), true, nil - } - return filepath.ToSlash(r), true, nil - } - return "", false, nil -} - -// absJoin absolutizes and flattens the lists of files. 
-func absJoin(dir string, fileses ...[]string) (res []string) { - for _, files := range fileses { - for _, file := range files { - if !filepath.IsAbs(file) { - file = filepath.Join(dir, file) - } - res = append(res, file) - } - } - return res -} - -func jsonFlag(cfg *Config, goVersion int) string { - if goVersion < 19 { - return "-json" - } - var fields []string - added := make(map[string]bool) - addFields := func(fs ...string) { - for _, f := range fs { - if !added[f] { - added[f] = true - fields = append(fields, f) - } - } - } - addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { - addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", - "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", - "SwigFiles", "SwigCXXFiles", "SysoFiles") - if cfg.Tests { - addFields("TestGoFiles", "XTestGoFiles") - } - } - if cfg.Mode&NeedTypes != 0 { - // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, - // even when -compiled isn't passed in. - // TODO(#52435): Should we make the test ask for -compiled, or automatically - // request CompiledGoFiles in certain circumstances? - addFields("Dir", "CompiledGoFiles") - } - if cfg.Mode&NeedCompiledGoFiles != 0 { - addFields("Dir", "CompiledGoFiles", "Export") - } - if cfg.Mode&NeedImports != 0 { - // When imports are requested, DepOnly is used to distinguish between packages - // explicitly requested and transitive imports of those packages. - addFields("DepOnly", "Imports", "ImportMap") - if cfg.Tests { - addFields("TestImports", "XTestImports") - } - } - if cfg.Mode&NeedDeps != 0 { - addFields("DepOnly") - } - if usesExportData(cfg) { - // Request Dir in the unlikely case Export is not absolute. - addFields("Dir", "Export") - } - if cfg.Mode&needInternalForTest != 0 { - addFields("ForTest") - } - if cfg.Mode&needInternalDepsErrors != 0 { - addFields("DepsErrors") - } - if cfg.Mode&NeedModule != 0 { - addFields("Module") - } - if cfg.Mode&NeedEmbedFiles != 0 { - addFields("EmbedFiles") - } - if cfg.Mode&NeedEmbedPatterns != 0 { - addFields("EmbedPatterns") - } - return "-json=" + strings.Join(fields, ",") -} - -func golistargs(cfg *Config, words []string, goVersion int) []string { - const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo - fullargs := []string{ - "-e", jsonFlag(cfg, goVersion), - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), - fmt.Sprintf("-test=%t", cfg.Tests), - fmt.Sprintf("-export=%t", usesExportData(cfg)), - fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), - // go list doesn't let you pass -test and -find together, - // probably because you'd just get the TestMain. - fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), - } - - // golang/go#60456: with go1.21 and later, go list serves pgo variants, which - // can be costly to compute and may result in redundant processing for the - // caller. Disable these variants. If someone wants to add e.g. a NeedPGO - // mode flag, that should be a separate proposal. - if goVersion >= 21 { - fullargs = append(fullargs, "-pgo=off") - } - - fullargs = append(fullargs, cfg.BuildFlags...) - fullargs = append(fullargs, "--") - fullargs = append(fullargs, words...) - return fullargs -} - -// cfgInvocation returns an Invocation that reflects cfg's settings. 
-func (state *golistState) cfgInvocation() gocommand.Invocation { - cfg := state.cfg - return gocommand.Invocation{ - BuildFlags: cfg.BuildFlags, - ModFile: cfg.modFile, - ModFlag: cfg.modFlag, - CleanEnv: cfg.Env != nil, - Env: cfg.Env, - Logf: cfg.Logf, - WorkingDir: cfg.Dir, - } -} - -// invokeGo returns the stdout of a go command invocation. -func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { - cfg := state.cfg - - inv := state.cfgInvocation() - - // For Go versions 1.16 and above, `go list` accepts overlays directly via - // the -overlay flag. Set it, if it's available. - // - // The check for "list" is not necessarily required, but we should avoid - // getting the go version if possible. - if verb == "list" { - goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - if goVersion >= 16 { - filename, cleanup, err := state.writeOverlays() - if err != nil { - return nil, err - } - defer cleanup() - inv.Overlay = filename - } - } - inv.Verb = verb - inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) - if err != nil { - // Check for 'go' executable not being found. - if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { - return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) - } - - exitErr, ok := err.(*exec.ExitError) - if !ok { - // Catastrophic error: - // - context cancellation - return nil, fmt.Errorf("couldn't run 'go': %w", err) - } - - // Old go version? - if strings.Contains(stderr.String(), "flag provided but not defined") { - return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} - } - - // Related to #24854 - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { - return nil, friendlyErr - } - - // Is there an error running the C compiler in cgo? This will be reported in the "Error" field - // and should be suppressed by go list -e. - // - // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. - isPkgPathRune := func(r rune) bool { - // From https://golang.org/ref/spec#Import_declarations: - // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings - // using only characters belonging to Unicode's L, M, N, P, and S general categories - // (the Graphic characters without spaces) and may also exclude the - // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. - return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && - !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) - } - // golang/go#36770: Handle case where cmd/go prints module download messages before the error. - msg := stderr.String() - for strings.HasPrefix(msg, "go: downloading") { - msg = msg[strings.IndexRune(msg, '\n')+1:] - } - if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - msg := msg[len("# "):] - if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { - return stdout, nil - } - // Treat pkg-config errors as a special case (golang.org/issue/36770). - if strings.HasPrefix(msg, "pkg-config") { - return stdout, nil - } - } - - // This error only appears in stderr. 
See golang.org/cl/166398 for a fix in go list to show - // the error in the Err section of stdout in case -e option is provided. - // This fix is provided for backwards compatibility. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Similar to the previous error, but currently lacks a fix in Go. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. - // If the package doesn't exist, put the absolute path of the directory into the error message, - // as Go 1.13 list does. - const noSuchDirectory = "no such directory" - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { - errstr := stderr.String() - abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - abspath, strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. - // Note that the error message we look for in this case is different that the one looked for above. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a - // directory outside any module. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - // TODO(matloob): command-line-arguments isn't correct here. - "command-line-arguments", strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Another variation of the previous error - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - // TODO(matloob): command-line-arguments isn't correct here. - "command-line-arguments", strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit - // status if there's a dependency on a package that doesn't exist. But it should return - // a zero exit status and set an error on that package. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { - // Don't clobber stdout if `go list` actually returned something. 
- if len(stdout.String()) > 0 { - return stdout, nil - } - // try to extract package name from string - stderrStr := stderr.String() - var importPath string - colon := strings.Index(stderrStr, ":") - if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { - importPath = stderrStr[len("go build "):colon] - } - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - importPath, strings.Trim(stderrStr, "\n")) - return bytes.NewBufferString(output), nil - } - - // Export mode entails a build. - // If that build fails, errors appear on stderr - // (despite the -e flag) and the Export field is blank. - // Do not fail in that case. - // The same is true if an ad-hoc package given to go list doesn't exist. - // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when - // packages don't exist or a build fails. - if !usesExportData(cfg) && !containsGoFile(args) { - return nil, friendlyErr - } - } - return stdout, nil -} - -// OverlayJSON is the format overlay files are expected to be in. -// The Replace map maps from overlaid paths to replacement paths: -// the Go command will forward all reads trying to open -// each overlaid path to its replacement path, or consider the overlaid -// path not to exist if the replacement path is empty. -// -// From golang/go#39958. -type OverlayJSON struct { - Replace map[string]string `json:"replace,omitempty"` -} - -// writeOverlays writes out files for go list's -overlay flag, as described -// above. -func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { - // Do nothing if there are no overlays in the config. - if len(state.cfg.Overlay) == 0 { - return "", func() {}, nil - } - dir, err := os.MkdirTemp("", "gopackages-*") - if err != nil { - return "", nil, err - } - // The caller must clean up this directory, unless this function returns an - // error. - cleanup = func() { - os.RemoveAll(dir) - } - defer func() { - if err != nil { - cleanup() - } - }() - overlays := map[string]string{} - for k, v := range state.cfg.Overlay { - // Create a unique filename for the overlaid files, to avoid - // creating nested directories. - noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) - if err != nil { - return "", func() {}, err - } - if _, err := f.Write(v); err != nil { - return "", func() {}, err - } - if err := f.Close(); err != nil { - return "", func() {}, err - } - overlays[k] = f.Name() - } - b, err := json.Marshal(OverlayJSON{Replace: overlays}) - if err != nil { - return "", func() {}, err - } - // Write out the overlay file that contains the filepath mappings. 
- filename = filepath.Join(dir, "overlay.json") - if err := os.WriteFile(filename, b, 0665); err != nil { - return "", func() {}, err - } - return filename, cleanup, nil -} - -func containsGoFile(s []string) bool { - for _, f := range s { - if strings.HasSuffix(f, ".go") { - return true - } - } - return false -} - -func cmdDebugStr(cmd *exec.Cmd) string { - env := make(map[string]string) - for _, kv := range cmd.Env { - split := strings.SplitN(kv, "=", 2) - k, v := split[0], split[1] - env[k] = v - } - - var args []string - for _, arg := range cmd.Args { - quoted := strconv.Quote(arg) - if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { - args = append(args, quoted) - } else { - args = append(args, arg) - } - } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) -} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go deleted file mode 100644 index d823c474ad3..00000000000 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "encoding/json" - "path/filepath" - - "golang.org/x/tools/internal/gocommand" -) - -// determineRootDirs returns a mapping from absolute directories that could -// contain code to their corresponding import path prefixes. -func (state *golistState) determineRootDirs() (map[string]string, error) { - env, err := state.getEnv() - if err != nil { - return nil, err - } - if env["GOMOD"] != "" { - state.rootsOnce.Do(func() { - state.rootDirs, state.rootDirsError = state.determineRootDirsModules() - }) - } else { - state.rootsOnce.Do(func() { - state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() - }) - } - return state.rootDirs, state.rootDirsError -} - -func (state *golistState) determineRootDirsModules() (map[string]string, error) { - // List all of the modules--the first will be the directory for the main - // module. Any replaced modules will also need to be treated as roots. - // Editing files in the module cache isn't a great idea, so we don't - // plan to ever support that. - out, err := state.invokeGo("list", "-m", "-json", "all") - if err != nil { - // 'go list all' will fail if we're outside of a module and - // GO111MODULE=on. Try falling back without 'all'. - var innerErr error - out, innerErr = state.invokeGo("list", "-m", "-json") - if innerErr != nil { - return nil, err - } - } - roots := map[string]string{} - modules := map[string]string{} - var i int - for dec := json.NewDecoder(out); dec.More(); { - mod := new(gocommand.ModuleJSON) - if err := dec.Decode(mod); err != nil { - return nil, err - } - if mod.Dir != "" && mod.Path != "" { - // This is a valid module; add it to the map. - absDir, err := filepath.Abs(mod.Dir) - if err != nil { - return nil, err - } - modules[absDir] = mod.Path - // The first result is the main module. 
- if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { - roots[absDir] = mod.Path - } - } - i++ - } - return roots, nil -} - -func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { - m := map[string]string{} - for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { - absDir, err := filepath.Abs(dir) - if err != nil { - return nil, err - } - m[filepath.Join(absDir, "src")] = "" - } - return m, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go deleted file mode 100644 index 5c080d21b54..00000000000 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "fmt" - "strings" -) - -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, -} - -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { - return "LoadMode(0)" - } - var out []string - for i, x := range allModes { - if x > m { - break - } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x - } - } - if m != 0 { - out = append(out, "Unknown") - } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go deleted file mode 100644 index 81e9e6a727d..00000000000 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ /dev/null @@ -1,1347 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -// See doc.go for package documentation and implementation notes. - -import ( - "context" - "encoding/json" - "fmt" - "go/ast" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "io" - "log" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/tools/go/gcexportdata" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" -) - -// A LoadMode controls the amount of detail to return when loading. -// The bits below can be combined to specify which fields should be -// filled in the result packages. -// The zero value is a special case, equivalent to combining -// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. -// ID and Errors (if present) will always be filled. -// Load may return more information than requested. -type LoadMode int - -const ( - // NeedName adds Name and PkgPath. - NeedName LoadMode = 1 << iota - - // NeedFiles adds GoFiles and OtherFiles. - NeedFiles - - // NeedCompiledGoFiles adds CompiledGoFiles. - NeedCompiledGoFiles - - // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain - // "placeholder" Packages with only the ID set. - NeedImports - - // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. - NeedDeps - - // NeedExportFile adds ExportFile. 
- NeedExportFile - - // NeedTypes adds Types, Fset, and IllTyped. - NeedTypes - - // NeedSyntax adds Syntax. - NeedSyntax - - // NeedTypesInfo adds TypesInfo. - NeedTypesInfo - - // NeedTypesSizes adds TypesSizes. - NeedTypesSizes - - // needInternalDepsErrors adds the internal deps errors field for use by gopls. - needInternalDepsErrors - - // needInternalForTest adds the internal forTest field. - // Tests must also be set on the context for this field to be populated. - needInternalForTest - - // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. - // Modifies CompiledGoFiles and Types, and has no effect on its own. - typecheckCgo - - // NeedModule adds Module. - NeedModule - - // NeedEmbedFiles adds EmbedFiles. - NeedEmbedFiles - - // NeedEmbedPatterns adds EmbedPatterns. - NeedEmbedPatterns -) - -const ( - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadImports = LoadFiles | NeedImports - - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadAllSyntax = LoadSyntax | NeedDeps - - // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. - NeedExportsFile = NeedExportFile -) - -// A Config specifies details about how packages should be loaded. -// The zero value is a valid configuration. -// Calls to Load do not modify this struct. -type Config struct { - // Mode controls the level of information returned for each package. - Mode LoadMode - - // Context specifies the context for the load operation. - // If the context is cancelled, the loader may stop early - // and return an ErrCancelled error. - // If Context is nil, the load cannot be cancelled. - Context context.Context - - // Logf is the logger for the config. - // If the user provides a logger, debug logging is enabled. - // If the GOPACKAGESDEBUG environment variable is set to true, - // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) - - // Dir is the directory in which to run the build system's query tool - // that provides information about the packages. - // If Dir is empty, the tool is run in the current directory. - Dir string - - // Env is the environment to use when invoking the build system's query tool. - // If Env is nil, the current environment is used. - // As in os/exec's Cmd, only the last value in the slice for - // each environment key is used. To specify the setting of only - // a few variables, append to the current environment, as in: - // - // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") - // - Env []string - - // gocmdRunner guards go command calls from concurrency errors. 
- gocmdRunner *gocommand.Runner - - // BuildFlags is a list of command-line flags to be passed through to - // the build system's query tool. - BuildFlags []string - - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - - // Fset provides source position information for syntax trees and types. - // If Fset is nil, Load will use a new fileset, but preserve Fset's value. - Fset *token.FileSet - - // ParseFile is called to read and parse each file - // when preparing a package's type-checked syntax tree. - // It must be safe to call ParseFile simultaneously from multiple goroutines. - // If ParseFile is nil, the loader will uses parser.ParseFile. - // - // ParseFile should parse the source from src and use filename only for - // recording position information. - // - // An application may supply a custom implementation of ParseFile - // to change the effective file contents or the behavior of the parser, - // or to modify the syntax tree. For example, selectively eliminating - // unwanted function bodies can significantly accelerate type checking. - ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) - - // If Tests is set, the loader includes not just the packages - // matching a particular pattern but also any related test packages, - // including test-only variants of the package and the test executable. - // - // For example, when using the go command, loading "fmt" with Tests=true - // returns four packages, with IDs "fmt" (the standard package), - // "fmt [fmt.test]" (the package as compiled for the test), - // "fmt_test" (the test functions from source files in package fmt_test), - // and "fmt.test" (the test binary). - // - // In build systems with explicit names for tests, - // setting Tests may have no effect. - Tests bool - - // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. - // - // Overlays provide incomplete support for when a given file doesn't - // already exist on disk. See the package doc above for more details. - Overlay map[string][]byte -} - -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. 
- // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - -// Load loads and returns the Go packages named by the given patterns. -// -// Config specifies loading options; -// nil behaves the same as an empty Config. -// -// Load returns an error if any of the patterns was invalid -// as defined by the underlying build system. -// It may return an empty list of packages without an error, -// for instance for an empty expansion of a valid wildcard. -// Errors associated with a particular package are recorded in the -// corresponding Package's Errors list, and do not cause Load to -// return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is -// provided for convenient display of all errors. -func Load(cfg *Config, patterns ...string) ([]*Package, error) { - ld := newLoader(cfg) - response, external, err := defaultDriver(&ld.Config, patterns...) - if err != nil { - return nil, err - } - - ld.sizes = types.SizesFor(response.Compiler, response.Arch) - if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { - // Type size information is needed but unavailable. - if external { - // An external driver may fail to populate the Compiler/GOARCH fields, - // especially since they are relatively new (see #63700). - // Provide a sensible fallback in this case. - ld.sizes = types.SizesFor("gc", runtime.GOARCH) - if ld.sizes == nil { // gccgo-only arch - ld.sizes = types.SizesFor("gc", "amd64") - } - } else { - // Go list should never fail to deliver accurate size information. - // Reject the whole Load since the error is the same for every package. - return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", - response.Compiler, response.Arch) - } - } - - return ld.refine(response) -} - -// defaultDriver is a driver that implements go/packages' fallback behavior. -// It will try to request to an external driver, if one exists. If there's -// no external driver, or the driver returns a response with NotHandled set, -// defaultDriver will fall back to the go list driver. -// The boolean result indicates that an external driver handled the request. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { - if driver := findExternalDriver(cfg); driver != nil { - response, err := driver(cfg, patterns...) - if err != nil { - return nil, false, err - } else if !response.NotHandled { - return response, true, nil - } - // (fall through) - } - - response, err := goListDriver(cfg, patterns...) - return response, false, err -} - -// A Package describes a loaded Go package. -type Package struct { - // ID is a unique identifier for a package, - // in a syntax provided by the underlying build system. - // - // Because the syntax varies based on the build system, - // clients should treat IDs as opaque and not attempt to - // interpret them. - ID string - - // Name is the package name as it appears in the package source code. - Name string - - // PkgPath is the package path as used by the go/types package. - PkgPath string - - // Errors contains any errors encountered querying the metadata - // of the package, or while parsing or type-checking its files. 
- Errors []Error - - // TypeErrors contains the subset of errors produced during type checking. - TypeErrors []types.Error - - // GoFiles lists the absolute file paths of the package's Go source files. - // It may include files that should not be compiled, for example because - // they contain non-matching build tags, are documentary pseudo-files such as - // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. - GoFiles []string - - // CompiledGoFiles lists the absolute file paths of the package's source - // files that are suitable for type checking. - // This may differ from GoFiles if files are processed before compilation. - CompiledGoFiles []string - - // OtherFiles lists the absolute file paths of the package's non-Go source files, - // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. - OtherFiles []string - - // EmbedFiles lists the absolute file paths of the package's files - // embedded with go:embed. - EmbedFiles []string - - // EmbedPatterns lists the absolute file patterns of the package's - // files embedded with go:embed. - EmbedPatterns []string - - // IgnoredFiles lists source files that are not part of the package - // using the current build configuration but that might be part of - // the package using other build configurations. - IgnoredFiles []string - - // ExportFile is the absolute path to a file containing type - // information for the package as provided by the build system. - ExportFile string - - // Imports maps import paths appearing in the package's Go source files - // to corresponding loaded Packages. - Imports map[string]*Package - - // Types provides type information for the package. - // The NeedTypes LoadMode bit sets this field for packages matching the - // patterns; type information for dependencies may be missing or incomplete, - // unless NeedDeps and NeedImports are also set. - Types *types.Package - - // Fset provides position information for Types, TypesInfo, and Syntax. - // It is set only when Types is set. - Fset *token.FileSet - - // IllTyped indicates whether the package or any dependency contains errors. - // It is set only when Types is set. - IllTyped bool - - // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. - // - // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. - // If NeedDeps and NeedImports are also set, this field will also be populated - // for dependencies. - // - // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are - // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. - Syntax []*ast.File - - // TypesInfo provides type information about the package's syntax trees. - // It is set only when Syntax is set. - TypesInfo *types.Info - - // TypesSizes provides the effective size function for types in TypesInfo. - TypesSizes types.Sizes - - // forTest is the package under test, if any. - forTest string - - // depsErrors is the DepsErrors field from the go list response, if any. - depsErrors []*packagesinternal.PackageError - - // module is the module information for the package if it exists. - Module *Module -} - -// Module provides module information for a package. -type Module struct { - Path string // module path - Version string // module version - Replace *Module // replaced by this module - Time *time.Time // time version was created - Main bool // is this the main module? - Indirect bool // is this module only an indirect dependency of main module? 
- Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file used when loading this module, if any - GoVersion string // go version used in module - Error *ModuleError // error loading module -} - -// ModuleError holds errors loading a module. -type ModuleError struct { - Err string // the error itself -} - -func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { - return p.(*Package).depsErrors - } - packagesinternal.SetModFile = func(config interface{}, value string) { - config.(*Config).modFile = value - } - packagesinternal.SetModFlag = func(config interface{}, value string) { - config.(*Config).modFlag = value - } - packagesinternal.TypecheckCgo = int(typecheckCgo) - packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) -} - -// An Error describes a problem with a package's metadata, syntax, or types. -type Error struct { - Pos string // "file:line:col" or "file:line" or "" or "-" - Msg string - Kind ErrorKind -} - -// ErrorKind describes the source of the error, allowing the user to -// differentiate between errors generated by the driver, the parser, or the -// type-checker. -type ErrorKind int - -const ( - UnknownError ErrorKind = iota - ListError - ParseError - TypeError -) - -func (err Error) Error() string { - pos := err.Pos - if pos == "" { - pos = "-" // like token.Position{}.String() - } - return pos + ": " + err.Msg -} - -// flatPackage is the JSON form of Package -// It drops all the type and syntax fields, and transforms the Imports -// -// TODO(adonovan): identify this struct with Package, effectively -// publishing the JSON protocol. -type flatPackage struct { - ID string - Name string `json:",omitempty"` - PkgPath string `json:",omitempty"` - Errors []Error `json:",omitempty"` - GoFiles []string `json:",omitempty"` - CompiledGoFiles []string `json:",omitempty"` - OtherFiles []string `json:",omitempty"` - EmbedFiles []string `json:",omitempty"` - EmbedPatterns []string `json:",omitempty"` - IgnoredFiles []string `json:",omitempty"` - ExportFile string `json:",omitempty"` - Imports map[string]string `json:",omitempty"` -} - -// MarshalJSON returns the Package in its JSON form. -// For the most part, the structure fields are written out unmodified, and -// the type and syntax fields are skipped. -// The imports are written out as just a map of path to package id. -// The errors are written using a custom type that tries to preserve the -// structure of error types we know about. -// -// This method exists to enable support for additional build systems. It is -// not intended for use by clients of the API and we may change the format. -func (p *Package) MarshalJSON() ([]byte, error) { - flat := &flatPackage{ - ID: p.ID, - Name: p.Name, - PkgPath: p.PkgPath, - Errors: p.Errors, - GoFiles: p.GoFiles, - CompiledGoFiles: p.CompiledGoFiles, - OtherFiles: p.OtherFiles, - EmbedFiles: p.EmbedFiles, - EmbedPatterns: p.EmbedPatterns, - IgnoredFiles: p.IgnoredFiles, - ExportFile: p.ExportFile, - } - if len(p.Imports) > 0 { - flat.Imports = make(map[string]string, len(p.Imports)) - for path, ipkg := range p.Imports { - flat.Imports[path] = ipkg.ID - } - } - return json.Marshal(flat) -} - -// UnmarshalJSON reads in a Package from its JSON format. -// See MarshalJSON for details about the format accepted. 
-func (p *Package) UnmarshalJSON(b []byte) error { - flat := &flatPackage{} - if err := json.Unmarshal(b, &flat); err != nil { - return err - } - *p = Package{ - ID: flat.ID, - Name: flat.Name, - PkgPath: flat.PkgPath, - Errors: flat.Errors, - GoFiles: flat.GoFiles, - CompiledGoFiles: flat.CompiledGoFiles, - OtherFiles: flat.OtherFiles, - EmbedFiles: flat.EmbedFiles, - EmbedPatterns: flat.EmbedPatterns, - ExportFile: flat.ExportFile, - } - if len(flat.Imports) > 0 { - p.Imports = make(map[string]*Package, len(flat.Imports)) - for path, id := range flat.Imports { - p.Imports[path] = &Package{ID: id} - } - } - return nil -} - -func (p *Package) String() string { return p.ID } - -// loaderPackage augments Package with state used during the loading phase -type loaderPackage struct { - *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH -} - -// loader holds the working state of a single call to load. -type loader struct { - pkgs map[string]*loaderPackage - Config - sizes types.Sizes // non-nil if needed by mode - parseCache map[string]*parseValue - parseCacheMu sync.Mutex - exportMu sync.Mutex // enforces mutual exclusion of exportdata operations - - // Config.Mode contains the implied mode (see impliedLoadMode). - // Implied mode contains all the fields we need the data for. - // In requestedMode there are the actually requested fields. - // We'll zero them out before returning packages to the user. - // This makes it easier for us to get the conditions where - // we need certain modes right. - requestedMode LoadMode -} - -type parseValue struct { - f *ast.File - err error - ready chan struct{} -} - -func newLoader(cfg *Config) *loader { - ld := &loader{ - parseCache: map[string]*parseValue{}, - } - if cfg != nil { - ld.Config = *cfg - // If the user has provided a logger, use it. - ld.Config.Logf = cfg.Logf - } - if ld.Config.Logf == nil { - // If the GOPACKAGESDEBUG environment variable is set to true, - // but the user has not provided a logger, default to log.Printf. - if debug { - ld.Config.Logf = log.Printf - } else { - ld.Config.Logf = func(format string, args ...interface{}) {} - } - } - if ld.Config.Mode == 0 { - ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. - } - if ld.Config.Env == nil { - ld.Config.Env = os.Environ() - } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } - if ld.Context == nil { - ld.Context = context.Background() - } - if ld.Dir == "" { - if dir, err := os.Getwd(); err == nil { - ld.Dir = dir - } - } - - // Save the actually requested fields. We'll zero them out before returning packages to the user. - ld.requestedMode = ld.Mode - ld.Mode = impliedLoadMode(ld.Mode) - - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - if ld.Fset == nil { - ld.Fset = token.NewFileSet() - } - - // ParseFile is required even in LoadTypes mode - // because we load source if export data is missing. 
- if ld.ParseFile == nil { - ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { - const mode = parser.AllErrors | parser.ParseComments - return parser.ParseFile(fset, filename, src, mode) - } - } - } - - return ld -} - -// refine connects the supplied packages into a graph and then adds type -// and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { - roots := response.Roots - rootMap := make(map[string]int, len(roots)) - for i, root := range roots { - rootMap[root] = i - } - ld.pkgs = make(map[string]*loaderPackage) - // first pass, fixup and build the map and roots - var initial = make([]*loaderPackage, len(roots)) - for _, pkg := range response.Packages { - rootIndex := -1 - if i, found := rootMap[pkg.ID]; found { - rootIndex = i - } - - // Overlays can invalidate export data. - // TODO(matloob): make this check fine-grained based on dependencies on overlaid files - exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" - // This package needs type information if the caller requested types and the package is - // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) - // This package needs source if the call requested source (or types info, which implies source) - // and the package is either a root, or itas a non- root and the user requested dependencies... - needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || - // ... or if we need types and the exportData is invalid. We fall back to (incompletely) - // typechecking packages from source if they fail to compile. - (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" - lpkg := &loaderPackage{ - Package: pkg, - needtypes: needtypes, - needsrc: needsrc, - goVersion: response.GoVersion, - } - ld.pkgs[lpkg.ID] = lpkg - if rootIndex >= 0 { - initial[rootIndex] = lpkg - lpkg.initial = true - } - } - for i, root := range roots { - if initial[i] == nil { - return nil, fmt.Errorf("root package %v is missing", root) - } - } - - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. - // Thus, even in the presence of both kinds of errors, - // the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. 
- var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) - } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) - } - lpkg.importErrors[importPath] = importErr - continue - } - - if visit(imp) { - lpkg.needsrc = true - } - lpkg.Imports[importPath] = imp.Package - } - - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true - } - } - - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - - return lpkg.needsrc - } - - // For each initial package, create its import DAG. - for _, lpkg := range initial { - visit(lpkg) - } - - } else { - // !NeedImports: drop the stub (ID-only) import packages - // that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil - } - } - - // Load type data and syntax if needed, starting at - // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) - } - wg.Wait() - } - - result := make([]*Package, len(initial)) - for i, lpkg := range initial { - result[i] = lpkg.Package - } - for i := range ld.pkgs { - // Clear all unrequested fields, - // to catch programs that use more than they request. - if ld.requestedMode&NeedName == 0 { - ld.pkgs[i].Name = "" - ld.pkgs[i].PkgPath = "" - } - if ld.requestedMode&NeedFiles == 0 { - ld.pkgs[i].GoFiles = nil - ld.pkgs[i].OtherFiles = nil - ld.pkgs[i].IgnoredFiles = nil - } - if ld.requestedMode&NeedEmbedFiles == 0 { - ld.pkgs[i].EmbedFiles = nil - } - if ld.requestedMode&NeedEmbedPatterns == 0 { - ld.pkgs[i].EmbedPatterns = nil - } - if ld.requestedMode&NeedCompiledGoFiles == 0 { - ld.pkgs[i].CompiledGoFiles = nil - } - if ld.requestedMode&NeedImports == 0 { - ld.pkgs[i].Imports = nil - } - if ld.requestedMode&NeedExportFile == 0 { - ld.pkgs[i].ExportFile = "" - } - if ld.requestedMode&NeedTypes == 0 { - ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil - ld.pkgs[i].IllTyped = false - } - if ld.requestedMode&NeedSyntax == 0 { - ld.pkgs[i].Syntax = nil - } - if ld.requestedMode&NeedTypesInfo == 0 { - ld.pkgs[i].TypesInfo = nil - } - if ld.requestedMode&NeedTypesSizes == 0 { - ld.pkgs[i].TypesSizes = nil - } - if ld.requestedMode&NeedModule == 0 { - ld.pkgs[i].Module = nil - } - } - - return result, nil -} - -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. 
-// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. -// It must be called only once per Package, -// after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. -func (ld *loader) loadPackage(lpkg *loaderPackage) { - if lpkg.PkgPath == "unsafe" { - // Fill in the blanks to avoid surprises. - lpkg.Types = types.Unsafe - lpkg.Fset = ld.Fset - lpkg.Syntax = []*ast.File{} - lpkg.TypesInfo = new(types.Info) - lpkg.TypesSizes = ld.sizes - return - } - - // Call NewPackage directly with explicit name. - // This avoids skew between golist and go/types when the files' - // package declarations are inconsistent. - lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) - lpkg.Fset = ld.Fset - - // Subtle: we populate all Types fields with an empty Package - // before loading export data so that export data processing - // never has to create a types.Package for an indirect dependency, - // which would then require that such created packages be explicitly - // inserted back into the Import graph as a final step after export data loading. - // (Hence this return is after the Types assignment.) - // The Diamond test exercises this case. - if !lpkg.needtypes && !lpkg.needsrc { - return - } - if !lpkg.needsrc { - if err := ld.loadFromExportData(lpkg); err != nil { - lpkg.Errors = append(lpkg.Errors, Error{ - Pos: "-", - Msg: err.Error(), - Kind: UnknownError, // e.g. can't find/open/parse export data - }) - } - return // not a source package, don't get syntax trees - } - - appendError := func(err error) { - // Convert various error types into the one true Error. - var errs []Error - switch err := err.(type) { - case Error: - // from driver - errs = append(errs, err) - - case *os.PathError: - // from parser - errs = append(errs, Error{ - Pos: err.Path + ":1", - Msg: err.Err.Error(), - Kind: ParseError, - }) - - case scanner.ErrorList: - // from parser - for _, err := range err { - errs = append(errs, Error{ - Pos: err.Pos.String(), - Msg: err.Msg, - Kind: ParseError, - }) - } - - case types.Error: - // from type checker - lpkg.TypeErrors = append(lpkg.TypeErrors, err) - errs = append(errs, Error{ - Pos: err.Fset.Position(err.Pos).String(), - Msg: err.Msg, - Kind: TypeError, - }) - - default: - // unexpected impoverished error from parser? - errs = append(errs, Error{ - Pos: "-", - Msg: err.Error(), - Kind: UnknownError, - }) - - // If you see this error message, please file a bug. - log.Printf("internal error: error %q (%T) without position", err, err) - } - - lpkg.Errors = append(lpkg.Errors, errs...) - } - - // If the go command on the PATH is newer than the runtime, - // then the go/{scanner,ast,parser,types} packages from the - // standard library may be unable to process the files - // selected by go list. - // - // There is currently no way to downgrade the effective - // version of the go command (see issue 52078), so we proceed - // with the newer go command but, in case of parse or type - // errors, we emit an additional diagnostic. 
- // - // See: - // - golang.org/issue/52078 (flag to set release tags) - // - golang.org/issue/50825 (gopls legacy version support) - // - golang.org/issue/55883 (go/packages confusing error) - // - // Should we assert a hard minimum of (currently) go1.16 here? - var runtimeVersion int - if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { - defer func() { - if len(lpkg.Errors) > 0 { - appendError(Error{ - Pos: "-", - Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), - Kind: UnknownError, - }) - } - }() - } - - if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { - // The config requested loading sources and types, but sources are missing. - // Add an error to the package and fall back to loading from export data. - appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) - _ = ld.loadFromExportData(lpkg) // ignore any secondary errors - - return // can't get syntax trees for this package - } - - files, errs := ld.parseFiles(lpkg.CompiledGoFiles) - for _, err := range errs { - appendError(err) - } - - lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { - return - } - - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - versions.InitFileVersions(lpkg.TypesInfo) - lpkg.TypesSizes = ld.sizes - - importer := importerFunc(func(path string) (*types.Package, error) { - if path == "unsafe" { - return types.Unsafe, nil - } - - // The imports map is keyed by import path. - ipkg := lpkg.Imports[path] - if ipkg == nil { - if err := lpkg.importErrors[path]; err != nil { - return nil, err - } - // There was skew between the metadata and the - // import declarations, likely due to an edit - // race, or because the ParseFile feature was - // used to supply alternative file contents. - return nil, fmt.Errorf("no metadata for %s", path) - } - - if ipkg.Types != nil && ipkg.Types.Complete() { - return ipkg.Types, nil - } - log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) - panic("unreachable") - }) - - // type-check - tc := &types.Config{ - Importer: importer, - - // Type-check bodies of functions only in initial packages. - // Example: for import graph A->B->C and initial packages {A,C}, - // we can ignore function bodies in B. 
- IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, - - Error: appendError, - Sizes: ld.sizes, // may be nil - } - if lpkg.Module != nil && lpkg.Module.GoVersion != "" { - typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) - } - if (ld.Mode & typecheckCgo) != 0 { - if !typesinternal.SetUsesCgo(tc) { - appendError(Error{ - Msg: "typecheckCgo requires Go 1.15+", - Kind: ListError, - }) - return - } - } - types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) - - lpkg.importErrors = nil // no longer needed - - // If !Cgo, the type-checker uses FakeImportC mode, so - // it doesn't invoke the importer for import "C", - // nor report an error for the import, - // or for any undefined C.f reference. - // We must detect this explicitly and correctly - // mark the package as IllTyped (by reporting an error). - // TODO(adonovan): if these errors are annoying, - // we could just set IllTyped quietly. - if tc.FakeImportC { - outer: - for _, f := range lpkg.Syntax { - for _, imp := range f.Imports { - if imp.Path.Value == `"C"` { - err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} - appendError(err) - break outer - } - } - } - } - - // Record accumulated errors. - illTyped := len(lpkg.Errors) > 0 - if !illTyped { - for _, imp := range lpkg.Imports { - if imp.IllTyped { - illTyped = true - break - } - } - } - lpkg.IllTyped = illTyped -} - -// An importFunc is an implementation of the single-method -// types.Importer interface based on a function value. -type importerFunc func(path string) (*types.Package, error) - -func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } - -// We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) - -func (ld *loader) parseFile(filename string) (*ast.File, error) { - ld.parseCacheMu.Lock() - v, ok := ld.parseCache[filename] - if ok { - // cache hit - ld.parseCacheMu.Unlock() - <-v.ready - } else { - // cache miss - v = &parseValue{ready: make(chan struct{})} - ld.parseCache[filename] = v - ld.parseCacheMu.Unlock() - - var src []byte - for f, contents := range ld.Config.Overlay { - if sameFile(f, filename) { - src = contents - } - } - var err error - if src == nil { - ioLimit <- true // wait - src, err = os.ReadFile(filename) - <-ioLimit // signal - } - if err != nil { - v.err = err - } else { - v.f, v.err = ld.ParseFile(ld.Fset, filename, src) - } - - close(v.ready) - } - return v.f, v.err -} - -// parseFiles reads and parses the Go source files and returns the ASTs -// of the ones that could be at least partially parsed, along with a -// list of I/O and parse errors encountered. -// -// Because files are scanned in parallel, the token.Pos -// positions of the resulting ast.Files are not ordered. -func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - if ld.Config.Context.Err() != nil { - parsed[i] = nil - errors[i] = ld.Config.Context.Err() - continue - } - wg.Add(1) - go func(i int, filename string) { - parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) - } - wg.Wait() - - // Eliminate nils, preserving order. 
- var o int - for _, f := range parsed { - if f != nil { - parsed[o] = f - o++ - } - } - parsed = parsed[:o] - - o = 0 - for _, err := range errors { - if err != nil { - errors[o] = err - o++ - } - } - errors = errors[:o] - - return parsed, errors -} - -// sameFile returns true if x and y have the same basename and denote -// the same file. -func sameFile(x, y string) bool { - if x == y { - // It could be the case that y doesn't exist. - // For instance, it may be an overlay file that - // hasn't been written to disk. To handle that case - // let x == y through. (We added the exact absolute path - // string to the CompiledGoFiles list, so the unwritten - // overlay case implies x==y.) - return true - } - if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) - if xi, err := os.Stat(x); err == nil { - if yi, err := os.Stat(y); err == nil { - return os.SameFile(xi, yi) - } - } - } - return false -} - -// loadFromExportData ensures that type information is present for the specified -// package, loading it from an export data file on the first request. -// On success it sets lpkg.Types to a new Package. -func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { - if lpkg.PkgPath == "" { - log.Fatalf("internal error: Package %s has no PkgPath", lpkg) - } - - // Because gcexportdata.Read has the potential to create or - // modify the types.Package for each node in the transitive - // closure of dependencies of lpkg, all exportdata operations - // must be sequential. (Finer-grained locking would require - // changes to the gcexportdata API.) - // - // The exportMu lock guards the lpkg.Types field and the - // types.Package it points to, for each loaderPackage in the graph. - // - // Not all accesses to Package.Pkg need to be protected by exportMu: - // graph ordering ensures that direct dependencies of source - // packages are fully loaded before the importer reads their Pkg field. - ld.exportMu.Lock() - defer ld.exportMu.Unlock() - - if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { - return nil // cache hit - } - - lpkg.IllTyped = true // fail safe - - if lpkg.ExportFile == "" { - // Errors while building export data will have been printed to stderr. - return fmt.Errorf("no export data file") - } - f, err := os.Open(lpkg.ExportFile) - if err != nil { - return err - } - defer f.Close() - - // Read gc export data. - // - // We don't currently support gccgo export data because all - // underlying workspaces use the gc toolchain. (Even build - // systems that support gccgo don't use it for workspace - // queries.) - r, err := gcexportdata.NewReader(f) - if err != nil { - return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) - } - - // Build the view. - // - // The gcexportdata machinery has no concept of package ID. - // It identifies packages by their PkgPath, which although not - // globally unique is unique within the scope of one invocation - // of the linker, type-checker, or gcexportdata. - // - // So, we must build a PkgPath-keyed view of the global - // (conceptually ID-keyed) cache of packages and pass it to - // gcexportdata. The view must contain every existing - // package that might possibly be mentioned by the - // current package---its transitive closure. - // - // In loadPackage, we unconditionally create a types.Package for - // each dependency so that export data loading does not - // create new ones. 
- // - // TODO(adonovan): it would be simpler and more efficient - // if the export data machinery invoked a callback to - // get-or-create a package instead of a map. - // - view := make(map[string]*types.Package) // view seen by gcexportdata - seen := make(map[*loaderPackage]bool) // all visited packages - var visit func(pkgs map[string]*Package) - visit = func(pkgs map[string]*Package) { - for _, p := range pkgs { - lpkg := ld.pkgs[p.ID] - if !seen[lpkg] { - seen[lpkg] = true - view[lpkg.PkgPath] = lpkg.Types - visit(lpkg.Imports) - } - } - } - visit(lpkg.Imports) - - viewLen := len(view) + 1 // adding the self package - // Parse the export data. - // (May modify incomplete packages in view but not create new ones.) - tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) - if err != nil { - return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) - } - if _, ok := view["go.shape"]; ok { - // Account for the pseudopackage "go.shape" that gets - // created by generic code. - viewLen++ - } - if viewLen != len(view) { - log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) - } - - lpkg.Types = tpkg - lpkg.IllTyped = false - return nil -} - -// impliedLoadMode returns loadMode with its dependencies. -func impliedLoadMode(loadMode LoadMode) LoadMode { - if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { - // All these things require knowing the import graph. - loadMode |= NeedImports - } - - return loadMode -} - -func usesExportData(cfg *Config) bool { - return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 -} - -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go deleted file mode 100644 index a1dcc40b727..00000000000 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "fmt" - "os" - "sort" -) - -// Visit visits all the packages in the import graph whose roots are -// pkgs, calling the optional pre function the first time each package -// is encountered (preorder), and the optional post function after a -// package's dependencies have been visited (postorder). -// The boolean result of pre(pkg) determines whether -// the imports of package pkg are visited. -func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { - seen := make(map[*Package]bool) - var visit func(*Package) - visit = func(pkg *Package) { - if !seen[pkg] { - seen[pkg] = true - - if pre == nil || pre(pkg) { - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } - sort.Strings(paths) // Imports is a map, this makes visit stable - for _, path := range paths { - visit(pkg.Imports[path]) - } - } - - if post != nil { - post(pkg) - } - } - } - for _, pkg := range pkgs { - visit(pkg) - } -} - -// PrintErrors prints to os.Stderr the accumulated errors of all -// packages in the import graph rooted at pkgs, dependencies first. -// PrintErrors returns the number of errors printed. 
-func PrintErrors(pkgs []*Package) int { - var n int - Visit(pkgs, nil, func(pkg *Package) { - for _, err := range pkg.Errors { - fmt.Fprintln(os.Stderr, err) - n++ - } - }) - return n -} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go deleted file mode 100644 index 11d5c8c3adf..00000000000 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ /dev/null @@ -1,752 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package objectpath defines a naming scheme for types.Objects -// (that is, named entities in Go programs) relative to their enclosing -// package. -// -// Type-checker objects are canonical, so they are usually identified by -// their address in memory (a pointer), but a pointer has meaning only -// within one address space. By contrast, objectpath names allow the -// identity of an object to be sent from one program to another, -// establishing a correspondence between types.Object variables that are -// distinct but logically equivalent. -// -// A single object may have multiple paths. In this example, -// -// type A struct{ X int } -// type B A -// -// the field X has two paths due to its membership of both A and B. -// The For(obj) function always returns one of these paths, arbitrarily -// but consistently. -package objectpath - -import ( - "fmt" - "go/types" - "strconv" - "strings" - - "golang.org/x/tools/internal/typeparams" -) - -// A Path is an opaque name that identifies a types.Object -// relative to its package. Conceptually, the name consists of a -// sequence of destructuring operations applied to the package scope -// to obtain the original object. -// The name does not include the package itself. -type Path string - -// Encoding -// -// An object path is a textual and (with training) human-readable encoding -// of a sequence of destructuring operators, starting from a types.Package. -// The sequences represent a path through the package/object/type graph. -// We classify these operators by their type: -// -// PO package->object Package.Scope.Lookup -// OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] -// TO type->object Type.{At,Field,Method,Obj} [AFMO] -// -// All valid paths start with a package and end at an object -// and thus may be defined by the regular language: -// -// objectpath = PO (OT TT* TO)* -// -// The concrete encoding follows directly: -// - The only PO operator is Package.Scope.Lookup, which requires an identifier. -// - The only OT operator is Object.Type, -// which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, -// which is encoded as a string of decimal digits. -// - The TO operators are encoded as [AFMO]; -// three of these (At,Field,Method) require an integer operand, -// which is encoded as a string of decimal digits. -// These indices are stable across different representations -// of the same package, even source and export data. -// The indices used are implementation specific and may not correspond to -// the argument to the go/types function. 
-// -// In the example below, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// field X has the path "T.UM0.RA1.F0", -// representing the following sequence of operations: -// -// p.Lookup("T") T -// .Type().Underlying().Method(0). f -// .Type().Results().At(1) b -// .Type().Field(0) X -// -// The encoding is not maximally compact---every R or P is -// followed by an A, for example---but this simplifies the -// encoder and decoder. -const ( - // object->type operators - opType = '.' // .Type() (Object) - - // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) - - // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) -) - -// For is equivalent to new(Encoder).For(obj). -// -// It may be more efficient to reuse a single Encoder across several calls. -func For(obj types.Object) (Path, error) { - return new(Encoder).For(obj) -} - -// An Encoder amortizes the cost of encoding the paths of multiple objects. -// The zero value of an Encoder is ready to use. -type Encoder struct { - scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects -} - -// For returns the path to an object relative to its package, -// or an error if the object is not accessible from the package's Scope. -// -// The For function guarantees to return a path only for the following objects: -// - package-level types -// - exported package-level non-types -// - methods -// - parameter and result variables -// - struct fields -// These objects are sufficient to define the API of their package. -// The objects described by a package's export data are drawn from this set. -// -// The set of objects accessible from a package's Scope depends on -// whether the package was produced by type-checking syntax, or -// reading export data; the latter may have a smaller Scope since -// export data trims objects that are not reachable from an exported -// declaration. For example, the For function will return a path for -// an exported method of an unexported type that is not reachable -// from any public declaration; this path will cause the Object -// function to fail if called on a package loaded from export data. -// TODO(adonovan): is this a bug or feature? Should this package -// compute accessibility in the same way? -// -// For does not return a path for predeclared names, imported package -// names, local names, and unexported package-level names (except -// types). -// -// Example: given this definition, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// For(X) would return a path that denotes the following sequence of operations: -// -// p.Scope().Lookup("T") (TypeName T) -// .Type().Underlying().Method(0). (method Func f) -// .Type().Results().At(1) (field Var b) -// .Type().Field(0) (field Var X) -// -// where p is the package (*types.Package) to which X belongs. -func (enc *Encoder) For(obj types.Object) (Path, error) { - pkg := obj.Pkg() - - // This table lists the cases of interest. 
- // - // Object Action - // ------ ------ - // nil reject - // builtin reject - // pkgname reject - // label reject - // var - // package-level accept - // func param/result accept - // local reject - // struct field accept - // const - // package-level accept - // local reject - // func - // package-level accept - // init functions reject - // concrete method accept - // interface method accept - // type - // package-level accept - // local reject - // - // The only accessible package-level objects are members of pkg itself. - // - // The cases are handled in four steps: - // - // 1. reject nil and builtin - // 2. accept package-level objects - // 3. reject obviously invalid objects - // 4. search the API for the path to the param/result/field/method. - - // 1. reference to nil or builtin? - if pkg == nil { - return "", fmt.Errorf("predeclared %s has no path", obj) - } - scope := pkg.Scope() - - // 2. package-level object? - if scope.Lookup(obj.Name()) == obj { - // Only exported objects (and non-exported types) have a path. - // Non-exported types may be referenced by other objects. - if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { - return "", fmt.Errorf("no path for non-exported %v", obj) - } - return Path(obj.Name()), nil - } - - // 3. Not a package-level object. - // Reject obviously non-viable cases. - switch obj := obj.(type) { - case *types.TypeName: - if _, ok := obj.Type().(*types.TypeParam); !ok { - // With the exception of type parameters, only package-level type names - // have a path. - return "", fmt.Errorf("no path for %v", obj) - } - case *types.Const, // Only package-level constants have a path. - *types.Label, // Labels are function-local. - *types.PkgName: // PkgNames are file-local. - return "", fmt.Errorf("no path for %v", obj) - - case *types.Var: - // Could be: - // - a field (obj.IsField()) - // - a func parameter or result - // - a local var. - // Sadly there is no way to distinguish - // a param/result from a local - // so we must proceed to the find. - - case *types.Func: - // A func, if not package-level, must be a method. - if recv := obj.Type().(*types.Signature).Recv(); recv == nil { - return "", fmt.Errorf("func is not a method: %v", obj) - } - - if path, ok := enc.concreteMethod(obj); ok { - // Fast path for concrete methods that avoids looping over scope. - return path, nil - } - - default: - panic(obj) - } - - // 4. Search the API for the path to the var (field/param/result) or method. - - // First inspect package-level named types. - // In the presence of path aliases, these give - // the best paths because non-types may - // refer to types, but not the reverse. - empty := make([]byte, 0, 48) // initial space - objs := enc.scopeObjects(scope) - for _, o := range objs { - tname, ok := o.(*types.TypeName) - if !ok { - continue // handle non-types in second pass - } - - path := append(empty, o.Name()...) - path = append(path, opType) - - T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { - return Path(r), nil - } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { - // generic named type - return Path(r), nil - } - } - // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { - return Path(r), nil - } - } - } - - // Then inspect everything else: - // non-types, and declared methods of defined types. - for _, o := range objs { - path := append(empty, o.Name()...) 
- if _, ok := o.(*types.TypeName); !ok { - if o.Exported() { - // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { - return Path(r), nil - } - } - continue - } - - // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { - path = append(path, opType) - // The method index here is always with respect - // to the underlying go/types data structures, - // which ultimately derives from source order - // and must be preserved by export data. - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } - } - } - } - - return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) -} - -func appendOpArg(path []byte, op byte, arg int) []byte { - path = append(path, op) - path = strconv.AppendInt(path, int64(arg), 10) - return path -} - -// concreteMethod returns the path for meth, which must have a non-nil receiver. -// The second return value indicates success and may be false if the method is -// an interface method or if it is an instantiated method. -// -// This function is just an optimization that avoids the general scope walking -// approach. You are expected to fall back to the general approach if this -// function fails. -func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { - // Concrete methods can only be declared on package-scoped named types. For - // that reason we can skip the expensive walk over the package scope: the - // path will always be package -> named type -> method. We can trivially get - // the type name from the receiver, and only have to look over the type's - // methods to find the method index. - // - // Methods on generic types require special consideration, however. Consider - // the following package: - // - // L1: type S[T any] struct{} - // L2: func (recv S[A]) Foo() { recv.Bar() } - // L3: func (recv S[B]) Bar() { } - // L4: type Alias = S[int] - // L5: func _[T any]() { var s S[int]; s.Foo() } - // - // The receivers of methods on generic types are instantiations. L2 and L3 - // instantiate S with the type-parameters A and B, which are scoped to the - // respective methods. L4 and L5 each instantiate S with int. Each of these - // instantiations has its own method set, full of methods (and thus objects) - // with receivers whose types are the respective instantiations. In other - // words, we have - // - // S[A].Foo, S[A].Bar - // S[B].Foo, S[B].Bar - // S[int].Foo, S[int].Bar - // - // We may thus be trying to produce object paths for any of these objects. - // - // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo - // and S.Bar, which are the paths that this function naturally produces. - // - // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that - // don't correspond to the origin methods. For S[int], this is significant. - // The most precise object path for S[int].Foo, for example, is Alias.Foo, - // not S.Foo. Our function, however, would produce S.Foo, which would - // resolve to a different object. - // - // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are - // still the correct paths, since only the origin methods have meaningful - // paths. But this is likely only true for trivial cases and has edge cases. 
- // Since this function is only an optimization, we err on the side of giving - // up, deferring to the slower but definitely correct algorithm. Most users - // of objectpath will only be giving us origin methods, anyway, as referring - // to instantiated methods is usually not useful. - - if typeparams.OriginMethod(meth) != meth { - return "", false - } - - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { - return "", false - } - - if types.IsInterface(named) { - // Named interfaces don't have to be package-scoped - // - // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface - // methods, too, I think. - return "", false - } - - // Preallocate space for the name, opType, opMethod, and some digits. - name := named.Obj().Name() - path := make([]byte, 0, len(name)+8) - path = append(path, name...) - path = append(path, opType) - - // Method indices are w.r.t. the go/types data structures, - // ultimately deriving from source order, - // which is preserved by export data. - for i := 0; i < named.NumMethods(); i++ { - if named.Method(i) == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - - // Due to golang/go#59944, go/types fails to associate the receiver with - // certain methods on cgo types. - // - // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go - // versions gopls supports. - return "", false - // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) -} - -// find finds obj within type T, returning the path to it, or nil if not found. -// -// The seen map is used to short circuit cycles through type parameters. If -// nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { - switch T := T.(type) { - case *types.Basic, *types.Named: - // Named types belonging to pkg were handled already, - // so T must belong to another package. No path. 
- return nil - case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { - return r - } - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { - return r - } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { - return r - } - return find(obj, T.Results(), append(path, opResults), seen) - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - fld := T.Field(i) - path2 := appendOpArg(path, opField, i) - if fld == obj { - return path2 // found field var - } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - v := T.At(i) - path2 := appendOpArg(path, opAt, i) - if v == obj { - return path2 // found param/result var - } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return path2 // found interface method - } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.TypeParam: - name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { - return nil - } - if seen == nil { - seen = make(map[*types.TypeName]bool) - } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { - return r - } - return nil - } - panic(T) -} - -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { - return r - } - } - return nil -} - -// Object returns the object denoted by path p within the package pkg. -func Object(pkg *types.Package, p Path) (types.Object, error) { - pathstr := string(p) - if pathstr == "" { - return nil, fmt.Errorf("empty path") - } - - var pkgobj, suffix string - if dot := strings.IndexByte(pathstr, opType); dot < 0 { - pkgobj = pathstr - } else { - pkgobj = pathstr[:dot] - suffix = pathstr[dot:] // suffix starts with "." - } - - obj := pkg.Scope().Lookup(pkgobj) - if obj == nil { - return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) - } - - // abstraction of *types.{Pointer,Slice,Array,Chan,Map} - type hasElem interface { - Elem() types.Type - } - // abstraction of *types.{Named,Signature} - type hasTypeParams interface { - TypeParams() *types.TypeParamList - } - // abstraction of *types.{Named,TypeParam} - type hasObj interface { - Obj() *types.TypeName - } - - // The loop state is the pair (t, obj), - // exactly one of which is non-nil, initially obj. - // All suffixes start with '.' (the only object->type operation), - // followed by optional type->type operations, - // then a type->object operation. - // The cycle then repeats. - var t types.Type - for suffix != "" { - code := suffix[0] - suffix = suffix[1:] - - // Codes [AFM] have an integer operand. 
- var index int - switch code { - case opAt, opField, opMethod, opTypeParam: - rest := strings.TrimLeft(suffix, "0123456789") - numerals := suffix[:len(suffix)-len(rest)] - suffix = rest - i, err := strconv.Atoi(numerals) - if err != nil { - return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) - } - index = int(i) - case opObj: - // no operand - default: - // The suffix must end with a type->object operation. - if suffix == "" { - return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) - } - } - - if code == opType { - if t != nil { - return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) - } - t = obj.Type() - obj = nil - continue - } - - if t == nil { - return nil, fmt.Errorf("invalid path: code %q in object context", code) - } - - // Inv: t != nil, obj == nil - - switch code { - case opElem: - hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) - } - t = hasElem.Elem() - - case opKey: - mapType, ok := t.(*types.Map) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) - } - t = mapType.Key() - - case opParams: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Params() - - case opResults: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Results() - - case opUnderlying: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) - } - t = named.Underlying() - - case opTypeParam: - hasTypeParams, ok := t.(hasTypeParams) // Named, Signature - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) - } - tparams := hasTypeParams.TypeParams() - if n := tparams.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - t = tparams.At(index) - - case opConstraint: - tparam, ok := t.(*types.TypeParam) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) - } - t = tparam.Constraint() - - case opAt: - tuple, ok := t.(*types.Tuple) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) - } - if n := tuple.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - obj = tuple.At(index) - t = nil - - case opField: - structType, ok := t.(*types.Struct) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) - } - if n := structType.NumFields(); index >= n { - return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) - } - obj = structType.Field(index) - t = nil - - case opMethod: - switch t := t.(type) { - case *types.Interface: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) // Id-ordered - - case *types.Named: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) - - default: - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) - } - t = nil - - case opObj: - hasObj, ok := t.(hasObj) - if !ok { - return 
nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) - } - obj = hasObj.Obj() - t = nil - - default: - return nil, fmt.Errorf("invalid path: unknown code %q", code) - } - } - - if obj.Pkg() != pkg { - return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) - } - - return obj, nil // success -} - -// scopeObjects is a memoization of scope objects. -// Callers must not modify the result. -func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { - m := enc.scopeMemo - if m == nil { - m = make(map[*types.Scope][]types.Object) - enc.scopeMemo = m - } - objs, ok := m[scope] - if !ok { - names := scope.Names() // allocates and sorts - objs = make([]types.Object, len(names)) - for i, name := range names { - objs[i] = scope.Lookup(name) - } - m[scope] = objs - } - return objs -} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go deleted file mode 100644 index a6cf0e64a4b..00000000000 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package core provides support for event based telemetry. -package core - -import ( - "fmt" - "time" - - "golang.org/x/tools/internal/event/label" -) - -// Event holds the information about an event of note that occurred. -type Event struct { - at time.Time - - // As events are often on the stack, storing the first few labels directly - // in the event can avoid an allocation at all for the very common cases of - // simple events. - // The length needs to be large enough to cope with the majority of events - // but no so large as to cause undue stack pressure. - // A log message with two values will use 3 labels (one for each value and - // one for the message itself). - - static [3]label.Label // inline storage for the first few labels - dynamic []label.Label // dynamically sized storage for remaining labels -} - -// eventLabelMap implements label.Map for a the labels of an Event. -type eventLabelMap struct { - event Event -} - -func (ev Event) At() time.Time { return ev.at } - -func (ev Event) Format(f fmt.State, r rune) { - if !ev.at.IsZero() { - fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) - } - for index := 0; ev.Valid(index); index++ { - if l := ev.Label(index); l.Valid() { - fmt.Fprintf(f, "\n\t%v", l) - } - } -} - -func (ev Event) Valid(index int) bool { - return index >= 0 && index < len(ev.static)+len(ev.dynamic) -} - -func (ev Event) Label(index int) label.Label { - if index < len(ev.static) { - return ev.static[index] - } - return ev.dynamic[index-len(ev.static)] -} - -func (ev Event) Find(key label.Key) label.Label { - for _, l := range ev.static { - if l.Key() == key { - return l - } - } - for _, l := range ev.dynamic { - if l.Key() == key { - return l - } - } - return label.Label{} -} - -func MakeEvent(static [3]label.Label, labels []label.Label) Event { - return Event{ - static: static, - dynamic: labels, - } -} - -// CloneEvent event returns a copy of the event with the time adjusted to at. 
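The objectpath encoding removed from vendor/ above is easiest to follow end to end with a small, self-contained sketch; the package itself remains importable as golang.org/x/tools/go/types/objectpath. The toy source, the identifier names, and the exact path printed are assumptions made only for illustration; For and Object are the two entry points documented in the deleted file.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/objectpath"
)

// A tiny package mirroring the worked example in the deleted documentation.
const src = `package p

type T interface {
	F() (a string, b struct{ X int })
}
`

func main() {
	// Type-check the snippet so we have real types.Objects to encode.
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Navigate to the struct field X by hand:
	// T -> underlying interface -> method F -> results -> b -> field X.
	iface := pkg.Scope().Lookup("T").Type().Underlying().(*types.Interface)
	x := iface.Method(0).Type().(*types.Signature).Results().At(1).Type().(*types.Struct).Field(0)

	// For encodes X relative to its package; Object decodes the path again,
	// yielding a logically equivalent object rather than the same pointer.
	path, err := objectpath.For(x)
	if err != nil {
		log.Fatal(err)
	}
	obj, _ := objectpath.Object(pkg, path)
	fmt.Println(path, "->", obj) // a path of the form T.UM0.RA1.F0
}

The round trip is the point of the package: it lets a tool ship an object's identity across a process boundary instead of relying on in-memory pointers.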
-func CloneEvent(ev Event, at time.Time) Event { - ev.at = at - return ev -} diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go deleted file mode 100644 index 05f3a9a5791..00000000000 --- a/vendor/golang.org/x/tools/internal/event/core/export.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import ( - "context" - "sync/atomic" - "time" - "unsafe" - - "golang.org/x/tools/internal/event/label" -) - -// Exporter is a function that handles events. -// It may return a modified context and event. -type Exporter func(context.Context, Event, label.Map) context.Context - -var ( - exporter unsafe.Pointer -) - -// SetExporter sets the global exporter function that handles all events. -// The exporter is called synchronously from the event call site, so it should -// return quickly so as not to hold up user code. -func SetExporter(e Exporter) { - p := unsafe.Pointer(&e) - if e == nil { - // &e is always valid, and so p is always valid, but for the early abort - // of ProcessEvent to be efficient it needs to make the nil check on the - // pointer without having to dereference it, so we make the nil function - // also a nil pointer - p = nil - } - atomic.StorePointer(&exporter, p) -} - -// deliver is called to deliver an event to the supplied exporter. -// it will fill in the time. -func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { - // add the current time to the event - ev.at = time.Now() - // hand the event off to the current exporter - return exporter(ctx, ev, ev) -} - -// Export is called to deliver an event to the global exporter if set. -func Export(ctx context.Context, ev Event) context.Context { - // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) - if exporterPtr == nil { - return ctx - } - return deliver(ctx, *exporterPtr, ev) -} - -// ExportPair is called to deliver a start event to the supplied exporter. -// It also returns a function that will deliver the end event to the same -// exporter. -// It will fill in the time. -func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { - // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) - if exporterPtr == nil { - return ctx, func() {} - } - ctx = deliver(ctx, *exporterPtr, begin) - return ctx, func() { deliver(ctx, *exporterPtr, end) } -} diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go deleted file mode 100644 index 06c1d4615e6..00000000000 --- a/vendor/golang.org/x/tools/internal/event/core/fast.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import ( - "context" - - "golang.org/x/tools/internal/event/keys" - "golang.org/x/tools/internal/event/label" -) - -// Log1 takes a message and one label delivers a log event to the exporter. -// It is a customized version of Print that is faster and does no allocation. 
-func Log1(ctx context.Context, message string, t1 label.Label) { - Export(ctx, MakeEvent([3]label.Label{ - keys.Msg.Of(message), - t1, - }, nil)) -} - -// Log2 takes a message and two labels and delivers a log event to the exporter. -// It is a customized version of Print that is faster and does no allocation. -func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { - Export(ctx, MakeEvent([3]label.Label{ - keys.Msg.Of(message), - t1, - t2, - }, nil)) -} - -// Metric1 sends a label event to the exporter with the supplied labels. -func Metric1(ctx context.Context, t1 label.Label) context.Context { - return Export(ctx, MakeEvent([3]label.Label{ - keys.Metric.New(), - t1, - }, nil)) -} - -// Metric2 sends a label event to the exporter with the supplied labels. -func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { - return Export(ctx, MakeEvent([3]label.Label{ - keys.Metric.New(), - t1, - t2, - }, nil)) -} - -// Start1 sends a span start event with the supplied label list to the exporter. -// It also returns a function that will end the span, which should normally be -// deferred. -func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { - return ExportPair(ctx, - MakeEvent([3]label.Label{ - keys.Start.Of(name), - t1, - }, nil), - MakeEvent([3]label.Label{ - keys.End.New(), - }, nil)) -} - -// Start2 sends a span start event with the supplied label list to the exporter. -// It also returns a function that will end the span, which should normally be -// deferred. -func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { - return ExportPair(ctx, - MakeEvent([3]label.Label{ - keys.Start.Of(name), - t1, - t2, - }, nil), - MakeEvent([3]label.Label{ - keys.End.New(), - }, nil)) -} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go deleted file mode 100644 index 5dc6e6babed..00000000000 --- a/vendor/golang.org/x/tools/internal/event/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package event provides a set of packages that cover the main -// concepts of telemetry in an implementation agnostic way. -package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go deleted file mode 100644 index 4d55e577d1a..00000000000 --- a/vendor/golang.org/x/tools/internal/event/event.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package event - -import ( - "context" - - "golang.org/x/tools/internal/event/core" - "golang.org/x/tools/internal/event/keys" - "golang.org/x/tools/internal/event/label" -) - -// Exporter is a function that handles events. -// It may return a modified context and event. -type Exporter func(context.Context, core.Event, label.Map) context.Context - -// SetExporter sets the global exporter function that handles all events. -// The exporter is called synchronously from the event call site, so it should -// return quickly so as not to hold up user code. -func SetExporter(e Exporter) { - core.SetExporter(core.Exporter(e)) -} - -// Log takes a message and a label list and combines them into a single event -// before delivering them to the exporter. 
-func Log(ctx context.Context, message string, labels ...label.Label) { - core.Export(ctx, core.MakeEvent([3]label.Label{ - keys.Msg.Of(message), - }, labels)) -} - -// IsLog returns true if the event was built by the Log function. -// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsLog(ev core.Event) bool { - return ev.Label(0).Key() == keys.Msg -} - -// Error takes a message and a label list and combines them into a single event -// before delivering them to the exporter. It captures the error in the -// delivered event. -func Error(ctx context.Context, message string, err error, labels ...label.Label) { - core.Export(ctx, core.MakeEvent([3]label.Label{ - keys.Msg.Of(message), - keys.Err.Of(err), - }, labels)) -} - -// IsError returns true if the event was built by the Error function. -// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsError(ev core.Event) bool { - return ev.Label(0).Key() == keys.Msg && - ev.Label(1).Key() == keys.Err -} - -// Metric sends a label event to the exporter with the supplied labels. -func Metric(ctx context.Context, labels ...label.Label) { - core.Export(ctx, core.MakeEvent([3]label.Label{ - keys.Metric.New(), - }, labels)) -} - -// IsMetric returns true if the event was built by the Metric function. -// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsMetric(ev core.Event) bool { - return ev.Label(0).Key() == keys.Metric -} - -// Label sends a label event to the exporter with the supplied labels. -func Label(ctx context.Context, labels ...label.Label) context.Context { - return core.Export(ctx, core.MakeEvent([3]label.Label{ - keys.Label.New(), - }, labels)) -} - -// IsLabel returns true if the event was built by the Label function. -// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsLabel(ev core.Event) bool { - return ev.Label(0).Key() == keys.Label -} - -// Start sends a span start event with the supplied label list to the exporter. -// It also returns a function that will end the span, which should normally be -// deferred. -func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { - return core.ExportPair(ctx, - core.MakeEvent([3]label.Label{ - keys.Start.Of(name), - }, labels), - core.MakeEvent([3]label.Label{ - keys.End.New(), - }, nil)) -} - -// IsStart returns true if the event was built by the Start function. -// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsStart(ev core.Event) bool { - return ev.Label(0).Key() == keys.Start -} - -// IsEnd returns true if the event was built by the End function. -// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsEnd(ev core.Event) bool { - return ev.Label(0).Key() == keys.End -} - -// Detach returns a context without an associated span. -// This allows the creation of spans that are not children of the current span. -func Detach(ctx context.Context) context.Context { - return core.Export(ctx, core.MakeEvent([3]label.Label{ - keys.Detach.New(), - }, nil)) -} - -// IsDetach returns true if the event was built by the Detach function. 
-// It is intended to be used in exporters to identify the semantics of the -// event when deciding what to do with it. -func IsDetach(ev core.Event) bool { - return ev.Label(0).Key() == keys.Detach -} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go deleted file mode 100644 index a02206e3015..00000000000 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ /dev/null @@ -1,564 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package keys - -import ( - "fmt" - "io" - "math" - "strconv" - - "golang.org/x/tools/internal/event/label" -) - -// Value represents a key for untyped values. -type Value struct { - name string - description string -} - -// New creates a new Key for untyped values. -func New(name, description string) *Value { - return &Value{name: name, description: description} -} - -func (k *Value) Name() string { return k.name } -func (k *Value) Description() string { return k.description } - -func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { - fmt.Fprint(w, k.From(l)) -} - -// Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return nil -} - -// From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } - -// Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } - -// Tag represents a key for tagging labels that have no value. -// These are used when the existence of the label is the entire information it -// carries, such as marking events to be of a specific kind, or from a specific -// package. -type Tag struct { - name string - description string -} - -// NewTag creates a new Key for tagging labels. -func NewTag(name, description string) *Tag { - return &Tag{name: name, description: description} -} - -func (k *Tag) Name() string { return k.name } -func (k *Tag) Description() string { return k.description } - -func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} - -// New creates a new Label with this key. -func (k *Tag) New() label.Label { return label.OfValue(k, nil) } - -// Int represents a key -type Int struct { - name string - description string -} - -// NewInt creates a new Key for int values. -func NewInt(name, description string) *Int { - return &Int{name: name, description: description} -} - -func (k *Int) Name() string { return k.name } -func (k *Int) Description() string { return k.description } - -func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int) Get(lm label.Map) int { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } - -// Int8 represents a key -type Int8 struct { - name string - description string -} - -// NewInt8 creates a new Key for int8 values. 
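The event and event/core files removed above describe x/tools' internal telemetry API (SetExporter, Log, and the Is* predicates). A minimal sketch of that shape follows, assuming a made-up key named port; because these packages live under internal/, code outside the x/tools module cannot actually import them, so this is illustration only.

package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

// port is a hypothetical key defined only for this sketch.
var port = keys.NewInt("port", "listening port")

func main() {
	// The exporter is called synchronously for every event; this one just
	// prints log events and passes the context through unchanged.
	event.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
		if event.IsLog(ev) {
			fmt.Printf("%v\n", ev)
		}
		return ctx
	})

	// Log packs the message key plus any extra labels into a single event.
	event.Log(context.Background(), "server started", port.Of(8080))
}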
-func NewInt8(name, description string) *Int8 { - return &Int8{name: name, description: description} -} - -func (k *Int8) Name() string { return k.name } -func (k *Int8) Description() string { return k.description } - -func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int8) Get(lm label.Map) int8 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } - -// Int16 represents a key -type Int16 struct { - name string - description string -} - -// NewInt16 creates a new Key for int16 values. -func NewInt16(name, description string) *Int16 { - return &Int16{name: name, description: description} -} - -func (k *Int16) Name() string { return k.name } -func (k *Int16) Description() string { return k.description } - -func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int16) Get(lm label.Map) int16 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } - -// Int32 represents a key -type Int32 struct { - name string - description string -} - -// NewInt32 creates a new Key for int32 values. -func NewInt32(name, description string) *Int32 { - return &Int32{name: name, description: description} -} - -func (k *Int32) Name() string { return k.name } -func (k *Int32) Description() string { return k.description } - -func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int32) Get(lm label.Map) int32 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } - -// Int64 represents a key -type Int64 struct { - name string - description string -} - -// NewInt64 creates a new Key for int64 values. -func NewInt64(name, description string) *Int64 { - return &Int64{name: name, description: description} -} - -func (k *Int64) Name() string { return k.name } -func (k *Int64) Description() string { return k.description } - -func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendInt(buf, k.From(l), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Int64) Get(lm label.Map) int64 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. 
-func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } - -// UInt represents a key -type UInt struct { - name string - description string -} - -// NewUInt creates a new Key for uint values. -func NewUInt(name, description string) *UInt { - return &UInt{name: name, description: description} -} - -func (k *UInt) Name() string { return k.name } -func (k *UInt) Description() string { return k.description } - -func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt) Get(lm label.Map) uint { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } - -// UInt8 represents a key -type UInt8 struct { - name string - description string -} - -// NewUInt8 creates a new Key for uint8 values. -func NewUInt8(name, description string) *UInt8 { - return &UInt8{name: name, description: description} -} - -func (k *UInt8) Name() string { return k.name } -func (k *UInt8) Description() string { return k.description } - -func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt8) Get(lm label.Map) uint8 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } - -// UInt16 represents a key -type UInt16 struct { - name string - description string -} - -// NewUInt16 creates a new Key for uint16 values. -func NewUInt16(name, description string) *UInt16 { - return &UInt16{name: name, description: description} -} - -func (k *UInt16) Name() string { return k.name } -func (k *UInt16) Description() string { return k.description } - -func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt16) Get(lm label.Map) uint16 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } - -// UInt32 represents a key -type UInt32 struct { - name string - description string -} - -// NewUInt32 creates a new Key for uint32 values. -func NewUInt32(name, description string) *UInt32 { - return &UInt32{name: name, description: description} -} - -func (k *UInt32) Name() string { return k.name } -func (k *UInt32) Description() string { return k.description } - -func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) -} - -// Of creates a new Label with this key and the supplied value. 
-func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt32) Get(lm label.Map) uint32 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } - -// UInt64 represents a key -type UInt64 struct { - name string - description string -} - -// NewUInt64 creates a new Key for uint64 values. -func NewUInt64(name, description string) *UInt64 { - return &UInt64{name: name, description: description} -} - -func (k *UInt64) Name() string { return k.name } -func (k *UInt64) Description() string { return k.description } - -func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendUint(buf, k.From(l), 10)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } - -// Get can be used to get a label for the key from a label.Map. -func (k *UInt64) Get(lm label.Map) uint64 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } - -// Float32 represents a key -type Float32 struct { - name string - description string -} - -// NewFloat32 creates a new Key for float32 values. -func NewFloat32(name, description string) *Float32 { - return &Float32{name: name, description: description} -} - -func (k *Float32) Name() string { return k.name } -func (k *Float32) Description() string { return k.description } - -func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Float32) Of(v float32) label.Label { - return label.Of64(k, uint64(math.Float32bits(v))) -} - -// Get can be used to get a label for the key from a label.Map. -func (k *Float32) Get(lm label.Map) float32 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Float32) From(t label.Label) float32 { - return math.Float32frombits(uint32(t.Unpack64())) -} - -// Float64 represents a key -type Float64 struct { - name string - description string -} - -// NewFloat64 creates a new Key for int64 values. -func NewFloat64(name, description string) *Float64 { - return &Float64{name: name, description: description} -} - -func (k *Float64) Name() string { return k.name } -func (k *Float64) Description() string { return k.description } - -func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64)) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Float64) Of(v float64) label.Label { - return label.Of64(k, math.Float64bits(v)) -} - -// Get can be used to get a label for the key from a label.Map. -func (k *Float64) Get(lm label.Map) float64 { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return 0 -} - -// From can be used to get a value from a Label. -func (k *Float64) From(t label.Label) float64 { - return math.Float64frombits(t.Unpack64()) -} - -// String represents a key -type String struct { - name string - description string -} - -// NewString creates a new Key for int64 values. 
-func NewString(name, description string) *String { - return &String{name: name, description: description} -} - -func (k *String) Name() string { return k.name } -func (k *String) Description() string { return k.description } - -func (k *String) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendQuote(buf, k.From(l))) -} - -// Of creates a new Label with this key and the supplied value. -func (k *String) Of(v string) label.Label { return label.OfString(k, v) } - -// Get can be used to get a label for the key from a label.Map. -func (k *String) Get(lm label.Map) string { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return "" -} - -// From can be used to get a value from a Label. -func (k *String) From(t label.Label) string { return t.UnpackString() } - -// Boolean represents a key -type Boolean struct { - name string - description string -} - -// NewBoolean creates a new Key for bool values. -func NewBoolean(name, description string) *Boolean { - return &Boolean{name: name, description: description} -} - -func (k *Boolean) Name() string { return k.name } -func (k *Boolean) Description() string { return k.description } - -func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) { - w.Write(strconv.AppendBool(buf, k.From(l))) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Boolean) Of(v bool) label.Label { - if v { - return label.Of64(k, 1) - } - return label.Of64(k, 0) -} - -// Get can be used to get a label for the key from a label.Map. -func (k *Boolean) Get(lm label.Map) bool { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return false -} - -// From can be used to get a value from a Label. -func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 } - -// Error represents a key -type Error struct { - name string - description string -} - -// NewError creates a new Key for int64 values. -func NewError(name, description string) *Error { - return &Error{name: name, description: description} -} - -func (k *Error) Name() string { return k.name } -func (k *Error) Description() string { return k.description } - -func (k *Error) Format(w io.Writer, buf []byte, l label.Label) { - io.WriteString(w, k.From(l).Error()) -} - -// Of creates a new Label with this key and the supplied value. -func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) } - -// Get can be used to get a label for the key from a label.Map. -func (k *Error) Get(lm label.Map) error { - if t := lm.Find(k); t.Valid() { - return k.From(t) - } - return nil -} - -// From can be used to get a value from a Label. -func (k *Error) From(t label.Label) error { - err, _ := t.UnpackValue().(error) - return err -} diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go deleted file mode 100644 index 7e958665921..00000000000 --- a/vendor/golang.org/x/tools/internal/event/keys/standard.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package keys - -var ( - // Msg is a key used to add message strings to label lists. - Msg = NewString("message", "a readable message") - // Label is a key used to indicate an event adds labels to the context. - Label = NewTag("label", "a label context marker") - // Start is used for things like traces that have a name. 
- Start = NewString("start", "span start") - // Metric is a key used to indicate an event records metrics. - End = NewTag("end", "a span end marker") - // Metric is a key used to indicate an event records metrics. - Detach = NewTag("detach", "a span detach marker") - // Err is a key used to add error values to label lists. - Err = NewError("error", "an error that occurred") - // Metric is a key used to indicate an event records metrics. - Metric = NewTag("metric", "a metric event marker") -) diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go deleted file mode 100644 index c0e8e731c90..00000000000 --- a/vendor/golang.org/x/tools/internal/event/keys/util.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package keys - -import ( - "sort" - "strings" -) - -// Join returns a canonical join of the keys in S: -// a sorted comma-separated string list. -func Join[S ~[]T, T ~string](s S) string { - strs := make([]string, 0, len(s)) - for _, v := range s { - strs = append(strs, string(v)) - } - sort.Strings(strs) - return strings.Join(strs, ",") -} diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go deleted file mode 100644 index 0f526e1f9ab..00000000000 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package label - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -// Key is used as the identity of a Label. -// Keys are intended to be compared by pointer only, the name should be unique -// for communicating with external systems, but it is not required or enforced. -type Key interface { - // Name returns the key name. - Name() string - // Description returns a string that can be used to describe the value. - Description() string - - // Format is used in formatting to append the value of the label to the - // supplied buffer. - // The formatter may use the supplied buf as a scratch area to avoid - // allocations. - Format(w io.Writer, buf []byte, l Label) -} - -// Label holds a key and value pair. -// It is normally used when passing around lists of labels. -type Label struct { - key Key - packed uint64 - untyped interface{} -} - -// Map is the interface to a collection of Labels indexed by key. -type Map interface { - // Find returns the label that matches the supplied key. - Find(key Key) Label -} - -// List is the interface to something that provides an iterable -// list of labels. -// Iteration should start from 0 and continue until Valid returns false. -type List interface { - // Valid returns true if the index is within range for the list. - // It does not imply the label at that index will itself be valid. - Valid(index int) bool - // Label returns the label at the given index. - Label(index int) Label -} - -// list implements LabelList for a list of Labels. -type list struct { - labels []Label -} - -// filter wraps a LabelList filtering out specific labels. -type filter struct { - keys []Key - underlying List -} - -// listMap implements LabelMap for a simple list of labels. -type listMap struct { - labels []Label -} - -// mapChain implements LabelMap for a list of underlying LabelMap. 
-type mapChain struct { - maps []Map -} - -// OfValue creates a new label from the key and value. -// This method is for implementing new key types, label creation should -// normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } - -// UnpackValue assumes the label was built using LabelOfValue and returns the value -// that was passed to that constructor. -// This method is for implementing new key types, for type safety normal -// access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } - -// Of64 creates a new label from a key and a uint64. This is often -// used for non uint64 values that can be packed into a uint64. -// This method is for implementing new key types, label creation should -// normally be done with the Of method of the key. -func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } - -// Unpack64 assumes the label was built using LabelOf64 and returns the value that -// was passed to that constructor. -// This method is for implementing new key types, for type safety normal -// access should be done with the From method of the key. -func (t Label) Unpack64() uint64 { return t.packed } - -type stringptr unsafe.Pointer - -// OfString creates a new label from a key and a string. -// This method is for implementing new key types, label creation should -// normally be done with the Of method of the key. -func OfString(k Key, v string) Label { - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) - return Label{ - key: k, - packed: uint64(hdr.Len), - untyped: stringptr(hdr.Data), - } -} - -// UnpackString assumes the label was built using LabelOfString and returns the -// value that was passed to that constructor. -// This method is for implementing new key types, for type safety normal -// access should be done with the From method of the key. -func (t Label) UnpackString() string { - var v string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) - hdr.Data = uintptr(t.untyped.(stringptr)) - hdr.Len = int(t.packed) - return v -} - -// Valid returns true if the Label is a valid one (it has a key). -func (t Label) Valid() bool { return t.key != nil } - -// Key returns the key of this Label. -func (t Label) Key() Key { return t.key } - -// Format is used for debug printing of labels. 
-func (t Label) Format(f fmt.State, r rune) { - if !t.Valid() { - io.WriteString(f, `nil`) - return - } - io.WriteString(f, t.Key().Name()) - io.WriteString(f, "=") - var buf [128]byte - t.Key().Format(f, buf[:0], t) -} - -func (l *list) Valid(index int) bool { - return index >= 0 && index < len(l.labels) -} - -func (l *list) Label(index int) Label { - return l.labels[index] -} - -func (f *filter) Valid(index int) bool { - return f.underlying.Valid(index) -} - -func (f *filter) Label(index int) Label { - l := f.underlying.Label(index) - for _, f := range f.keys { - if l.Key() == f { - return Label{} - } - } - return l -} - -func (lm listMap) Find(key Key) Label { - for _, l := range lm.labels { - if l.Key() == key { - return l - } - } - return Label{} -} - -func (c mapChain) Find(key Key) Label { - for _, src := range c.maps { - l := src.Find(key) - if l.Valid() { - return l - } - } - return Label{} -} - -var emptyList = &list{} - -func NewList(labels ...Label) List { - if len(labels) == 0 { - return emptyList - } - return &list{labels: labels} -} - -func Filter(l List, keys ...Key) List { - if len(keys) == 0 { - return l - } - return &filter{keys: keys, underlying: l} -} - -func NewMap(labels ...Label) Map { - return listMap{labels: labels} -} - -func MergeMaps(srcs ...Map) Map { - var nonNil []Map - for _, src := range srcs { - if src != nil { - nonNil = append(nonNil, src) - } - } - if len(nonNil) == 1 { - return nonNil[0] - } - return mapChain{maps: nonNil} -} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go deleted file mode 100644 index 581b26c2041..00000000000 --- a/vendor/golang.org/x/tools/internal/event/tag/tag.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tag provides the labels used for telemetry throughout gopls. 
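The label package deleted above is what the keys in the earlier files pack their values into. A small sketch of labels and label maps in use, again with hypothetical keys (method, file) and the same internal-only import caveat.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

// method and file are illustrative keys, not ones defined by x/tools.
var (
	method = keys.NewString("method", "RPC method name")
	file   = keys.NewString("file", "file being processed")
)

func main() {
	// A key's Of method packs a value into a Label; NewMap indexes labels by key.
	lm := label.NewMap(method.Of("initialize"), file.Of("main.go"))

	// Find returns the label for a key; Get is the typed convenience wrapper.
	if l := lm.Find(method); l.Valid() {
		fmt.Println("method =", method.From(l))
	}
	fmt.Println("file =", file.Get(lm))
}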
-package tag - -import ( - "golang.org/x/tools/internal/event/keys" -) - -var ( - // create the label keys we use - Method = keys.NewString("method", "") - StatusCode = keys.NewString("status.code", "") - StatusMessage = keys.NewString("status.message", "") - RPCID = keys.NewString("id", "") - RPCDirection = keys.NewString("direction", "") - File = keys.NewString("file", "") - Directory = keys.New("directory", "") - URI = keys.New("URI", "") - Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs - PackagePath = keys.NewString("package_path", "") - Query = keys.New("query", "") - Snapshot = keys.NewUInt64("snapshot", "") - Operation = keys.NewString("operation", "") - - Position = keys.New("position", "") - Category = keys.NewString("category", "") - PackageCount = keys.NewInt("packages", "") - Files = keys.New("files", "") - Port = keys.NewInt("port", "") - Type = keys.New("type", "") - HoverKind = keys.NewString("hoverkind", "") - - NewServer = keys.NewString("new_server", "A new server was added") - EndServer = keys.NewString("end_server", "A server was shut down") - - ServerID = keys.NewString("server", "The server ID an event is related to") - Logfile = keys.NewString("logfile", "") - DebugAddress = keys.NewString("debug_address", "") - GoplsPath = keys.NewString("gopls_path", "") - ClientID = keys.NewString("client_id", "") - - Level = keys.NewInt("level", "The logging level") -) - -var ( - // create the stats we measure - Started = keys.NewInt64("started", "Count of started RPCs.") - ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) - SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) - Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) -) - -const ( - Inbound = "in" - Outbound = "out" -) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go deleted file mode 100644 index d98b0db2a9a..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains the remaining vestiges of -// $GOROOT/src/go/internal/gcimporter/bimport.go. - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" - "sync" -) - -func errorf(format string, args ...interface{}) { - panic(fmt.Sprintf(format, args...)) -} - -const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go - -// Synthesize a token.Pos -type fakeFileSet struct { - fset *token.FileSet - files map[string]*fileInfo -} - -type fileInfo struct { - file *token.File - lastline int -} - -const maxlines = 64 * 1024 - -func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. - - // Since we don't know the set of needed file positions, we reserve maxlines - // positions per file. We delay calling token.File.SetLines until all - // positions have been calculated (by way of fakeFileSet.setLines), so that - // we can avoid setting unnecessary lines. See also golang/go#46586. - f := s.files[file] - if f == nil { - f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} - s.files[file] = f - } - if line > maxlines { - line = 1 - } - if line > f.lastline { - f.lastline = line - } - - // Return a fake position assuming that f.file consists only of newlines. 
- return token.Pos(f.file.Base() + line - 1) -} - -func (s *fakeFileSet) setLines() { - fakeLinesOnce.Do(func() { - fakeLines = make([]int, maxlines) - for i := range fakeLines { - fakeLines[i] = i - } - }) - for _, f := range s.files { - f.file.SetLines(fakeLines[:f.lastline]) - } -} - -var ( - fakeLines []int - fakeLinesOnce sync.Once -) - -func chanDir(d int) types.ChanDir { - // tag values must match the constants in cmd/compile/internal/gc/go.go - switch d { - case 1 /* Crecv */ : - return types.RecvOnly - case 2 /* Csend */ : - return types.SendOnly - case 3 /* Cboth */ : - return types.SendRecv - default: - errorf("unexpected channel dir %d", d) - return 0 - } -} - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go deleted file mode 100644 index f6437feb1cf..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. - -package gcimporter - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" -) - -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. 
- hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) - if err != nil { - return - } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") - return - } - name = strings.TrimSpace(string(hdr[:16])) - return -} - -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { - // Read first line to make sure this is an object file. - line, err := r.ReadSlice('\n') - if err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") - return - } - - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 - } - - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go deleted file mode 100644 index 2d078ccb19c..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. - -// Package gcimporter provides various functions for reading -// gc-generated object files that can be used to implement the -// Importer interface defined by the Go 1.5 standard library package. -// -// The encoding is deterministic: if the encoder is applied twice to -// the same types.Package data structure, both encodings are equal. -// This property may be important to avoid spurious changes in -// applications such as build systems. -// -// However, the encoder is not necessarily idempotent. Importing an -// exported package may yield a types.Package that, while it -// represents the same set of Go types as the original, may differ in -// the details of its internal representation. 
Because of these -// differences, re-encoding the imported package may yield a -// different, but equally valid, encoding of the package. -package gcimporter // import "golang.org/x/tools/internal/gcimporter" - -import ( - "bufio" - "bytes" - "fmt" - "go/build" - "go/token" - "go/types" - "io" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" -) - -const ( - // Enable debug during development: it adds some additional checks, and - // prevents errors from being recovered. - debug = false - - // If trace is set, debugging output is printed to std out. - trace = false -) - -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { - var rc io.ReadCloser - var filename, id string - if lookup != nil { - // With custom lookup specified, assume that caller has - // converted path to a canonical import path for use in the map. - if path == "unsafe" { - return types.Unsafe, nil - } - id = path - - // No need to re-import if the package was imported completely before. - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - f, err := lookup(path) - if err != nil { - return nil, err - } - rc = f - } else { - filename, id = FindPkg(path, srcDir) - if filename == "" { - if path == "unsafe" { - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %q", id) - } - - // no need to re-import if the package was imported completely before - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // add file name to error - err = fmt.Errorf("%s: %v", filename, err) - } - }() - rc = f - } - defer rc.Close() - - var hdr string - var size int64 - buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go deleted file mode 100644 index 2ee8c70164f..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ /dev/null @@ -1,1321 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/tokeninternal" -) - -// IExportShallow encodes "shallow" export data for the specified package. -// -// No promises are made about the encoding other than that it can be decoded by -// the same version of IIExportShallow. If you plan to save export data in the -// file system, be sure to include a cryptographic digest of the executable in -// the key to avoid version skew. -// -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during export. -// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. -func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { - // In principle this operation can only fail if out.Write fails, - // but that's impossible for bytes.Buffer---and as a matter of - // fact iexportCommon doesn't even check for I/O errors. - // TODO(adonovan): handle I/O errors properly. - // TODO(adonovan): use byte slices throughout, avoiding copying. - const bundle, shallow = false, true - var out bytes.Buffer - err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) - return out.Bytes(), err -} - -// IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. -// -// The importer calls getPackages to obtain package symbols for all -// packages mentioned in the export data, including the one being -// decoded. -// -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during import. 
-// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. -func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { - const bundle = false - const shallow = true - pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) - if err != nil { - return nil, err - } - return pkgs[0], nil -} - -// ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) - -// Current bundled export format version. Increase with each format change. -// 0: initial implementation -const bundleVersion = 0 - -// IExportData writes indexed export data for pkg to out. -// -// If no file set is provided, position info will be missing. -// The package path of the top-level package will not be recorded, -// so that calls to IImportData can override with a provided package path. -func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - const bundle, shallow = false, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) -} - -// IExportBundle writes an indexed export bundle for pkgs to out. -func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - const bundle, shallow = true, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) -} - -func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := iexporter{ - fset: fset, - version: version, - shallow: shallow, - allPkgs: map[*types.Package]bool{}, - stringIndex: map[string]uint64{}, - declIndex: map[types.Object]uint64{}, - tparamNames: map[types.Object]string{}, - typIndex: map[types.Type]uint64{}, - } - if !bundle { - p.localpkg = pkgs[0] - } - - for i, pt := range predeclared() { - p.typIndex[pt] = uint64(i) - } - if len(p.typIndex) > predeclReserved { - panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) - } - - // Initialize work queue with exported declarations. - for _, pkg := range pkgs { - scope := pkg.Scope() - for _, name := range scope.Names() { - if token.IsExported(name) { - p.pushDecl(scope.Lookup(name)) - } - } - - if bundle { - // Ensure pkg and its imports are included in the index. - p.allPkgs[pkg] = true - for _, imp := range pkg.Imports() { - p.allPkgs[imp] = true - } - } - } - - // Loop until no more work. - for !p.declTodo.empty() { - p.doDecl(p.declTodo.popHead()) - } - - // Produce index of offset of each file record in files. - var files intWriter - var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i - if p.shallow { - fileOffset = make([]uint64, len(p.fileInfos)) - for i, info := range p.fileInfos { - fileOffset[i] = uint64(files.Len()) - p.encodeFile(&files, info.file, info.needed) - } - } - - // Append indices to data0 section. - dataLen := uint64(p.data0.Len()) - w := p.newWriter() - w.writeIndex(p.declIndex) - - if bundle { - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.pkg(pkg) - imps := pkg.Imports() - w.uint64(uint64(len(imps))) - for _, imp := range imps { - w.pkg(imp) - } - } - } - w.flush() - - // Assemble header. 
- var hdr intWriter - if bundle { - hdr.uint64(bundleVersion) - } - hdr.uint64(uint64(p.version)) - hdr.uint64(uint64(p.strings.Len())) - if p.shallow { - hdr.uint64(uint64(files.Len())) - hdr.uint64(uint64(len(fileOffset))) - for _, offset := range fileOffset { - hdr.uint64(offset) - } - } - hdr.uint64(dataLen) - - // Flush output. - io.Copy(out, &hdr) - io.Copy(out, &p.strings) - if p.shallow { - io.Copy(out, &files) - } - io.Copy(out, &p.data0) - - return nil -} - -// encodeFile writes to w a representation of the file sufficient to -// faithfully restore position information about all needed offsets. -// Mutates the needed array. -func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { - _ = needed[0] // precondition: needed is non-empty - - w.uint64(p.stringOff(file.Name())) - - size := uint64(file.Size()) - w.uint64(size) - - // Sort the set of needed offsets. Duplicates are harmless. - sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - - lines := tokeninternal.GetLines(file) // byte offset of each line start - w.uint64(uint64(len(lines))) - - // Rather than record the entire array of line start offsets, - // we save only a sparse list of (index, offset) pairs for - // the start of each line that contains a needed position. - var sparse [][2]int // (index, offset) pairs -outer: - for i, lineStart := range lines { - lineEnd := size - if i < len(lines)-1 { - lineEnd = uint64(lines[i+1]) - } - // Does this line contains a needed offset? - if needed[0] < lineEnd { - sparse = append(sparse, [2]int{i, lineStart}) - for needed[0] < lineEnd { - needed = needed[1:] - if len(needed) == 0 { - break outer - } - } - } - } - - // Delta-encode the columns. - w.uint64(uint64(len(sparse))) - var prev [2]int - for _, pair := range sparse { - w.uint64(uint64(pair[0] - prev[0])) - w.uint64(uint64(pair[1] - prev[1])) - prev = pair - } -} - -// writeIndex writes out an object index. mainIndex indicates whether -// we're writing out the main index, which is also read by -// non-compiler tools and includes a complete package description -// (i.e., name and height). -func (w *exportWriter) writeIndex(index map[types.Object]uint64) { - type pkgObj struct { - obj types.Object - name string // qualified name; differs from obj.Name for type params - } - // Build a map from packages to objects from that package. - pkgObjs := map[*types.Package][]pkgObj{} - - // For the main index, make sure to include every package that - // we reference, even if we're not exporting (or reexporting) - // any symbols from it. - if w.p.localpkg != nil { - pkgObjs[w.p.localpkg] = nil - } - for pkg := range w.p.allPkgs { - pkgObjs[pkg] = nil - } - - for obj := range index { - name := w.p.exportName(obj) - pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) - } - - var pkgs []*types.Package - for pkg, objs := range pkgObjs { - pkgs = append(pkgs, pkg) - - sort.Slice(objs, func(i, j int) bool { - return objs[i].name < objs[j].name - }) - } - - sort.Slice(pkgs, func(i, j int) bool { - return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) - }) - - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.string(w.exportPath(pkg)) - w.string(pkg.Name()) - w.uint64(uint64(0)) // package height is not needed for go/types - - objs := pkgObjs[pkg] - w.uint64(uint64(len(objs))) - for _, obj := range objs { - w.string(obj.name) - w.uint64(index[obj.obj]) - } - } -} - -// exportName returns the 'exported' name of an object. 
It differs from -// obj.Name() only for type parameters (see tparamExportName for details). -func (p *iexporter) exportName(obj types.Object) (res string) { - if name := p.tparamNames[obj]; name != "" { - return name - } - return obj.Name() -} - -type iexporter struct { - fset *token.FileSet - out *bytes.Buffer - version int - - shallow bool // don't put types from other packages in the index - objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated - localpkg *types.Package // (nil in bundle mode) - - // allPkgs tracks all packages that have been referenced by - // the export data, so we can ensure to include them in the - // main index. - allPkgs map[*types.Package]bool - - declTodo objQueue - - strings intWriter - stringIndex map[string]uint64 - - // In shallow mode, object positions are encoded as (file, offset). - // Each file is recorded as a line-number table. - // Only the lines of needed positions are saved faithfully. - fileInfo map[*token.File]uint64 // value is index in fileInfos - fileInfos []*filePositions - - data0 intWriter - declIndex map[types.Object]uint64 - tparamNames map[types.Object]string // typeparam->exported name - typIndex map[types.Type]uint64 - - indent int // for tracing support -} - -type filePositions struct { - file *token.File - needed []uint64 // unordered list of needed file offsets -} - -func (p *iexporter) trace(format string, args ...interface{}) { - if !trace { - // Call sites should also be guarded, but having this check here allows - // easily enabling/disabling debug trace statements. - return - } - fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) -} - -// objectpathEncoder returns the lazily allocated objectpath.Encoder to use -// when encoding objects in other packages during shallow export. -// -// Using a shared Encoder amortizes some of cost of objectpath search. -func (p *iexporter) objectpathEncoder() *objectpath.Encoder { - if p.objEncoder == nil { - p.objEncoder = new(objectpath.Encoder) - } - return p.objEncoder -} - -// stringOff returns the offset of s within the string section. -// If not already present, it's added to the end. -func (p *iexporter) stringOff(s string) uint64 { - off, ok := p.stringIndex[s] - if !ok { - off = uint64(p.strings.Len()) - p.stringIndex[s] = off - - p.strings.uint64(uint64(len(s))) - p.strings.WriteString(s) - } - return off -} - -// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. -func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { - index, ok := p.fileInfo[file] - if !ok { - index = uint64(len(p.fileInfo)) - p.fileInfos = append(p.fileInfos, &filePositions{file: file}) - if p.fileInfo == nil { - p.fileInfo = make(map[*token.File]uint64) - } - p.fileInfo[file] = index - } - // Record each needed offset. - info := p.fileInfos[index] - offset := uint64(file.Offset(pos)) - info.needed = append(info.needed, offset) - - return index, offset -} - -// pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(obj types.Object) { - // Package unsafe is known to the compiler and predeclared. - // Caller should not ask us to do export it. - if obj.Pkg() == types.Unsafe { - panic("cannot export package unsafe") - } - - // Shallow export data: don't index decls from other packages. 
- if p.shallow && obj.Pkg() != p.localpkg { - return - } - - if _, ok := p.declIndex[obj]; ok { - return - } - - p.declIndex[obj] = ^uint64(0) // mark obj present in work queue - p.declTodo.pushTail(obj) -} - -// exportWriter handles writing out individual data section chunks. -type exportWriter struct { - p *iexporter - - data intWriter - prevFile string - prevLine int64 - prevColumn int64 -} - -func (w *exportWriter) exportPath(pkg *types.Package) string { - if pkg == w.p.localpkg { - return "" - } - return pkg.Path() -} - -func (p *iexporter) doDecl(obj types.Object) { - if trace { - p.trace("exporting decl %v (%T)", obj, obj) - p.indent++ - defer func() { - p.indent-- - p.trace("=> %s", obj) - }() - } - w := p.newWriter() - - switch obj := obj.(type) { - case *types.Var: - w.tag('V') - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - - case *types.Func: - sig, _ := obj.Type().(*types.Signature) - if sig.Recv() != nil { - // We shouldn't see methods in the package scope, - // but the type checker may repair "func () F() {}" - // to "func (Invalid) F()" and then treat it like "func F()", - // so allow that. See golang/go#57729. - if sig.Recv().Type() != types.Typ[types.Invalid] { - panic(internalErrorf("unexpected method: %v", sig)) - } - } - - // Function. - if sig.TypeParams().Len() == 0 { - w.tag('F') - } else { - w.tag('G') - } - w.pos(obj.Pos()) - // The tparam list of the function type is the declaration of the type - // params. So, write out the type params right now. Then those type params - // will be referenced via their type offset (via typOff) in all other - // places in the signature and function where they are used. - // - // While importing the type parameters, tparamList computes and records - // their export name, so that it can be later used when writing the index. - if tparams := sig.TypeParams(); tparams.Len() > 0 { - w.tparamList(obj.Name(), tparams, obj.Pkg()) - } - w.signature(sig) - - case *types.Const: - w.tag('C') - w.pos(obj.Pos()) - w.value(obj.Type(), obj.Val()) - - case *types.TypeName: - t := obj.Type() - - if tparam, ok := t.(*types.TypeParam); ok { - w.tag('P') - w.pos(obj.Pos()) - constraint := tparam.Constraint() - if p.version >= iexportVersionGo1_18 { - implicit := false - if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = iface.IsImplicit() - } - w.bool(implicit) - } - w.typ(constraint, obj.Pkg()) - break - } - - if obj.IsAlias() { - w.tag('A') - w.pos(obj.Pos()) - w.typ(t, obj.Pkg()) - break - } - - // Defined type. - named, ok := t.(*types.Named) - if !ok { - panic(internalErrorf("%s is not a defined type", t)) - } - - if named.TypeParams().Len() == 0 { - w.tag('T') - } else { - w.tag('U') - } - w.pos(obj.Pos()) - - if named.TypeParams().Len() > 0 { - // While importing the type parameters, tparamList computes and records - // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) - } - - underlying := obj.Type().Underlying() - w.typ(underlying, obj.Pkg()) - - if types.IsInterface(t) { - break - } - - n := named.NumMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := named.Method(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - - // Receiver type parameters are type arguments of the receiver type, so - // their name must be qualified before exporting recv. - if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { - prefix := obj.Name() + "." 
+ m.Name() - for i := 0; i < rparams.Len(); i++ { - rparam := rparams.At(i) - name := tparamExportName(prefix, rparam) - w.p.tparamNames[rparam.Obj()] = name - } - } - w.param(sig.Recv()) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected object: %v", obj)) - } - - p.declIndex[obj] = w.flush() -} - -func (w *exportWriter) tag(tag byte) { - w.data.WriteByte(tag) -} - -func (w *exportWriter) pos(pos token.Pos) { - if w.p.shallow { - w.posV2(pos) - } else if w.p.version >= iexportVersionPosCol { - w.posV1(pos) - } else { - w.posV0(pos) - } -} - -// posV2 encoding (used only in shallow mode) records positions as -// (file, offset), where file is the index in the token.File table -// (which records the file name and newline offsets) and offset is a -// byte offset. It effectively ignores //line directives. -func (w *exportWriter) posV2(pos token.Pos) { - if pos == token.NoPos { - w.uint64(0) - return - } - file := w.p.fset.File(pos) // fset must be non-nil - index, offset := w.p.fileIndexAndOffset(file, pos) - w.uint64(1 + index) - w.uint64(offset) -} - -func (w *exportWriter) posV1(pos token.Pos) { - if w.p.fset == nil { - w.int64(0) - return - } - - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - column := int64(p.Column) - - deltaColumn := (column - w.prevColumn) << 1 - deltaLine := (line - w.prevLine) << 1 - - if file != w.prevFile { - deltaLine |= 1 - } - if deltaLine != 0 { - deltaColumn |= 1 - } - - w.int64(deltaColumn) - if deltaColumn&1 != 0 { - w.int64(deltaLine) - if deltaLine&1 != 0 { - w.string(file) - } - } - - w.prevFile = file - w.prevLine = line - w.prevColumn = column -} - -func (w *exportWriter) posV0(pos token.Pos) { - if w.p.fset == nil { - w.int64(0) - return - } - - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - - // When file is the same as the last position (common case), - // we can save a few bytes by delta encoding just the line - // number. - // - // Note: Because data objects may be read out of order (or not - // at all), we can only apply delta encoding within a single - // object. This is handled implicitly by tracking prevFile and - // prevLine as fields of exportWriter. - - if file == w.prevFile { - delta := line - w.prevLine - w.int64(delta) - if delta == deltaNewFile { - w.int64(-1) - } - } else { - w.int64(deltaNewFile) - w.int64(line) // line >= 0 - w.string(file) - w.prevFile = file - } - w.prevLine = line -} - -func (w *exportWriter) pkg(pkg *types.Package) { - // Ensure any referenced packages are declared in the main index. - w.p.allPkgs[pkg] = true - - w.string(w.exportPath(pkg)) -} - -func (w *exportWriter) qualifiedType(obj *types.TypeName) { - name := w.p.exportName(obj) - - // Ensure any referenced declarations are written out too. - w.p.pushDecl(obj) - w.string(name) - w.pkg(obj.Pkg()) -} - -// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass -// it in explicitly into signatures and structs that may use it for -// constructing fields. 
-func (w *exportWriter) typ(t types.Type, pkg *types.Package) { - w.data.uint64(w.p.typOff(t, pkg)) -} - -func (p *iexporter) newWriter() *exportWriter { - return &exportWriter{p: p} -} - -func (w *exportWriter) flush() uint64 { - off := uint64(w.p.data0.Len()) - io.Copy(&w.p.data0, &w.data) - return off -} - -func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { - off, ok := p.typIndex[t] - if !ok { - w := p.newWriter() - w.doTyp(t, pkg) - off = predeclReserved + w.flush() - p.typIndex[t] = off - } - return off -} - -func (w *exportWriter) startType(k itag) { - w.data.uint64(uint64(k)) -} - -func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { - if trace { - w.p.trace("exporting type %s (%T)", t, t) - w.p.indent++ - defer func() { - w.p.indent-- - w.p.trace("=> %s", t) - }() - } - switch t := t.(type) { - case *types.Named: - if targs := t.TypeArgs(); targs.Len() > 0 { - w.startType(instanceType) - // TODO(rfindley): investigate if this position is correct, and if it - // matters. - w.pos(t.Obj().Pos()) - w.typeList(targs, pkg) - w.typ(t.Origin(), pkg) - return - } - w.startType(definedType) - w.qualifiedType(t.Obj()) - - case *types.TypeParam: - w.startType(typeParamType) - w.qualifiedType(t.Obj()) - - case *types.Pointer: - w.startType(pointerType) - w.typ(t.Elem(), pkg) - - case *types.Slice: - w.startType(sliceType) - w.typ(t.Elem(), pkg) - - case *types.Array: - w.startType(arrayType) - w.uint64(uint64(t.Len())) - w.typ(t.Elem(), pkg) - - case *types.Chan: - w.startType(chanType) - // 1 RecvOnly; 2 SendOnly; 3 SendRecv - var dir uint64 - switch t.Dir() { - case types.RecvOnly: - dir = 1 - case types.SendOnly: - dir = 2 - case types.SendRecv: - dir = 3 - } - w.uint64(dir) - w.typ(t.Elem(), pkg) - - case *types.Map: - w.startType(mapType) - w.typ(t.Key(), pkg) - w.typ(t.Elem(), pkg) - - case *types.Signature: - w.startType(signatureType) - w.pkg(pkg) - w.signature(t) - - case *types.Struct: - w.startType(structType) - n := t.NumFields() - // Even for struct{} we must emit some qualifying package, because that's - // what the compiler does, and thus that's what the importer expects. - fieldPkg := pkg - if n > 0 { - fieldPkg = t.Field(0).Pkg() - } - if fieldPkg == nil { - // TODO(rfindley): improve this very hacky logic. - // - // The importer expects a package to be set for all struct types, even - // those with no fields. A better encoding might be to set NumFields - // before pkg. setPkg panics with a nil package, which may be possible - // to reach with invalid packages (and perhaps valid packages, too?), so - // (arbitrarily) set the localpkg if available. - // - // Alternatively, we may be able to simply guarantee that pkg != nil, by - // reconsidering the encoding of constant values. 
- if w.p.shallow { - fieldPkg = w.p.localpkg - } else { - panic(internalErrorf("no package to set for empty struct")) - } - } - w.pkg(fieldPkg) - w.uint64(uint64(n)) - - for i := 0; i < n; i++ { - f := t.Field(i) - if w.p.shallow { - w.objectPath(f) - } - w.pos(f.Pos()) - w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), fieldPkg) - w.bool(f.Anonymous()) - w.string(t.Tag(i)) // note (or tag) - } - - case *types.Interface: - w.startType(interfaceType) - w.pkg(pkg) - - n := t.NumEmbeddeds() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - ft := t.EmbeddedType(i) - tPkg := pkg - if named, _ := ft.(*types.Named); named != nil { - w.pos(named.Obj().Pos()) - } else { - w.pos(token.NoPos) - } - w.typ(ft, tPkg) - } - - // See comment for struct fields. In shallow mode we change the encoding - // for interface methods that are promoted from other packages. - - n = t.NumExplicitMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := t.ExplicitMethod(i) - if w.p.shallow { - w.objectPath(m) - } - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.signature(sig) - } - - case *types.Union: - w.startType(unionType) - nt := t.Len() - w.uint64(uint64(nt)) - for i := 0; i < nt; i++ { - term := t.Term(i) - w.bool(term.Tilde()) - w.typ(term.Type(), pkg) - } - - default: - panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) - } -} - -// objectPath writes the package and objectPath to use to look up obj in a -// different package, when encoding in "shallow" mode. -// -// When doing a shallow import, the importer creates only the local package, -// and requests package symbols for dependencies from the client. -// However, certain types defined in the local package may hold objects defined -// (perhaps deeply) within another package. -// -// For example, consider the following: -// -// package a -// func F() chan * map[string] struct { X int } -// -// package b -// import "a" -// var B = a.F() -// -// In this example, the type of b.B holds fields defined in package a. -// In order to have the correct canonical objects for the field defined in the -// type of B, they are encoded as objectPaths and later looked up in the -// importer. The same problem applies to interface methods. -func (w *exportWriter) objectPath(obj types.Object) { - if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { - // obj.Pkg() may be nil for the builtin error.Error. - // In this case, or if obj is declared in the local package, no need to - // encode. - w.string("") - return - } - objectPath, err := w.p.objectpathEncoder().For(obj) - if err != nil { - // Fall back to the empty string, which will cause the importer to create a - // new object, which matches earlier behavior. Creating a new object is - // sufficient for many purposes (such as type checking), but causes certain - // references algorithms to fail (golang/go#60819). However, we didn't - // notice this problem during months of gopls@v0.12.0 testing. - // - // TODO(golang/go#61674): this workaround is insufficient, as in the case - // where the field forwarded from an instantiated type that may not appear - // in the export data of the original package: - // - // // package a - // type A[P any] struct{ F P } - // - // // package b - // type B a.A[int] - // - // We need to update references algorithms not to depend on this - // de-duplication, at which point we may want to simply remove the - // workaround here. 
- w.string("") - return - } - w.string(string(objectPath)) - w.pkg(obj.Pkg()) -} - -func (w *exportWriter) signature(sig *types.Signature) { - w.paramList(sig.Params()) - w.paramList(sig.Results()) - if sig.Params().Len() > 0 { - w.bool(sig.Variadic()) - } -} - -func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { - w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) - } -} - -func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { - ll := uint64(list.Len()) - w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - // Set the type parameter exportName before exporting its type. - exportName := tparamExportName(prefix, tparam) - w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) - } -} - -const blankMarker = "$" - -// tparamExportName returns the 'exported' name of a type parameter, which -// differs from its actual object name: it is prefixed with a qualifier, and -// blank type parameter names are disambiguated by their index in the type -// parameter list. -func tparamExportName(prefix string, tparam *types.TypeParam) string { - assert(prefix != "") - name := tparam.Obj().Name() - if name == "_" { - name = blankMarker + strconv.Itoa(tparam.Index()) - } - return prefix + "." + name -} - -// tparamName returns the real name of a type parameter, after stripping its -// qualifying prefix and reverting blank-name encoding. See tparamExportName -// for details. -func tparamName(exportName string) string { - // Remove the "path" from the type param name that makes it unique. - ix := strings.LastIndex(exportName, ".") - if ix < 0 { - errorf("malformed type parameter export name %s: missing prefix", exportName) - } - name := exportName[ix+1:] - if strings.HasPrefix(name, blankMarker) { - return "_" - } - return name -} - -func (w *exportWriter) paramList(tup *types.Tuple) { - n := tup.Len() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - w.param(tup.At(i)) - } -} - -func (w *exportWriter) param(obj types.Object) { - w.pos(obj.Pos()) - w.localIdent(obj) - w.typ(obj.Type(), obj.Pkg()) -} - -func (w *exportWriter) value(typ types.Type, v constant.Value) { - w.typ(typ, nil) - if w.p.version >= iexportVersionGo1_18 { - w.int64(int64(v.Kind())) - } - - if v.Kind() == constant.Unknown { - // golang/go#60605: treat unknown constant values as if they have invalid type - // - // This loses some fidelity over the package type-checked from source, but that - // is acceptable. 
- // - // TODO(rfindley): we should switch on the recorded constant kind rather - // than the constant type - return - } - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - w.bool(constant.BoolVal(v)) - case types.IsInteger: - var i big.Int - if i64, exact := constant.Int64Val(v); exact { - i.SetInt64(i64) - } else if ui64, exact := constant.Uint64Val(v); exact { - i.SetUint64(ui64) - } else { - i.SetString(v.ExactString(), 10) - } - w.mpint(&i, typ) - case types.IsFloat: - f := constantToFloat(v) - w.mpfloat(f, typ) - case types.IsComplex: - w.mpfloat(constantToFloat(constant.Real(v)), typ) - w.mpfloat(constantToFloat(constant.Imag(v)), typ) - case types.IsString: - w.string(constant.StringVal(v)) - default: - if b.Kind() == types.Invalid { - // package contains type errors - break - } - panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) - } -} - -// constantToFloat converts a constant.Value with kind constant.Float to a -// big.Float. -func constantToFloat(x constant.Value) *big.Float { - x = constant.ToFloat(x) - // Use the same floating-point precision (512) as cmd/compile - // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). - const mpprec = 512 - var f big.Float - f.SetPrec(mpprec) - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - n := valueToRat(num) - d := valueToRat(denom) - f.SetRat(n.Quo(n, d)) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - _, ok := f.SetString(x.ExactString()) - assert(ok) - } - return &f -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. - bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -// mpint exports a multi-precision integer. -// -// For unsigned types, small values are written out as a single -// byte. Larger values are written out as a length-prefixed big-endian -// byte string, where the length prefix is encoded as its complement. -// For example, bytes 0, 1, and 2 directly represent the integer -// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, -// 2-, and 3-byte big-endian string follow. -// -// Encoding for signed types use the same general approach as for -// unsigned types, except small values use zig-zag encoding and the -// bottom bit of length prefix byte for large values is reserved as a -// sign bit. -// -// The exact boundary between small and large encodings varies -// according to the maximum number of bytes needed to encode a value -// of type typ. As a special case, 8-bit types are always encoded as a -// single byte. -// -// TODO(mdempsky): Is this level of complexity really worthwhile? 
-func (w *exportWriter) mpint(x *big.Int, typ types.Type) { - basic, ok := typ.Underlying().(*types.Basic) - if !ok { - panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) - } - - signed, maxBytes := intSize(basic) - - negative := x.Sign() < 0 - if !signed && negative { - panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) - } - - b := x.Bytes() - if len(b) > 0 && b[0] == 0 { - panic(internalErrorf("leading zeros")) - } - if uint(len(b)) > maxBytes { - panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) - } - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - // Check if x can use small value encoding. - if len(b) <= 1 { - var ux uint - if len(b) == 1 { - ux = uint(b[0]) - } - if signed { - ux <<= 1 - if negative { - ux-- - } - } - if ux < maxSmall { - w.data.WriteByte(byte(ux)) - return - } - } - - n := 256 - uint(len(b)) - if signed { - n = 256 - 2*uint(len(b)) - if negative { - n |= 1 - } - } - if n < maxSmall || n >= 256 { - panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) - } - - w.data.WriteByte(byte(n)) - w.data.Write(b) -} - -// mpfloat exports a multi-precision floating point number. -// -// The number's value is decomposed into mantissa × 2**exponent, where -// mantissa is an integer. The value is written out as mantissa (as a -// multi-precision integer) and then the exponent, except exponent is -// omitted if mantissa is zero. -func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { - if f.IsInf() { - panic("infinite constant") - } - - // Break into f = mant × 2**exp, with 0.5 <= mant < 1. - var mant big.Float - exp := int64(f.MantExp(&mant)) - - // Scale so that mant is an integer. - prec := mant.MinPrec() - mant.SetMantExp(&mant, int(prec)) - exp -= int64(prec) - - manti, acc := mant.Int(nil) - if acc != big.Exact { - panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) - } - w.mpint(manti, typ) - if manti.Sign() != 0 { - w.int64(exp) - } -} - -func (w *exportWriter) bool(b bool) bool { - var x uint64 - if b { - x = 1 - } - w.uint64(x) - return b -} - -func (w *exportWriter) int64(x int64) { w.data.int64(x) } -func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } -func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } - -func (w *exportWriter) localIdent(obj types.Object) { - // Anonymous parameters. - if obj == nil { - w.string("") - return - } - - name := obj.Name() - if name == "_" { - w.string("_") - return - } - - w.string(name) -} - -type intWriter struct { - bytes.Buffer -} - -func (w *intWriter) int64(x int64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutVarint(buf[:], x) - w.Write(buf[:n]) -} - -func (w *intWriter) uint64(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - w.Write(buf[:n]) -} - -func assert(cond bool) { - if !cond { - panic("internal error: assertion failed") - } -} - -// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. - -// objQueue is a FIFO queue of types.Object. The zero value of objQueue is -// a ready-to-use empty queue. -type objQueue struct { - ring []types.Object - head, tail int -} - -// empty returns true if q contains no Nodes. -func (q *objQueue) empty() bool { - return q.head == q.tail -} - -// pushTail appends n to the tail of the queue. 
-func (q *objQueue) pushTail(obj types.Object) { - if len(q.ring) == 0 { - q.ring = make([]types.Object, 16) - } else if q.head+len(q.ring) == q.tail { - // Grow the ring. - nring := make([]types.Object, len(q.ring)*2) - // Copy the old elements. - part := q.ring[q.head%len(q.ring):] - if q.tail-q.head <= len(part) { - part = part[:q.tail-q.head] - copy(nring, part) - } else { - pos := copy(nring, part) - copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) - } - q.ring, q.head, q.tail = nring, 0, q.tail-q.head - } - - q.ring[q.tail%len(q.ring)] = obj - q.tail++ -} - -// popHead pops a node from the head of the queue. It panics if q is empty. -func (q *objQueue) popHead() types.Object { - if q.empty() { - panic("dequeue empty") - } - obj := q.ring[q.head%len(q.ring)] - q.head++ - return obj -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -// TODO(adonovan): make this call panic, so that it's symmetric with errorf. -// Otherwise it's easy to forget to do anything with the error. -// -// TODO(adonovan): also, consider switching the names "errorf" and -// "internalErrorf" as the former is used for bugs, whose cause is -// internal inconsistency, whereas the latter is used for ordinary -// situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go deleted file mode 100644 index 9bde15e3bc6..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ /dev/null @@ -1,1082 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "sort" - "strings" - - "golang.org/x/tools/go/types/objectpath" -) - -type intReader struct { - *bytes.Reader - path string -} - -func (r *intReader) int64() int64 { - i, err := binary.ReadVarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -func (r *intReader) uint64() uint64 { - i, err := binary.ReadUvarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -// Keep this in sync with constants in iexport.go. -const ( - iexportVersionGo1_11 = 0 - iexportVersionPosCol = 1 - iexportVersionGo1_18 = 2 - iexportVersionGenerics = 2 - - iexportVersionCurrent = 2 -) - -type ident struct { - pkg *types.Package - name string -} - -const predeclReserved = 32 - -type itag uint64 - -const ( - // Types - definedType itag = iota - pointerType - sliceType - arrayType - chanType - mapType - signatureType - structType - interfaceType - typeParamType - instanceType - unionType -) - -// IImportData imports a package from the serialized package data -// and returns 0 and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. 
-func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) - if err != nil { - return 0, nil, err - } - return 0, pkgs[0], nil -} - -// IImportBundle imports a set of packages from the serialized package bundle. -func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) -} - -// A GetPackagesFunc function obtains the non-nil symbols for a set of -// packages, creating and recursively importing them as needed. An -// implementation should store each package symbol is in the Pkg -// field of the items array. -// -// Any error causes importing to fail. This can be used to quickly read -// the import manifest of an export data file without fully decoding it. -type GetPackagesFunc = func(items []GetPackagesItem) error - -// A GetPackagesItem is a request from the importer for the package -// symbol of the specified name and path. -type GetPackagesItem struct { - Name, Path string - Pkg *types.Package // to be filled in by GetPackagesFunc call - - // private importer state - pathOffset uint64 - nameIndex map[string]uint64 -} - -// GetPackagesFromMap returns a GetPackagesFunc that retrieves -// packages from the given map of package path to package. -// -// The returned function may mutate m: each requested package that is not -// found is created with types.NewPackage and inserted into m. -func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { - return func(items []GetPackagesItem) error { - for i, item := range items { - pkg, ok := m[item.Path] - if !ok { - pkg = types.NewPackage(item.Path, item.Name) - m[item.Path] = pkg - } - items[i].Pkg = pkg - } - return nil - } -} - -func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { - const currentVersion = iexportVersionCurrent - version := int64(-1) - if !debug { - defer func() { - if e := recover(); e != nil { - if bundle { - err = fmt.Errorf("%v", e) - } else if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) - } - } - }() - } - - r := &intReader{bytes.NewReader(data), path} - - if bundle { - if v := r.uint64(); v != bundleVersion { - errorf("unknown bundle format version %d", v) - } - } - - version = int64(r.uint64()) - switch version { - case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: - default: - if version > iexportVersionGo1_18 { - errorf("unstable iexport format version %d, just rebuild compiler and std library", version) - } else { - errorf("unknown iexport format version %d", version) - } - } - - sLen := int64(r.uint64()) - var fLen int64 - var fileOffset []uint64 - if shallow { - // Shallow mode uses a different position encoding. 
- fLen = int64(r.uint64()) - fileOffset = make([]uint64, r.uint64()) - for i := range fileOffset { - fileOffset[i] = r.uint64() - } - } - dLen := int64(r.uint64()) - - whence, _ := r.Seek(0, io.SeekCurrent) - stringData := data[whence : whence+sLen] - fileData := data[whence+sLen : whence+sLen+fLen] - declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] - r.Seek(sLen+fLen+dLen, io.SeekCurrent) - - p := iimporter{ - version: int(version), - ipath: path, - shallow: shallow, - reportf: reportf, - - stringData: stringData, - stringCache: make(map[uint64]string), - fileOffset: fileOffset, - fileData: fileData, - fileCache: make([]*token.File, len(fileOffset)), - pkgCache: make(map[uint64]*types.Package), - - declData: declData, - pkgIndex: make(map[*types.Package]map[string]uint64), - typCache: make(map[uint64]types.Type), - // Separate map for typeparams, keyed by their package and unique - // name. - tparamIndex: make(map[ident]types.Type), - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - for i, pt := range predeclared() { - p.typCache[uint64(i)] = pt - } - - // Gather the relevant packages from the manifest. - items := make([]GetPackagesItem, r.uint64()) - for i := range items { - pkgPathOff := r.uint64() - pkgPath := p.stringAt(pkgPathOff) - pkgName := p.stringAt(r.uint64()) - _ = r.uint64() // package height; unused by go/types - - if pkgPath == "" { - pkgPath = path - } - items[i].Name = pkgName - items[i].Path = pkgPath - items[i].pathOffset = pkgPathOff - - // Read index for package. - nameIndex := make(map[string]uint64) - nSyms := r.uint64() - // In shallow mode, only the current package (i=0) has an index. - assert(!(shallow && i > 0 && nSyms != 0)) - for ; nSyms > 0; nSyms-- { - name := p.stringAt(r.uint64()) - nameIndex[name] = r.uint64() - } - - items[i].nameIndex = nameIndex - } - - // Request packages all at once from the client, - // enabling a parallel implementation. - if err := getPackages(items); err != nil { - return nil, err // don't wrap this error - } - - // Check the results and complete the index. - pkgList := make([]*types.Package, len(items)) - for i, item := range items { - pkg := item.Pkg - if pkg == nil { - errorf("internal error: getPackages returned nil package for %q", item.Path) - } else if pkg.Path() != item.Path { - errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) - } else if pkg.Name() != item.Name { - errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) - } - p.pkgCache[item.pathOffset] = pkg - p.pkgIndex[pkg] = item.nameIndex - pkgList[i] = pkg - } - - if bundle { - pkgs = make([]*types.Package, r.uint64()) - for i := range pkgs { - pkg := p.pkgAt(r.uint64()) - imps := make([]*types.Package, r.uint64()) - for j := range imps { - imps[j] = p.pkgAt(r.uint64()) - } - pkg.SetImports(imps) - pkgs[i] = pkg - } - } else { - if len(pkgList) == 0 { - errorf("no packages found for %s", path) - panic("unreachable") - } - pkgs = pkgList[:1] - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkgs[0].SetImports(list) - } - - for _, pkg := range pkgs { - if pkg.Complete() { - continue - } - - names := make([]string, 0, len(p.pkgIndex[pkg])) - for name := range p.pkgIndex[pkg] { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - p.doDecl(pkg, name) - } - - // package was imported completely and without errors - pkg.MarkComplete() - } - - // SetConstraint can't be called if the constraint type is not yet complete. - // When type params are created in the 'P' case of (*importReader).obj(), - // the associated constraint type may not be complete due to recursion. - // Therefore, we defer calling SetConstraint there, and call it here instead - // after all types are complete. - for _, d := range p.later { - d.t.SetConstraint(d.constraint) - } - - for _, typ := range p.interfaceList { - typ.Complete() - } - - // Workaround for golang/go#61561. See the doc for instanceList for details. - for _, typ := range p.instanceList { - if iface, _ := typ.Underlying().(*types.Interface); iface != nil { - iface.Complete() - } - } - - return pkgs, nil -} - -type setConstraintArgs struct { - t *types.TypeParam - constraint types.Type -} - -type iimporter struct { - version int - ipath string - - shallow bool - reportf ReportFunc // if non-nil, used to report bugs - - stringData []byte - stringCache map[uint64]string - fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i - fileData []byte - fileCache []*token.File // memoized decoding of file encoded as i - pkgCache map[uint64]*types.Package - - declData []byte - pkgIndex map[*types.Package]map[string]uint64 - typCache map[uint64]types.Type - tparamIndex map[ident]types.Type - - fake fakeFileSet - interfaceList []*types.Interface - - // Workaround for the go/types bug golang/go#61561: instances produced during - // instantiation may contain incomplete interfaces. Here we only complete the - // underlying type of the instance, which is the most common case but doesn't - // handle parameterized interface literals defined deeper in the type. - instanceList []types.Type // instances for later completion (see golang/go#61561) - - // Arguments for calls to SetConstraint that are deferred due to recursive types - later []setConstraintArgs - - indent int // for tracing support -} - -func (p *iimporter) trace(format string, args ...interface{}) { - if !trace { - // Call sites should also be guarded, but having this check here allows - // easily enabling/disabling debug trace statements. - return - } - fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) -} - -func (p *iimporter) doDecl(pkg *types.Package, name string) { - if debug { - p.trace("import decl %s", name) - p.indent++ - defer func() { - p.indent-- - p.trace("=> %s", name) - }() - } - // See if we've already imported this declaration. - if obj := pkg.Scope().Lookup(name); obj != nil { - return - } - - off, ok := p.pkgIndex[pkg][name] - if !ok { - // In deep mode, the index should be complete. In shallow - // mode, we should have already recursively loaded necessary - // dependencies so the above Lookup succeeds. 
- errorf("%v.%v not in index", pkg, name) - } - - r := &importReader{p: p, currPkg: pkg} - r.declReader.Reset(p.declData[off:]) - - r.obj(name) -} - -func (p *iimporter) stringAt(off uint64) string { - if s, ok := p.stringCache[off]; ok { - return s - } - - slen, n := binary.Uvarint(p.stringData[off:]) - if n <= 0 { - errorf("varint failed") - } - spos := off + uint64(n) - s := string(p.stringData[spos : spos+slen]) - p.stringCache[off] = s - return s -} - -func (p *iimporter) fileAt(index uint64) *token.File { - file := p.fileCache[index] - if file == nil { - off := p.fileOffset[index] - file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) - p.fileCache[index] = file - } - return file -} - -func (p *iimporter) decodeFile(rd intReader) *token.File { - filename := p.stringAt(rd.uint64()) - size := int(rd.uint64()) - file := p.fake.fset.AddFile(filename, -1, size) - - // SetLines requires a nondecreasing sequence. - // Because it is common for clients to derive the interval - // [start, start+len(name)] from a start position, and we - // want to ensure that the end offset is on the same line, - // we fill in the gaps of the sparse encoding with values - // that strictly increase by the largest possible amount. - // This allows us to avoid having to record the actual end - // offset of each needed line. - - lines := make([]int, int(rd.uint64())) - var index, offset int - for i, n := 0, int(rd.uint64()); i < n; i++ { - index += int(rd.uint64()) - offset += int(rd.uint64()) - lines[index] = offset - - // Ensure monotonicity between points. - for j := index - 1; j > 0 && lines[j] == 0; j-- { - lines[j] = lines[j+1] - 1 - } - } - - // Ensure monotonicity after last point. - for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { - size-- - lines[j] = size - } - - if !file.SetLines(lines) { - errorf("SetLines failed: %d", lines) // can't happen - } - return file -} - -func (p *iimporter) pkgAt(off uint64) *types.Package { - if pkg, ok := p.pkgCache[off]; ok { - return pkg - } - path := p.stringAt(off) - errorf("missing package %q in %q", path, p.ipath) - return nil -} - -func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { - if t, ok := p.typCache[off]; ok && canReuse(base, t) { - return t - } - - if off < predeclReserved { - errorf("predeclared type missing from cache: %v", off) - } - - r := &importReader{p: p} - r.declReader.Reset(p.declData[off-predeclReserved:]) - t := r.doType(base) - - if canReuse(base, t) { - p.typCache[off] = t - } - return t -} - -// canReuse reports whether the type rhs on the RHS of the declaration for def -// may be re-used. -// -// Specifically, if def is non-nil and rhs is an interface type with methods, it -// may not be re-used because we have a convention of setting the receiver type -// for interface methods to def. -func canReuse(def *types.Named, rhs types.Type) bool { - if def == nil { - return true - } - iface, _ := rhs.(*types.Interface) - if iface == nil { - return true - } - // Don't use iface.Empty() here as iface may not be complete. 
- return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 -} - -type importReader struct { - p *iimporter - declReader bytes.Reader - currPkg *types.Package - prevFile string - prevLine int64 - prevColumn int64 -} - -func (r *importReader) obj(name string) { - tag := r.byte() - pos := r.pos() - - switch tag { - case 'A': - typ := r.typ() - - r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) - - case 'C': - typ, val := r.value() - - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) - - case 'F', 'G': - var tparams []*types.TypeParam - if tag == 'G' { - tparams = r.tparamList() - } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) - - case 'T', 'U': - // Types can be recursive. We need to setup a stub - // declaration before recursing. - obj := types.NewTypeName(pos, r.currPkg, name, nil) - named := types.NewNamed(obj, nil, nil) - // Declare obj before calling r.tparamList, so the new type name is recognized - // if used in the constraint of one of its own typeparams (see #48280). - r.declare(obj) - if tag == 'U' { - tparams := r.tparamList() - named.SetTypeParams(tparams) - } - - underlying := r.p.typAt(r.uint64(), named).Underlying() - named.SetUnderlying(underlying) - - if !isInterface(underlying) { - for n := r.uint64(); n > 0; n-- { - mpos := r.pos() - mname := r.ident() - recv := r.param() - - // If the receiver has any targs, set those as the - // rparams of the method (since those are the - // typeparams being used in the method sig/body). - base := baseType(recv.Type()) - assert(base != nil) - targs := base.TypeArgs() - var rparams []*types.TypeParam - if targs.Len() > 0 { - rparams = make([]*types.TypeParam, targs.Len()) - for i := range rparams { - rparams[i] = targs.At(i).(*types.TypeParam) - } - } - msig := r.signature(recv, rparams, nil) - - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) - } - } - - case 'P': - // We need to "declare" a typeparam in order to have a name that - // can be referenced recursively (if needed) in the type param's - // bound. - if r.p.version < iexportVersionGenerics { - errorf("unexpected type param type") - } - name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := types.NewTypeParam(tn, nil) - - // To handle recursive references to the typeparam within its - // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} - r.p.tparamIndex[id] = t - var implicit bool - if r.p.version >= iexportVersionGo1_18 { - implicit = r.bool() - } - constraint := r.typ() - if implicit { - iface, _ := constraint.(*types.Interface) - if iface == nil { - errorf("non-interface constraint marked implicit") - } - iface.MarkImplicit() - } - // The constraint type may not be complete, if we - // are in the middle of a type recursion involving type - // constraints. So, we defer SetConstraint until we have - // completely set up all types in ImportData. - r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) - - case 'V': - typ := r.typ() - - r.declare(types.NewVar(pos, r.currPkg, name, typ)) - - default: - errorf("unexpected tag: %v", tag) - } -} - -func (r *importReader) declare(obj types.Object) { - obj.Pkg().Scope().Insert(obj) -} - -func (r *importReader) value() (typ types.Type, val constant.Value) { - typ = r.typ() - if r.p.version >= iexportVersionGo1_18 { - // TODO: add support for using the kind. 
- _ = constant.Kind(r.int64()) - } - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - val = constant.MakeBool(r.bool()) - - case types.IsString: - val = constant.MakeString(r.string()) - - case types.IsInteger: - var x big.Int - r.mpint(&x, b) - val = constant.Make(&x) - - case types.IsFloat: - val = r.mpfloat(b) - - case types.IsComplex: - re := r.mpfloat(b) - im := r.mpfloat(b) - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - default: - if b.Kind() == types.Invalid { - val = constant.MakeUnknown() - return - } - errorf("unexpected type %v", typ) // panics - panic("unreachable") - } - - return -} - -func intSize(b *types.Basic) (signed bool, maxBytes uint) { - if (b.Info() & types.IsUntyped) != 0 { - return true, 64 - } - - switch b.Kind() { - case types.Float32, types.Complex64: - return true, 3 - case types.Float64, types.Complex128: - return true, 7 - } - - signed = (b.Info() & types.IsUnsigned) == 0 - switch b.Kind() { - case types.Int8, types.Uint8: - maxBytes = 1 - case types.Int16, types.Uint16: - maxBytes = 2 - case types.Int32, types.Uint32: - maxBytes = 4 - default: - maxBytes = 8 - } - - return -} - -func (r *importReader) mpint(x *big.Int, typ *types.Basic) { - signed, maxBytes := intSize(typ) - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - n, _ := r.declReader.ReadByte() - if uint(n) < maxSmall { - v := int64(n) - if signed { - v >>= 1 - if n&1 != 0 { - v = ^v - } - } - x.SetInt64(v) - return - } - - v := -n - if signed { - v = -(n &^ 1) >> 1 - } - if v < 1 || uint(v) > maxBytes { - errorf("weird decoding: %v, %v => %v", n, signed, v) - } - b := make([]byte, v) - io.ReadFull(&r.declReader, b) - x.SetBytes(b) - if signed && n&1 != 0 { - x.Neg(x) - } -} - -func (r *importReader) mpfloat(typ *types.Basic) constant.Value { - var mant big.Int - r.mpint(&mant, typ) - var f big.Float - f.SetInt(&mant) - if f.Sign() != 0 { - f.SetMantExp(&f, int(r.int64())) - } - return constant.Make(&f) -} - -func (r *importReader) ident() string { - return r.string() -} - -func (r *importReader) qualifiedIdent() (*types.Package, string) { - name := r.string() - pkg := r.pkg() - return pkg, name -} - -func (r *importReader) pos() token.Pos { - if r.p.shallow { - // precise offsets are encoded only in shallow mode - return r.posv2() - } - if r.p.version >= iexportVersionPosCol { - r.posv1() - } else { - r.posv0() - } - - if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { - return token.NoPos - } - return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) -} - -func (r *importReader) posv0() { - delta := r.int64() - if delta != deltaNewFile { - r.prevLine += delta - } else if l := r.int64(); l == -1 { - r.prevLine += deltaNewFile - } else { - r.prevFile = r.string() - r.prevLine = l - } -} - -func (r *importReader) posv1() { - delta := r.int64() - r.prevColumn += delta >> 1 - if delta&1 != 0 { - delta = r.int64() - r.prevLine += delta >> 1 - if delta&1 != 0 { - r.prevFile = r.string() - } - } -} - -func (r *importReader) posv2() token.Pos { - file := r.uint64() - if file == 0 { - return token.NoPos - } - tf := r.p.fileAt(file - 1) - return tf.Pos(int(r.uint64())) -} - -func (r *importReader) typ() types.Type { - return r.p.typAt(r.uint64(), nil) -} - -func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) - return ok -} - -func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } -func (r 
*importReader) string() string { return r.p.stringAt(r.uint64()) } - -func (r *importReader) doType(base *types.Named) (res types.Type) { - k := r.kind() - if debug { - r.p.trace("importing type %d (base: %s)", k, base) - r.p.indent++ - defer func() { - r.p.indent-- - r.p.trace("=> %s", res) - }() - } - switch k { - default: - errorf("unexpected kind tag in %q: %v", r.p.ipath, k) - return nil - - case definedType: - pkg, name := r.qualifiedIdent() - r.p.doDecl(pkg, name) - return pkg.Scope().Lookup(name).(*types.TypeName).Type() - case pointerType: - return types.NewPointer(r.typ()) - case sliceType: - return types.NewSlice(r.typ()) - case arrayType: - n := r.uint64() - return types.NewArray(r.typ(), int64(n)) - case chanType: - dir := chanDir(int(r.uint64())) - return types.NewChan(dir, r.typ()) - case mapType: - return types.NewMap(r.typ(), r.typ()) - case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) - - case structType: - r.currPkg = r.pkg() - - fields := make([]*types.Var, r.uint64()) - tags := make([]string, len(fields)) - for i := range fields { - var field *types.Var - if r.p.shallow { - field, _ = r.objectPathObject().(*types.Var) - } - - fpos := r.pos() - fname := r.ident() - ftyp := r.typ() - emb := r.bool() - tag := r.string() - - // Either this is not a shallow import, the field is local, or the - // encoded objectPath failed to produce an object (a bug). - // - // Even in this last, buggy case, fall back on creating a new field. As - // discussed in iexport.go, this is not correct, but mostly works and is - // preferable to failing (for now at least). - if field == nil { - field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) - } - - fields[i] = field - tags[i] = tag - } - return types.NewStruct(fields, tags) - - case interfaceType: - r.currPkg = r.pkg() - - embeddeds := make([]types.Type, r.uint64()) - for i := range embeddeds { - _ = r.pos() - embeddeds[i] = r.typ() - } - - methods := make([]*types.Func, r.uint64()) - for i := range methods { - var method *types.Func - if r.p.shallow { - method, _ = r.objectPathObject().(*types.Func) - } - - mpos := r.pos() - mname := r.ident() - - // TODO(mdempsky): Matches bimport.go, but I - // don't agree with this. - var recv *types.Var - if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) - } - msig := r.signature(recv, nil, nil) - - if method == nil { - method = types.NewFunc(mpos, r.currPkg, mname, msig) - } - methods[i] = method - } - - typ := newInterface(methods, embeddeds) - r.p.interfaceList = append(r.p.interfaceList, typ) - return typ - - case typeParamType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected type param type") - } - pkg, name := r.qualifiedIdent() - id := ident{pkg, name} - if t, ok := r.p.tparamIndex[id]; ok { - // We're already in the process of importing this typeparam. - return t - } - // Otherwise, import the definition of the typeparam now. - r.p.doDecl(pkg, name) - return r.p.tparamIndex[id] - - case instanceType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected instantiation type") - } - // pos does not matter for instances: they are positioned on the original - // type. - _ = r.pos() - len := r.uint64() - targs := make([]types.Type, len) - for i := range targs { - targs[i] = r.typ() - } - baseType := r.typ() - // The imported instantiated type doesn't include any methods, so - // we must always use the methods of the base (orig) type. 
- // TODO provide a non-nil *Environment - t, _ := types.Instantiate(nil, baseType, targs, false) - - // Workaround for golang/go#61561. See the doc for instanceList for details. - r.p.instanceList = append(r.p.instanceList, t) - return t - - case unionType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected instantiation type") - } - terms := make([]*types.Term, r.uint64()) - for i := range terms { - terms[i] = types.NewTerm(r.bool(), r.typ()) - } - return types.NewUnion(terms) - } -} - -func (r *importReader) kind() itag { - return itag(r.uint64()) -} - -// objectPathObject is the inverse of exportWriter.objectPath. -// -// In shallow mode, certain fields and methods may need to be looked up in an -// imported package. See the doc for exportWriter.objectPath for a full -// explanation. -func (r *importReader) objectPathObject() types.Object { - objPath := objectpath.Path(r.string()) - if objPath == "" { - return nil - } - pkg := r.pkg() - obj, err := objectpath.Object(pkg, objPath) - if err != nil { - if r.p.reportf != nil { - r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) - } - } - return obj -} - -func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() - variadic := params.Len() > 0 && r.bool() - return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) -} - -func (r *importReader) tparamList() []*types.TypeParam { - n := r.uint64() - if n == 0 { - return nil - } - xs := make([]*types.TypeParam, n) - for i := range xs { - // Note: the standard library importer is tolerant of nil types here, - // though would panic in SetTypeParams. - xs[i] = r.typ().(*types.TypeParam) - } - return xs -} - -func (r *importReader) paramList() *types.Tuple { - xs := make([]*types.Var, r.uint64()) - for i := range xs { - xs[i] = r.param() - } - return types.NewTuple(xs...) -} - -func (r *importReader) param() *types.Var { - pos := r.pos() - name := r.ident() - typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) -} - -func (r *importReader) bool() bool { - return r.uint64() != 0 -} - -func (r *importReader) int64() int64 { - n, err := binary.ReadVarint(&r.declReader) - if err != nil { - errorf("readVarint: %v", err) - } - return n -} - -func (r *importReader) uint64() uint64 { - n, err := binary.ReadUvarint(&r.declReader) - if err != nil { - errorf("readUvarint: %v", err) - } - return n -} - -func (r *importReader) byte() byte { - x, err := r.declReader.ReadByte() - if err != nil { - errorf("declReader.ReadByte: %v", err) - } - return x -} - -func baseType(typ types.Type) *types.Named { - // pointer receivers are never types.Named types - if p, _ := typ.(*types.Pointer); p != nil { - typ = p.Elem() - } - // receiver base types are always (possibly generic) types.Named types - n, _ := typ.(*types.Named) - return n -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d058..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40fd8..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go deleted file mode 100644 index d892273efb6..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGo1_11 - -func additionalPredeclared() []types.Type { - return nil -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index edbe6ea7041..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 286bf445483..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !(go1.18 && goexperiment.unified) -// +build !go1.18 !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5d69ffbe68..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 && goexperiment.unified -// +build go1.18,goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go deleted file mode 100644 index 8eb20729c2a..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" -) - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go deleted file mode 100644 index b977435f626..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ /dev/null @@ -1,728 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Derived from go/internal/gcimporter/ureader.go - -//go:build go1.18 -// +build go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" - "sort" - "strings" - - "golang.org/x/tools/internal/pkgbits" -) - -// A pkgReader holds the shared state for reading a unified IR package -// description. -type pkgReader struct { - pkgbits.PkgDecoder - - fake fakeFileSet - - ctxt *types.Context - imports map[string]*types.Package // previously imported packages, indexed by path - - // lazily initialized arrays corresponding to the unified IR - // PosBase, Pkg, and Type sections, respectively. - posBases []string // position bases (i.e., file names) - pkgs []*types.Package - typs []types.Type - - // laterFns holds functions that need to be invoked at the end of - // import reading. - laterFns []func() - // laterFors is used in case of 'type A B' to ensure that B is processed before A. - laterFors map[types.Type]int - - // ifaces holds a list of constructed Interfaces, which need to have - // Complete called after importing is done. - ifaces []*types.Interface -} - -// later adds a function to be invoked at the end of import reading. -func (pr *pkgReader) later(fn func()) { - pr.laterFns = append(pr.laterFns, fn) -} - -// See cmd/compile/internal/noder.derivedInfo. -type derivedInfo struct { - idx pkgbits.Index - needed bool -} - -// See cmd/compile/internal/noder.typeInfo. 
-type typeInfo struct { - idx pkgbits.Index - derived bool -} - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - if !debug { - defer func() { - if x := recover(); x != nil { - err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) - } - }() - } - - s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] - input := pkgbits.NewPkgDecoder(path, s) - pkg = readUnifiedPackage(fset, nil, imports, input) - return -} - -// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. -func (pr *pkgReader) laterFor(t types.Type, fn func()) { - if pr.laterFors == nil { - pr.laterFors = make(map[types.Type]int) - } - pr.laterFors[t] = len(pr.laterFns) - pr.laterFns = append(pr.laterFns, fn) -} - -// readUnifiedPackage reads a package description from the given -// unified IR export data decoder. -func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { - pr := pkgReader{ - PkgDecoder: input, - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - - ctxt: ctxt, - imports: imports, - - posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), - pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), - typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), - } - defer pr.fake.setLines() - - r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) - pkg := r.pkg() - r.Bool() // has init - - for i, n := 0, r.Len(); i < n; i++ { - // As if r.obj(), but avoiding the Scope.Lookup call, - // to avoid eager loading of imports. - r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) - r.p.objIdx(r.Reloc(pkgbits.RelocObj)) - assert(r.Len() == 0) - } - - r.Sync(pkgbits.SyncEOF) - - for _, fn := range pr.laterFns { - fn() - } - - for _, iface := range pr.ifaces { - iface.Complete() - } - - // Imports() of pkg are all of the transitive packages that were loaded. - var imps []*types.Package - for _, imp := range pr.pkgs { - if imp != nil && imp != pkg { - imps = append(imps, imp) - } - } - sort.Sort(byPath(imps)) - pkg.SetImports(imps) - - pkg.MarkComplete() - return pkg -} - -// A reader holds the state for reading a single unified IR element -// within a package. -type reader struct { - pkgbits.Decoder - - p *pkgReader - - dict *readerDict -} - -// A readerDict holds the state for type parameters that parameterize -// the current unified IR element. -type readerDict struct { - // bounds is a slice of typeInfos corresponding to the underlying - // bounds of the element's type parameters. - bounds []typeInfo - - // tparams is a slice of the constructed TypeParams for the element. - tparams []*types.TypeParam - - // devived is a slice of types derived from tparams, which may be - // instantiated while reading the current element. 
- derived []derivedInfo - derivedTypes []types.Type // lazily instantiated from derived -} - -func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { - return &reader{ - Decoder: pr.NewDecoder(k, idx, marker), - p: pr, - } -} - -func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { - return &reader{ - Decoder: pr.TempDecoder(k, idx, marker), - p: pr, - } -} - -func (pr *pkgReader) retireReader(r *reader) { - pr.RetireDecoder(&r.Decoder) -} - -// @@@ Positions - -func (r *reader) pos() token.Pos { - r.Sync(pkgbits.SyncPos) - if !r.Bool() { - return token.NoPos - } - - // TODO(mdempsky): Delta encoding. - posBase := r.posBase() - line := r.Uint() - col := r.Uint() - return r.p.fake.pos(posBase, int(line), int(col)) -} - -func (r *reader) posBase() string { - return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) -} - -func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { - if b := pr.posBases[idx]; b != "" { - return b - } - - var filename string - { - r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) - - // Within types2, position bases have a lot more details (e.g., - // keeping track of where //line directives appeared exactly). - // - // For go/types, we just track the file name. - - filename = r.String() - - if r.Bool() { // file base - // Was: "b = token.NewTrimmedFileBase(filename, true)" - } else { // line base - pos := r.pos() - line := r.Uint() - col := r.Uint() - - // Was: "b = token.NewLineBase(pos, filename, true, line, col)" - _, _, _ = pos, line, col - } - pr.retireReader(r) - } - b := filename - pr.posBases[idx] = b - return b -} - -// @@@ Packages - -func (r *reader) pkg() *types.Package { - r.Sync(pkgbits.SyncPkg) - return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) -} - -func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { - // TODO(mdempsky): Consider using some non-nil pointer to indicate - // the universe scope, so we don't need to keep re-reading it. - if pkg := pr.pkgs[idx]; pkg != nil { - return pkg - } - - pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() - pr.pkgs[idx] = pkg - return pkg -} - -func (r *reader) doPkg() *types.Package { - path := r.String() - switch path { - case "": - path = r.p.PkgPath() - case "builtin": - return nil // universe - case "unsafe": - return types.Unsafe - } - - if pkg := r.p.imports[path]; pkg != nil { - return pkg - } - - name := r.String() - - pkg := types.NewPackage(path, name) - r.p.imports[path] = pkg - - return pkg -} - -// @@@ Types - -func (r *reader) typ() types.Type { - return r.p.typIdx(r.typInfo(), r.dict) -} - -func (r *reader) typInfo() typeInfo { - r.Sync(pkgbits.SyncType) - if r.Bool() { - return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} - } - return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} -} - -func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { - idx := info.idx - var where *types.Type - if info.derived { - where = &dict.derivedTypes[idx] - idx = dict.derived[idx].idx - } else { - where = &pr.typs[idx] - } - - if typ := *where; typ != nil { - return typ - } - - var typ types.Type - { - r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) - r.dict = dict - - typ = r.doTyp() - assert(typ != nil) - pr.retireReader(r) - } - // See comment in pkgReader.typIdx explaining how this happens. 
- if prev := *where; prev != nil { - return prev - } - - *where = typ - return typ -} - -func (r *reader) doTyp() (res types.Type) { - switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { - default: - errorf("unhandled type tag: %v", tag) - panic("unreachable") - - case pkgbits.TypeBasic: - return types.Typ[r.Len()] - - case pkgbits.TypeNamed: - obj, targs := r.obj() - name := obj.(*types.TypeName) - if len(targs) != 0 { - t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) - return t - } - return name.Type() - - case pkgbits.TypeTypeParam: - return r.dict.tparams[r.Len()] - - case pkgbits.TypeArray: - len := int64(r.Uint64()) - return types.NewArray(r.typ(), len) - case pkgbits.TypeChan: - dir := types.ChanDir(r.Len()) - return types.NewChan(dir, r.typ()) - case pkgbits.TypeMap: - return types.NewMap(r.typ(), r.typ()) - case pkgbits.TypePointer: - return types.NewPointer(r.typ()) - case pkgbits.TypeSignature: - return r.signature(nil, nil, nil) - case pkgbits.TypeSlice: - return types.NewSlice(r.typ()) - case pkgbits.TypeStruct: - return r.structType() - case pkgbits.TypeInterface: - return r.interfaceType() - case pkgbits.TypeUnion: - return r.unionType() - } -} - -func (r *reader) structType() *types.Struct { - fields := make([]*types.Var, r.Len()) - var tags []string - for i := range fields { - pos := r.pos() - pkg, name := r.selector() - ftyp := r.typ() - tag := r.String() - embedded := r.Bool() - - fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) - if tag != "" { - for len(tags) < i { - tags = append(tags, "") - } - tags = append(tags, tag) - } - } - return types.NewStruct(fields, tags) -} - -func (r *reader) unionType() *types.Union { - terms := make([]*types.Term, r.Len()) - for i := range terms { - terms[i] = types.NewTerm(r.Bool(), r.typ()) - } - return types.NewUnion(terms) -} - -func (r *reader) interfaceType() *types.Interface { - methods := make([]*types.Func, r.Len()) - embeddeds := make([]types.Type, r.Len()) - implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() - - for i := range methods { - pos := r.pos() - pkg, name := r.selector() - mtyp := r.signature(nil, nil, nil) - methods[i] = types.NewFunc(pos, pkg, name, mtyp) - } - - for i := range embeddeds { - embeddeds[i] = r.typ() - } - - iface := types.NewInterfaceType(methods, embeddeds) - if implicit { - iface.MarkImplicit() - } - - // We need to call iface.Complete(), but if there are any embedded - // defined types, then we may not have set their underlying - // interface type yet. So we need to defer calling Complete until - // after we've called SetUnderlying everywhere. - // - // TODO(mdempsky): After CL 424876 lands, it should be safe to call - // iface.Complete() immediately. - r.p.ifaces = append(r.p.ifaces, iface) - - return iface -} - -func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { - r.Sync(pkgbits.SyncSignature) - - params := r.params() - results := r.params() - variadic := r.Bool() - - return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) -} - -func (r *reader) params() *types.Tuple { - r.Sync(pkgbits.SyncParams) - - params := make([]*types.Var, r.Len()) - for i := range params { - params[i] = r.param() - } - - return types.NewTuple(params...) 
-} - -func (r *reader) param() *types.Var { - r.Sync(pkgbits.SyncParam) - - pos := r.pos() - pkg, name := r.localIdent() - typ := r.typ() - - return types.NewParam(pos, pkg, name, typ) -} - -// @@@ Objects - -func (r *reader) obj() (types.Object, []types.Type) { - r.Sync(pkgbits.SyncObject) - - assert(!r.Bool()) - - pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) - obj := pkgScope(pkg).Lookup(name) - - targs := make([]types.Type, r.Len()) - for i := range targs { - targs[i] = r.typ() - } - - return obj, targs -} - -func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { - - var objPkg *types.Package - var objName string - var tag pkgbits.CodeObj - { - rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) - - objPkg, objName = rname.qualifiedIdent() - assert(objName != "") - - tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) - pr.retireReader(rname) - } - - if tag == pkgbits.ObjStub { - assert(objPkg == nil || objPkg == types.Unsafe) - return objPkg, objName - } - - // Ignore local types promoted to global scope (#55110). - if _, suffix := splitVargenSuffix(objName); suffix != "" { - return objPkg, objName - } - - if objPkg.Scope().Lookup(objName) == nil { - dict := pr.objDictIdx(idx) - - r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) - r.dict = dict - - declare := func(obj types.Object) { - objPkg.Scope().Insert(obj) - } - - switch tag { - default: - panic("weird") - - case pkgbits.ObjAlias: - pos := r.pos() - typ := r.typ() - declare(types.NewTypeName(pos, objPkg, objName, typ)) - - case pkgbits.ObjConst: - pos := r.pos() - typ := r.typ() - val := r.Value() - declare(types.NewConst(pos, objPkg, objName, typ, val)) - - case pkgbits.ObjFunc: - pos := r.pos() - tparams := r.typeParamNames() - sig := r.signature(nil, nil, tparams) - declare(types.NewFunc(pos, objPkg, objName, sig)) - - case pkgbits.ObjType: - pos := r.pos() - - obj := types.NewTypeName(pos, objPkg, objName, nil) - named := types.NewNamed(obj, nil, nil) - declare(obj) - - named.SetTypeParams(r.typeParamNames()) - - setUnderlying := func(underlying types.Type) { - // If the underlying type is an interface, we need to - // duplicate its methods so we can replace the receiver - // parameter's type (#49906). - if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { - methods := make([]*types.Func, iface.NumExplicitMethods()) - for i := range methods { - fn := iface.ExplicitMethod(i) - sig := fn.Type().(*types.Signature) - - recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) - } - - embeds := make([]types.Type, iface.NumEmbeddeds()) - for i := range embeds { - embeds[i] = iface.EmbeddedType(i) - } - - newIface := types.NewInterfaceType(methods, embeds) - r.p.ifaces = append(r.p.ifaces, newIface) - underlying = newIface - } - - named.SetUnderlying(underlying) - } - - // Since go.dev/cl/455279, we can assume rhs.Underlying() will - // always be non-nil. However, to temporarily support users of - // older snapshot releases, we continue to fallback to the old - // behavior for now. - // - // TODO(mdempsky): Remove fallback code and simplify after - // allowing time for snapshot users to upgrade. 
- rhs := r.typ() - if underlying := rhs.Underlying(); underlying != nil { - setUnderlying(underlying) - } else { - pk := r.p - pk.laterFor(named, func() { - // First be sure that the rhs is initialized, if it needs to be initialized. - delete(pk.laterFors, named) // prevent cycles - if i, ok := pk.laterFors[rhs]; ok { - f := pk.laterFns[i] - pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op - f() // initialize RHS - } - setUnderlying(rhs.Underlying()) - }) - } - - for i, n := 0, r.Len(); i < n; i++ { - named.AddMethod(r.method()) - } - - case pkgbits.ObjVar: - pos := r.pos() - typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) - } - } - - return objPkg, objName -} - -func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { - - var dict readerDict - - { - r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) - if implicits := r.Len(); implicits != 0 { - errorf("unexpected object with %v implicit type parameter(s)", implicits) - } - - dict.bounds = make([]typeInfo, r.Len()) - for i := range dict.bounds { - dict.bounds[i] = r.typInfo() - } - - dict.derived = make([]derivedInfo, r.Len()) - dict.derivedTypes = make([]types.Type, len(dict.derived)) - for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} - } - - pr.retireReader(r) - } - // function references follow, but reader doesn't need those - - return &dict -} - -func (r *reader) typeParamNames() []*types.TypeParam { - r.Sync(pkgbits.SyncTypeParamNames) - - // Note: This code assumes it only processes objects without - // implement type parameters. This is currently fine, because - // reader is only used to read in exported declarations, which are - // always package scoped. - - if len(r.dict.bounds) == 0 { - return nil - } - - // Careful: Type parameter lists may have cycles. To allow for this, - // we construct the type parameter list in two passes: first we - // create all the TypeNames and TypeParams, then we construct and - // set the bound type. - - r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) - for i := range r.dict.bounds { - pos := r.pos() - pkg, name := r.localIdent() - - tname := types.NewTypeName(pos, pkg, name, nil) - r.dict.tparams[i] = types.NewTypeParam(tname, nil) - } - - typs := make([]types.Type, len(r.dict.bounds)) - for i, bound := range r.dict.bounds { - typs[i] = r.p.typIdx(bound, r.dict) - } - - // TODO(mdempsky): This is subtle, elaborate further. - // - // We have to save tparams outside of the closure, because - // typeParamNames() can be called multiple times with the same - // dictionary instance. - // - // Also, this needs to happen later to make sure SetUnderlying has - // been called. - // - // TODO(mdempsky): Is it safe to have a single "later" slice or do - // we need to have multiple passes? See comments on CL 386002 and - // go.dev/issue/52104. - tparams := r.dict.tparams - r.p.later(func() { - for i, typ := range typs { - tparams[i].SetConstraint(typ) - } - }) - - return r.dict.tparams -} - -func (r *reader) method() *types.Func { - r.Sync(pkgbits.SyncMethod) - pos := r.pos() - pkg, name := r.selector() - - rparams := r.typeParamNames() - sig := r.signature(r.param(), rparams, nil) - - _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. 
- return types.NewFunc(pos, pkg, name, sig) -} - -func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } -func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } -func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } - -func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { - r.Sync(marker) - return r.pkg(), r.String() -} - -// pkgScope returns pkg.Scope(). -// If pkg is nil, it returns types.Universe instead. -// -// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. -func pkgScope(pkg *types.Package) *types.Scope { - if pkg != nil { - return pkg.Scope() - } - return types.Universe -} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go deleted file mode 100644 index 55312522dc2..00000000000 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gocommand is a helper for calling the go command. -package gocommand - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "log" - "os" - "os/exec" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/event/keys" - "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/event/tag" -) - -// An Runner will run go command invocations and serialize -// them if it sees a concurrency error. -type Runner struct { - // once guards the runner initialization. - once sync.Once - - // inFlight tracks available workers. - inFlight chan struct{} - - // serialized guards the ability to run a go command serially, - // to avoid deadlocks when claiming workers. - serialized chan struct{} -} - -const maxInFlight = 10 - -func (runner *Runner) initialize() { - runner.once.Do(func() { - runner.inFlight = make(chan struct{}, maxInFlight) - runner.serialized = make(chan struct{}, 1) - }) -} - -// 1.13: go: updates to go.mod needed, but contents have changed -// 1.14: go: updating go.mod: existing contents have changed since last read -var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) - -// verb is an event label for the go command verb. -var verb = keys.NewString("verb", "go command verb") - -func invLabels(inv Invocation) []label.Label { - return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} -} - -// Run is a convenience wrapper around RunRaw. -// It returns only stdout and a "friendly" error. -func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { - ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) - defer done() - - stdout, _, friendly, _ := runner.RunRaw(ctx, inv) - return stdout, friendly -} - -// RunPiped runs the invocation serially, always waiting for any concurrent -// invocations to complete first. -func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { - ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) - defer done() - - _, err := runner.runPiped(ctx, inv, stdout, stderr) - return err -} - -// RunRaw runs the invocation, serializing requests only if they fight over -// go.mod changes. 
-// Postcondition: both error results have same nilness. -func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { - ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) - defer done() - // Make sure the runner is always initialized. - runner.initialize() - - // First, try to run the go command concurrently. - stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) - - // If we encounter a load concurrency error, we need to retry serially. - if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { - event.Error(ctx, "Load concurrency error, will retry serially", err) - - // Run serially by calling runPiped. - stdout.Reset() - stderr.Reset() - friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) - } - - return stdout, stderr, friendlyErr, err -} - -// Postcondition: both error results have same nilness. -func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { - // Wait for 1 worker to become available. - select { - case <-ctx.Done(): - return nil, nil, ctx.Err(), ctx.Err() - case runner.inFlight <- struct{}{}: - defer func() { <-runner.inFlight }() - } - - stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} - friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) - return stdout, stderr, friendlyErr, err -} - -// Postcondition: both error results have same nilness. -func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { - // Make sure the runner is always initialized. - runner.initialize() - - // Acquire the serialization lock. This avoids deadlocks between two - // runPiped commands. - select { - case <-ctx.Done(): - return ctx.Err(), ctx.Err() - case runner.serialized <- struct{}{}: - defer func() { <-runner.serialized }() - } - - // Wait for all in-progress go commands to return before proceeding, - // to avoid load concurrency errors. - for i := 0; i < maxInFlight; i++ { - select { - case <-ctx.Done(): - return ctx.Err(), ctx.Err() - case runner.inFlight <- struct{}{}: - // Make sure we always "return" any workers we took. - defer func() { <-runner.inFlight }() - } - } - - return inv.runWithFriendlyError(ctx, stdout, stderr) -} - -// An Invocation represents a call to the go command. -type Invocation struct { - Verb string - Args []string - BuildFlags []string - - // If ModFlag is set, the go command is invoked with -mod=ModFlag. - ModFlag string - - // If ModFile is set, the go command is invoked with -modfile=ModFile. - ModFile string - - // If Overlay is set, the go command is invoked with -overlay=Overlay. - Overlay string - - // If CleanEnv is set, the invocation will run only with the environment - // in Env, not starting with os.Environ. - CleanEnv bool - Env []string - WorkingDir string - Logf func(format string, args ...interface{}) -} - -// Postcondition: both error results have same nilness. -func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { - rawError = i.run(ctx, stdout, stderr) - if rawError != nil { - friendlyError = rawError - // Check for 'go' executable not being found. 
- if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { - friendlyError = fmt.Errorf("go command required, not found: %v", ee) - } - if ctx.Err() != nil { - friendlyError = ctx.Err() - } - friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) - } - return -} - -func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { - log := i.Logf - if log == nil { - log = func(string, ...interface{}) {} - } - - goArgs := []string{i.Verb} - - appendModFile := func() { - if i.ModFile != "" { - goArgs = append(goArgs, "-modfile="+i.ModFile) - } - } - appendModFlag := func() { - if i.ModFlag != "" { - goArgs = append(goArgs, "-mod="+i.ModFlag) - } - } - appendOverlayFlag := func() { - if i.Overlay != "" { - goArgs = append(goArgs, "-overlay="+i.Overlay) - } - } - - switch i.Verb { - case "env", "version": - goArgs = append(goArgs, i.Args...) - case "mod": - // mod needs the sub-verb before flags. - goArgs = append(goArgs, i.Args[0]) - appendModFile() - goArgs = append(goArgs, i.Args[1:]...) - case "get": - goArgs = append(goArgs, i.BuildFlags...) - appendModFile() - goArgs = append(goArgs, i.Args...) - - default: // notably list and build. - goArgs = append(goArgs, i.BuildFlags...) - appendModFile() - appendModFlag() - appendOverlayFlag() - goArgs = append(goArgs, i.Args...) - } - cmd := exec.Command("go", goArgs...) - cmd.Stdout = stdout - cmd.Stderr = stderr - - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } - - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the - // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. - if !i.CleanEnv { - cmd.Env = os.Environ() - } - cmd.Env = append(cmd.Env, i.Env...) - if i.WorkingDir != "" { - cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) - cmd.Dir = i.WorkingDir - } - - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) - - return runCmdContext(ctx, cmd) -} - -// DebugHangingGoCommands may be set by tests to enable additional -// instrumentation (including panics) for debugging hanging Go commands. -// -// See golang/go#54461 for details. -var DebugHangingGoCommands = false - -// runCmdContext is like exec.CommandContext except it sends os.Interrupt -// before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { - // If cmd.Stdout is not an *os.File, the exec package will create a pipe and - // copy it to the Writer in a goroutine until the process has finished and - // either the pipe reaches EOF or command's WaitDelay expires. 
- // - // However, the output from 'go list' can be quite large, and we don't want to - // keep reading (and allocating buffers) if we've already decided we don't - // care about the output. We don't want to wait for the process to finish, and - // we don't wait to wait for the WaitDelay to expire either. - // - // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace - // it with a pipe (which is an *os.File), which we can close in order to stop - // copying output as soon as we realize we don't care about it. - var stdoutW *os.File - if cmd.Stdout != nil { - if _, ok := cmd.Stdout.(*os.File); !ok { - var stdoutR *os.File - stdoutR, stdoutW, err = os.Pipe() - if err != nil { - return err - } - prevStdout := cmd.Stdout - cmd.Stdout = stdoutW - - stdoutErr := make(chan error, 1) - go func() { - _, err := io.Copy(prevStdout, stdoutR) - if err != nil { - err = fmt.Errorf("copying stdout: %w", err) - } - stdoutErr <- err - }() - defer func() { - // We started a goroutine to copy a stdout pipe. - // Wait for it to finish, or terminate it if need be. - var err2 error - select { - case err2 = <-stdoutErr: - stdoutR.Close() - case <-ctx.Done(): - stdoutR.Close() - // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close - // should cause the Read call in io.Copy to unblock and return - // immediately, but we still need to receive from stdoutErr to confirm - // that it has happened. - <-stdoutErr - err2 = ctx.Err() - } - if err == nil { - err = err2 - } - }() - - // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the - // same writer, and have a type that can be compared with ==, at most - // one goroutine at a time will call Write.” - // - // Since we're starting a goroutine that writes to cmd.Stdout, we must - // also update cmd.Stderr so that it still holds. - func() { - defer func() { recover() }() - if cmd.Stderr == prevStdout { - cmd.Stderr = cmd.Stdout - } - }() - } - } - - err = cmd.Start() - if stdoutW != nil { - // The child process has inherited the pipe file, - // so close the copy held in this process. - stdoutW.Close() - stdoutW = nil - } - if err != nil { - return err - } - - resChan := make(chan error, 1) - go func() { - resChan <- cmd.Wait() - }() - - // If we're interested in debugging hanging Go commands, stop waiting after a - // minute and panic with interesting information. - debug := DebugHangingGoCommands - if debug { - timer := time.NewTimer(1 * time.Minute) - defer timer.Stop() - select { - case err := <-resChan: - return err - case <-timer.C: - HandleHangingGoCommand(cmd.Process) - case <-ctx.Done(): - } - } else { - select { - case err := <-resChan: - return err - case <-ctx.Done(): - } - } - - // Cancelled. Interrupt and see if it ends voluntarily. - if err := cmd.Process.Signal(os.Interrupt); err == nil { - // (We used to wait only 1s but this proved - // fragile on loaded builder machines.) - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case err := <-resChan: - return err - case <-timer.C: - } - } - - // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. 
- if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { - log.Printf("error killing the Go command: %v", err) - } - - return <-resChan -} - -func HandleHangingGoCommand(proc *os.Process) { - switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": - fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND - -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. - -See golang/go#54461 for more details.`) - - fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") - fmt.Fprintln(os.Stderr, "-------------------------") - psCmd := exec.Command("ps", "axo", "ppid,pid,command") - psCmd.Stdout = os.Stderr - psCmd.Stderr = os.Stderr - if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) - } - - listFiles := "lsof" - if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { - listFiles = "fstat" - } - - fmt.Fprintln(os.Stderr, "\n"+listFiles+":") - fmt.Fprintln(os.Stderr, "-----") - listFilesCmd := exec.Command(listFiles) - listFilesCmd.Stdout = os.Stderr - listFilesCmd.Stderr = os.Stderr - if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) - } - } - panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) -} - -func cmdDebugStr(cmd *exec.Cmd) string { - env := make(map[string]string) - for _, kv := range cmd.Env { - split := strings.SplitN(kv, "=", 2) - if len(split) == 2 { - k, v := split[0], split[1] - env[k] = v - } - } - - var args []string - for _, arg := range cmd.Args { - quoted := strconv.Quote(arg) - if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { - args = append(args, quoted) - } else { - args = append(args, arg) - } - } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) -} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go deleted file mode 100644 index 2d3d408c0be..00000000000 --- a/vendor/golang.org/x/tools/internal/gocommand/vendor.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocommand - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - "time" - - "golang.org/x/mod/semver" -) - -// ModuleJSON holds information about a module. -type ModuleJSON struct { - Path string // module path - Version string // module version - Versions []string // available module versions (with -versions) - Replace *ModuleJSON // replaced by this module - Time *time.Time // time version was created - Update *ModuleJSON // available update, if any (with -u) - Main bool // is this the main module? - Indirect bool // is this module only an indirect dependency of main module? - Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file used when loading this module, if any - GoVersion string // go version used in module -} - -var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) - -// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands -// with the supplied context.Context and Invocation. 
The Invocation can contain pre-defined fields, -// of which only Verb and Args are modified to run the appropriate Go command. -// Inspired by setDefaultBuildMod in modload/init.go -func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { - mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) - if err != nil { - return false, nil, err - } - - // We check the GOFLAGS to see if there is anything overridden or not. - inv.Verb = "env" - inv.Args = []string{"GOFLAGS"} - stdout, err := r.Run(ctx, inv) - if err != nil { - return false, nil, err - } - goflags := string(bytes.TrimSpace(stdout.Bytes())) - matches := modFlagRegexp.FindStringSubmatch(goflags) - var modFlag string - if len(matches) != 0 { - modFlag = matches[1] - } - // Don't override an explicit '-mod=' argument. - if modFlag == "vendor" { - return true, mainMod, nil - } else if modFlag != "" { - return false, nil, nil - } - if mainMod == nil || !go114 { - return false, nil, nil - } - // Check 1.14's automatic vendor mode. - if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { - if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { - // The Go version is at least 1.14, and a vendor directory exists. - // Set -mod=vendor by default. - return true, mainMod, nil - } - } - return false, nil, nil -} - -// getMainModuleAnd114 gets one of the main modules' information and whether the -// go command in use is 1.14+. This is the information needed to figure out -// if vendoring should be enabled. -func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { - const format = `{{.Path}} -{{.Dir}} -{{.GoMod}} -{{.GoVersion}} -{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} -` - inv.Verb = "list" - inv.Args = []string{"-m", "-f", format} - stdout, err := r.Run(ctx, inv) - if err != nil { - return nil, false, err - } - - lines := strings.Split(stdout.String(), "\n") - if len(lines) < 5 { - return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) - } - mod := &ModuleJSON{ - Path: lines[0], - Dir: lines[1], - GoMod: lines[2], - GoVersion: lines[3], - Main: true, - } - return mod, lines[4] == "go1.14", nil -} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go deleted file mode 100644 index 446c5846a60..00000000000 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gocommand - -import ( - "context" - "fmt" - "regexp" - "strings" -) - -// GoVersion reports the minor version number of the highest release -// tag built into the go command on the PATH. -// -// Note that this may be higher than the version of the go tool used -// to build this application, and thus the versions of the standard -// go/{scanner,parser,ast,types} packages that are linked into it. -// In that case, callers should either downgrade to the version of -// go used to build the application, or report an error that the -// application is too old to use the go command on the PATH. -func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { - inv.Verb = "list" - inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.BuildFlags = nil // This is not a build command. 
- inv.ModFlag = "" - inv.ModFile = "" - inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") - - stdoutBytes, err := r.Run(ctx, inv) - if err != nil { - return 0, err - } - stdout := stdoutBytes.String() - if len(stdout) < 3 { - return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) - } - // Split up "[go1.1 go1.15]" and return highest go1.X value. - tags := strings.Fields(stdout[1 : len(stdout)-2]) - for i := len(tags) - 1; i >= 0; i-- { - var version int - if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { - continue - } - return version, nil - } - return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) -} - -// GoVersionOutput returns the complete output of the go version command. -func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { - inv.Verb = "version" - goVersion, err := r.Run(ctx, inv) - if err != nil { - return "", err - } - return goVersion.String(), nil -} - -// ParseGoVersionOutput extracts the Go version string -// from the output of the "go version" command. -// Given an unrecognized form, it returns an empty string. -func ParseGoVersionOutput(data string) string { - re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) - m := re.FindStringSubmatch(data) - if len(m) != 2 { - return "" // unrecognized version - } - return m[1] -} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go deleted file mode 100644 index 44719de173b..00000000000 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesinternal exposes internal-only fields from go/packages. -package packagesinternal - -var GetForTest = func(p interface{}) string { return "" } -var GetDepsErrors = func(p interface{}) []*PackageError { return nil } - -type PackageError struct { - ImportStack []string // shortest path from package named on command line to this one - Pos string // position of error (if present, file:line:col) - Err string // the error itself -} - -var TypecheckCgo int -var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -var ForTest int // must be set as a LoadMode to call GetForTest - -var SetModFlag = func(config interface{}, value string) {} -var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go deleted file mode 100644 index f0cabde96eb..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/codes.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -// A Code is an enum value that can be encoded into bitstreams. -// -// Code types are preferable for enum types, because they allow -// Decoder to detect desyncs. -type Code interface { - // Marker returns the SyncMarker for the Code's dynamic type. - Marker() SyncMarker - - // Value returns the Code's ordinal value. - Value() int -} - -// A CodeVal distinguishes among go/constant.Value encodings. 
-type CodeVal int - -func (c CodeVal) Marker() SyncMarker { return SyncVal } -func (c CodeVal) Value() int { return int(c) } - -// Note: These values are public and cannot be changed without -// updating the go/types importers. - -const ( - ValBool CodeVal = iota - ValString - ValInt64 - ValBigInt - ValBigRat - ValBigFloat -) - -// A CodeType distinguishes among go/types.Type encodings. -type CodeType int - -func (c CodeType) Marker() SyncMarker { return SyncType } -func (c CodeType) Value() int { return int(c) } - -// Note: These values are public and cannot be changed without -// updating the go/types importers. - -const ( - TypeBasic CodeType = iota - TypeNamed - TypePointer - TypeSlice - TypeArray - TypeChan - TypeMap - TypeSignature - TypeStruct - TypeInterface - TypeUnion - TypeTypeParam -) - -// A CodeObj distinguishes among go/types.Object encodings. -type CodeObj int - -func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } -func (c CodeObj) Value() int { return int(c) } - -// Note: These values are public and cannot be changed without -// updating the go/types importers. - -const ( - ObjAlias CodeObj = iota - ObjConst - ObjType - ObjFunc - ObjVar - ObjStub -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go deleted file mode 100644 index b92e8e6eb32..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ /dev/null @@ -1,517 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "encoding/binary" - "errors" - "fmt" - "go/constant" - "go/token" - "io" - "math/big" - "os" - "runtime" - "strings" -) - -// A PkgDecoder provides methods for decoding a package's Unified IR -// export data. -type PkgDecoder struct { - // version is the file format version. - version uint32 - - // sync indicates whether the file uses sync markers. - sync bool - - // pkgPath is the package path for the package to be decoded. - // - // TODO(mdempsky): Remove; unneeded since CL 391014. - pkgPath string - - // elemData is the full data payload of the encoded package. - // Elements are densely and contiguously packed together. - // - // The last 8 bytes of elemData are the package fingerprint. - elemData string - - // elemEnds stores the byte-offset end positions of element - // bitstreams within elemData. - // - // For example, element I's bitstream data starts at elemEnds[I-1] - // (or 0, if I==0) and ends at elemEnds[I]. - // - // Note: elemEnds is indexed by absolute indices, not - // section-relative indices. - elemEnds []uint32 - - // elemEndsEnds stores the index-offset end positions of relocation - // sections within elemEnds. - // - // For example, section K's end positions start at elemEndsEnds[K-1] - // (or 0, if K==0) and end at elemEndsEnds[K]. - elemEndsEnds [numRelocs]uint32 - - scratchRelocEnt []RelocEnt -} - -// PkgPath returns the package path for the package -// -// TODO(mdempsky): Remove; unneeded since CL 391014. -func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } - -// SyncMarkers reports whether pr uses sync markers. -func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } - -// NewPkgDecoder returns a PkgDecoder initialized to read the Unified -// IR export data from input. pkgPath is the package path for the -// compilation unit that produced the export data. 
-// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. -func NewPkgDecoder(pkgPath, input string) PkgDecoder { - pr := PkgDecoder{ - pkgPath: pkgPath, - } - - // TODO(mdempsky): Implement direct indexing of input string to - // avoid copying the position information. - - r := strings.NewReader(input) - - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) - - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: - var flags uint32 - assert(binary.Read(r, binary.LittleEndian, &flags) == nil) - pr.sync = flags&flagSyncMarkers != 0 - } - - assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) - - pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) - assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) - - pos, err := r.Seek(0, io.SeekCurrent) - assert(err == nil) - - pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) - - return pr -} - -// NumElems returns the number of elements in section k. -func (pr *PkgDecoder) NumElems(k RelocKind) int { - count := int(pr.elemEndsEnds[k]) - if k > 0 { - count -= int(pr.elemEndsEnds[k-1]) - } - return count -} - -// TotalElems returns the total number of elements across all sections. -func (pr *PkgDecoder) TotalElems() int { - return len(pr.elemEnds) -} - -// Fingerprint returns the package fingerprint. -func (pr *PkgDecoder) Fingerprint() [8]byte { - var fp [8]byte - copy(fp[:], pr.elemData[len(pr.elemData)-8:]) - return fp -} - -// AbsIdx returns the absolute index for the given (section, index) -// pair. -func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { - absIdx := int(idx) - if k > 0 { - absIdx += int(pr.elemEndsEnds[k-1]) - } - if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) - } - return absIdx -} - -// DataIdx returns the raw element bitstream for the given (section, -// index) pair. -func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { - absIdx := pr.AbsIdx(k, idx) - - var start uint32 - if absIdx > 0 { - start = pr.elemEnds[absIdx-1] - } - end := pr.elemEnds[absIdx] - - return pr.elemData[start:end] -} - -// StringIdx returns the string value for the given string index. -func (pr *PkgDecoder) StringIdx(idx Index) string { - return pr.DataIdx(RelocString, idx) -} - -// NewDecoder returns a Decoder for the given (section, index) pair, -// and decodes the given SyncMarker from the element bitstream. -func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { - r := pr.NewDecoderRaw(k, idx) - r.Sync(marker) - return r -} - -// TempDecoder returns a Decoder for the given (section, index) pair, -// and decodes the given SyncMarker from the element bitstream. -// If possible the Decoder should be RetireDecoder'd when it is no longer -// needed, this will avoid heap allocations. -func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { - r := pr.TempDecoderRaw(k, idx) - r.Sync(marker) - return r -} - -func (pr *PkgDecoder) RetireDecoder(d *Decoder) { - pr.scratchRelocEnt = d.Relocs - d.Relocs = nil -} - -// NewDecoderRaw returns a Decoder for the given (section, index) pair. -// -// Most callers should use NewDecoder instead. -func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { - r := Decoder{ - common: pr, - k: k, - Idx: idx, - } - - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. 
- r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - - r.Sync(SyncRelocs) - r.Relocs = make([]RelocEnt, r.Len()) - for i := range r.Relocs { - r.Sync(SyncReloc) - r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} - } - - return r -} - -func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder { - r := Decoder{ - common: pr, - k: k, - Idx: idx, - } - - r.Data.Reset(pr.DataIdx(k, idx)) - r.Sync(SyncRelocs) - l := r.Len() - if cap(pr.scratchRelocEnt) >= l { - r.Relocs = pr.scratchRelocEnt[:l] - pr.scratchRelocEnt = nil - } else { - r.Relocs = make([]RelocEnt, l) - } - for i := range r.Relocs { - r.Sync(SyncReloc) - r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} - } - - return r -} - -// A Decoder provides methods for decoding an individual element's -// bitstream data. -type Decoder struct { - common *PkgDecoder - - Relocs []RelocEnt - Data strings.Reader - - k RelocKind - Idx Index -} - -func (r *Decoder) checkErr(err error) { - if err != nil { - errorf("unexpected decoding error: %w", err) - } -} - -func (r *Decoder) rawUvarint() uint64 { - x, err := readUvarint(&r.Data) - r.checkErr(err) - return x -} - -// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint. -// This avoids the interface conversion and thus has better escape properties, -// which flows up the stack. -func readUvarint(r *strings.Reader) (uint64, error) { - var x uint64 - var s uint - for i := 0; i < binary.MaxVarintLen64; i++ { - b, err := r.ReadByte() - if err != nil { - if i > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return x, err - } - if b < 0x80 { - if i == binary.MaxVarintLen64-1 && b > 1 { - return x, overflow - } - return x | uint64(b)<<s, nil - } - x |= uint64(b&0x7f) << s - s += 7 - } - return x, overflow -} - -var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer") - -func (r *Decoder) rawVarint() int64 { - ux := r.rawUvarint() - - // Zig-zag decode. - x := int64(ux >> 1) - if ux&1 != 0 { - x = ^x - } - return x -} - -func (r *Decoder) rawReloc(k RelocKind, idx int) Index { - e := r.Relocs[idx] - assert(e.Kind == k) - return e.Idx -} - -// Sync decodes a sync marker from the element bitstream and asserts -// that it matches the expected marker. -// -// If r.common.sync is false, then Sync is a no-op. -func (r *Decoder) Sync(mWant SyncMarker) { - if !r.common.sync { - return - } - - pos, _ := r.Data.Seek(0, io.SeekCurrent) - mHave := SyncMarker(r.rawUvarint()) - writerPCs := make([]int, r.rawUvarint()) - for i := range writerPCs { - writerPCs[i] = int(r.rawUvarint()) - } - - if mHave == mWant { - return - } - - // There's some tension here between printing: - // - // (1) full file paths that tools can recognize (e.g., so emacs - // hyperlinks the "file:line" text for easy navigation), or - // - // (2) short file paths that are easier for humans to read (e.g., by - // omitting redundant or irrelevant details, so it's easier to - // focus on the useful bits that remain). - // - // The current formatting favors the former, as it seems more - // helpful in practice. But perhaps the formatting could be improved - // to better address both concerns. For example, use relative file - // paths if they would be shorter, or rewrite file paths to contain - // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how - // to reliably expand that again.
- - fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) - - fmt.Printf("\nfound %v, written at:\n", mHave) - if len(writerPCs) == 0 { - fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) - } - for _, pc := range writerPCs { - fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) - } - - fmt.Printf("\nexpected %v, reading at:\n", mWant) - var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? - n := runtime.Callers(2, readerPCs[:]) - for _, pc := range fmtFrames(readerPCs[:n]...) { - fmt.Printf("\t%s\n", pc) - } - - // We already printed a stack trace for the reader, so now we can - // simply exit. Printing a second one with panic or base.Fatalf - // would just be noise. - os.Exit(1) -} - -// Bool decodes and returns a bool value from the element bitstream. -func (r *Decoder) Bool() bool { - r.Sync(SyncBool) - x, err := r.Data.ReadByte() - r.checkErr(err) - assert(x < 2) - return x != 0 -} - -// Int64 decodes and returns an int64 value from the element bitstream. -func (r *Decoder) Int64() int64 { - r.Sync(SyncInt64) - return r.rawVarint() -} - -// Uint64 decodes and returns a uint64 value from the element bitstream. -func (r *Decoder) Uint64() uint64 { - r.Sync(SyncUint64) - return r.rawUvarint() -} - -// Len decodes and returns a non-negative int value from the element bitstream. -func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } - -// Int decodes and returns an int value from the element bitstream. -func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } - -// Uint decodes and returns a uint value from the element bitstream. -func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } - -// Code decodes a Code value from the element bitstream and returns -// its ordinal value. It's the caller's responsibility to convert the -// result to an appropriate Code type. -// -// TODO(mdempsky): Ideally this method would have signature "Code[T -// Code] T" instead, but we don't allow generic methods and the -// compiler can't depend on generics yet anyway. -func (r *Decoder) Code(mark SyncMarker) int { - r.Sync(mark) - return r.Len() -} - -// Reloc decodes a relocation of expected section k from the element -// bitstream and returns an index to the referenced element. -func (r *Decoder) Reloc(k RelocKind) Index { - r.Sync(SyncUseReloc) - return r.rawReloc(k, r.Len()) -} - -// String decodes and returns a string value from the element -// bitstream. -func (r *Decoder) String() string { - r.Sync(SyncString) - return r.common.StringIdx(r.Reloc(RelocString)) -} - -// Strings decodes and returns a variable-length slice of strings from -// the element bitstream. -func (r *Decoder) Strings() []string { - res := make([]string, r.Len()) - for i := range res { - res[i] = r.String() - } - return res -} - -// Value decodes and returns a constant.Value from the element -// bitstream. 
-func (r *Decoder) Value() constant.Value { - r.Sync(SyncValue) - isComplex := r.Bool() - val := r.scalar() - if isComplex { - val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) - } - return val -} - -func (r *Decoder) scalar() constant.Value { - switch tag := CodeVal(r.Code(SyncVal)); tag { - default: - panic(fmt.Errorf("unexpected scalar tag: %v", tag)) - - case ValBool: - return constant.MakeBool(r.Bool()) - case ValString: - return constant.MakeString(r.String()) - case ValInt64: - return constant.MakeInt64(r.Int64()) - case ValBigInt: - return constant.Make(r.bigInt()) - case ValBigRat: - num := r.bigInt() - denom := r.bigInt() - return constant.Make(new(big.Rat).SetFrac(num, denom)) - case ValBigFloat: - return constant.Make(r.bigFloat()) - } -} - -func (r *Decoder) bigInt() *big.Int { - v := new(big.Int).SetBytes([]byte(r.String())) - if r.Bool() { - v.Neg(v) - } - return v -} - -func (r *Decoder) bigFloat() *big.Float { - v := new(big.Float).SetPrec(512) - assert(v.UnmarshalText([]byte(r.String())) == nil) - return v -} - -// @@@ Helpers - -// TODO(mdempsky): These should probably be removed. I think they're a -// smell that the export data format is not yet quite right. - -// PeekPkgPath returns the package path for the specified package -// index. -func (pr *PkgDecoder) PeekPkgPath(idx Index) string { - var path string - { - r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) - path = r.String() - pr.RetireDecoder(&r) - } - if path == "" { - path = pr.pkgPath - } - return path -} - -// PeekObj returns the package path, object name, and CodeObj for the -// specified object index. -func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { - var ridx Index - var name string - var rcode int - { - r := pr.TempDecoder(RelocName, idx, SyncObject1) - r.Sync(SyncSym) - r.Sync(SyncPkg) - ridx = r.Reloc(RelocPkg) - name = r.String() - rcode = r.Code(SyncCodeObj) - pr.RetireDecoder(&r) - } - - path := pr.PeekPkgPath(ridx) - assert(name != "") - - tag := CodeObj(rcode) - - return path, name, tag -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/internal/pkgbits/doc.go deleted file mode 100644 index c8a2796b5e4..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pkgbits implements low-level coding abstractions for -// Unified IR's export data format. -// -// At a low-level, a package is a collection of bitstream elements. -// Each element has a "kind" and a dense, non-negative index. -// Elements can be randomly accessed given their kind and index. -// -// Individual elements are sequences of variable-length values (e.g., -// integers, booleans, strings, go/constant values, cross-references -// to other elements). Package pkgbits provides APIs for encoding and -// decoding these low-level values, but the details of mapping -// higher-level Go constructs into elements is left to higher-level -// abstractions. -// -// Elements may cross-reference each other with "relocations." For -// example, an element representing a pointer type has a relocation -// referring to the element type. -// -// Go constructs may be composed as a constellation of multiple -// elements. 
For example, a declared function may have one element to -// describe the object (e.g., its name, type, position), and a -// separate element to describe its function body. This allows readers -// some flexibility in efficiently seeking or re-reading data (e.g., -// inlining requires re-reading the function body for each inlined -// call, without needing to re-read the object-level details). -// -// This is a copy of internal/pkgbits in the Go implementation. -package pkgbits diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go deleted file mode 100644 index 6482617a4fc..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "bytes" - "crypto/md5" - "encoding/binary" - "go/constant" - "io" - "math/big" - "runtime" -) - -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - -// A PkgEncoder provides methods for encoding a package's Unified IR -// export data. -type PkgEncoder struct { - // elems holds the bitstream for previously encoded elements. - elems [numRelocs][]string - - // stringsIdx maps previously encoded strings to their index within - // the RelocString section, to allow deduplication. That is, - // elems[RelocString][stringsIdx[s]] == s (if present). - stringsIdx map[string]Index - - // syncFrames is the number of frames to write at each sync - // marker. A negative value means sync markers are omitted. - syncFrames int -} - -// SyncMarkers reports whether pw uses sync markers. -func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } - -// NewPkgEncoder returns an initialized PkgEncoder. -// -// syncFrames is the number of caller frames that should be serialized -// at Sync points. Serializing additional frames results in larger -// export data files, but can help diagnosing desync errors in -// higher-level Unified IR reader/writer code. If syncFrames is -// negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { - return PkgEncoder{ - stringsIdx: make(map[string]Index), - syncFrames: syncFrames, - } -} - -// DumpTo writes the package's encoded data to out0 and returns the -// package fingerprint. -func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { - h := md5.New() - out := io.MultiWriter(out0, h) - - writeUint32 := func(x uint32) { - assert(binary.Write(out, binary.LittleEndian, x) == nil) - } - - writeUint32(currentVersion) - - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers - } - writeUint32(flags) - - // Write elemEndsEnds. - var sum uint32 - for _, elems := range &pw.elems { - sum += uint32(len(elems)) - writeUint32(sum) - } - - // Write elemEnds. - sum = 0 - for _, elems := range &pw.elems { - for _, elem := range elems { - sum += uint32(len(elem)) - writeUint32(sum) - } - } - - // Write elemData. - for _, elems := range &pw.elems { - for _, elem := range elems { - _, err := io.WriteString(out, elem) - assert(err == nil) - } - } - - // Write fingerprint. 
- copy(fingerprint[:], h.Sum(nil)) - _, err := out0.Write(fingerprint[:]) - assert(err == nil) - - return -} - -// StringIdx adds a string value to the strings section, if not -// already present, and returns its index. -func (pw *PkgEncoder) StringIdx(s string) Index { - if idx, ok := pw.stringsIdx[s]; ok { - assert(pw.elems[RelocString][idx] == s) - return idx - } - - idx := Index(len(pw.elems[RelocString])) - pw.elems[RelocString] = append(pw.elems[RelocString], s) - pw.stringsIdx[s] = idx - return idx -} - -// NewEncoder returns an Encoder for a new element within the given -// section, and encodes the given SyncMarker as the start of the -// element bitstream. -func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { - e := pw.NewEncoderRaw(k) - e.Sync(marker) - return e -} - -// NewEncoderRaw returns an Encoder for a new element within the given -// section. -// -// Most callers should use NewEncoder instead. -func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { - idx := Index(len(pw.elems[k])) - pw.elems[k] = append(pw.elems[k], "") // placeholder - - return Encoder{ - p: pw, - k: k, - Idx: idx, - } -} - -// An Encoder provides methods for encoding an individual element's -// bitstream data. -type Encoder struct { - p *PkgEncoder - - Relocs []RelocEnt - RelocMap map[RelocEnt]uint32 - Data bytes.Buffer // accumulated element bitstream data - - encodingRelocHeader bool - - k RelocKind - Idx Index // index within relocation section -} - -// Flush finalizes the element's bitstream and returns its Index. -func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved - - // Backup the data so we write the relocations at the front. - var tmp bytes.Buffer - io.Copy(&tmp, &w.Data) - - // TODO(mdempsky): Consider writing these out separately so they're - // easier to strip, along with function bodies, so that we can prune - // down to just the data that's relevant to go/types. - if w.encodingRelocHeader { - panic("encodingRelocHeader already true; recursive flush?") - } - w.encodingRelocHeader = true - w.Sync(SyncRelocs) - w.Len(len(w.Relocs)) - for _, rEnt := range w.Relocs { - w.Sync(SyncReloc) - w.Len(int(rEnt.Kind)) - w.Len(int(rEnt.Idx)) - } - - io.Copy(&sb, &w.Data) - io.Copy(&sb, &tmp) - w.p.elems[w.k][w.Idx] = sb.String() - - return w.Idx -} - -func (w *Encoder) checkErr(err error) { - if err != nil { - errorf("unexpected encoding error: %v", err) - } -} - -func (w *Encoder) rawUvarint(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - _, err := w.Data.Write(buf[:n]) - w.checkErr(err) -} - -func (w *Encoder) rawVarint(x int64) { - // Zig-zag encode. - ux := uint64(x) << 1 - if x < 0 { - ux = ^ux - } - - w.rawUvarint(ux) -} - -func (w *Encoder) rawReloc(r RelocKind, idx Index) int { - e := RelocEnt{r, idx} - if w.RelocMap != nil { - if i, ok := w.RelocMap[e]; ok { - return int(i) - } - } else { - w.RelocMap = make(map[RelocEnt]uint32) - } - - i := len(w.Relocs) - w.RelocMap[e] = uint32(i) - w.Relocs = append(w.Relocs, e) - return i -} - -func (w *Encoder) Sync(m SyncMarker) { - if !w.p.SyncMarkers() { - return - } - - // Writing out stack frame string references requires working - // relocations, but writing out the relocations themselves involves - // sync markers. To prevent infinite recursion, we simply trim the - // stack frame for sync markers within the relocation header. 
- var frames []string - if !w.encodingRelocHeader && w.p.syncFrames > 0 { - pcs := make([]uintptr, w.p.syncFrames) - n := runtime.Callers(2, pcs) - frames = fmtFrames(pcs[:n]...) - } - - // TODO(mdempsky): Save space by writing out stack frames as a - // linked list so we can share common stack frames. - w.rawUvarint(uint64(m)) - w.rawUvarint(uint64(len(frames))) - for _, frame := range frames { - w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) - } -} - -// Bool encodes and writes a bool value into the element bitstream, -// and then returns the bool value. -// -// For simple, 2-alternative encodings, the idiomatic way to call Bool -// is something like: -// -// if w.Bool(x != 0) { -// // alternative #1 -// } else { -// // alternative #2 -// } -// -// For multi-alternative encodings, use Code instead. -func (w *Encoder) Bool(b bool) bool { - w.Sync(SyncBool) - var x byte - if b { - x = 1 - } - err := w.Data.WriteByte(x) - w.checkErr(err) - return b -} - -// Int64 encodes and writes an int64 value into the element bitstream. -func (w *Encoder) Int64(x int64) { - w.Sync(SyncInt64) - w.rawVarint(x) -} - -// Uint64 encodes and writes a uint64 value into the element bitstream. -func (w *Encoder) Uint64(x uint64) { - w.Sync(SyncUint64) - w.rawUvarint(x) -} - -// Len encodes and writes a non-negative int value into the element bitstream. -func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } - -// Int encodes and writes an int value into the element bitstream. -func (w *Encoder) Int(x int) { w.Int64(int64(x)) } - -// Uint encodes and writes a uint value into the element bitstream. -func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } - -// Reloc encodes and writes a relocation for the given (section, -// index) pair into the element bitstream. -// -// Note: Only the index is formally written into the element -// bitstream, so bitstream decoders must know from context which -// section an encoded relocation refers to. -func (w *Encoder) Reloc(r RelocKind, idx Index) { - w.Sync(SyncUseReloc) - w.Len(w.rawReloc(r, idx)) -} - -// Code encodes and writes a Code value into the element bitstream. -func (w *Encoder) Code(c Code) { - w.Sync(c.Marker()) - w.Len(c.Value()) -} - -// String encodes and writes a string value into the element -// bitstream. -// -// Internally, strings are deduplicated by adding them to the strings -// section (if not already present), and then writing a relocation -// into the element bitstream. -func (w *Encoder) String(s string) { - w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) -} - -// Strings encodes and writes a variable-length slice of strings into -// the element bitstream. -func (w *Encoder) Strings(ss []string) { - w.Len(len(ss)) - for _, s := range ss { - w.String(s) - } -} - -// Value encodes and writes a constant.Value into the element -// bitstream. 
-func (w *Encoder) Value(val constant.Value) { - w.Sync(SyncValue) - if w.Bool(val.Kind() == constant.Complex) { - w.scalar(constant.Real(val)) - w.scalar(constant.Imag(val)) - } else { - w.scalar(val) - } -} - -func (w *Encoder) scalar(val constant.Value) { - switch v := constant.Val(val).(type) { - default: - errorf("unhandled %v (%v)", val, val.Kind()) - case bool: - w.Code(ValBool) - w.Bool(v) - case string: - w.Code(ValString) - w.String(v) - case int64: - w.Code(ValInt64) - w.Int64(v) - case *big.Int: - w.Code(ValBigInt) - w.bigInt(v) - case *big.Rat: - w.Code(ValBigRat) - w.bigInt(v.Num()) - w.bigInt(v.Denom()) - case *big.Float: - w.Code(ValBigFloat) - w.bigFloat(v) - } -} - -func (w *Encoder) bigInt(v *big.Int) { - b := v.Bytes() - w.String(string(b)) // TODO: More efficient encoding. - w.Bool(v.Sign() < 0) -} - -func (w *Encoder) bigFloat(v *big.Float) { - b := v.Append(nil, 'p', -1) - w.String(string(b)) // TODO: More efficient encoding. -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/internal/pkgbits/flags.go deleted file mode 100644 index 654222745fa..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/flags.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -const ( - flagSyncMarkers = 1 << iota // file format contains sync markers -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63ed..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7adfe..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go deleted file mode 100644 index fcdfb97ca99..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -// A RelocKind indicates a particular section within a unified IR export. -type RelocKind int32 - -// An Index represents a bitstream element index within a particular -// section. -type Index int32 - -// A relocEnt (relocation entry) is an entry in an element's local -// reference table. -// -// TODO(mdempsky): Rename this too. -type RelocEnt struct { - Kind RelocKind - Idx Index -} - -// Reserved indices within the meta relocation section. -const ( - PublicRootIdx Index = 0 - PrivateRootIdx Index = 1 -) - -const ( - RelocString RelocKind = iota - RelocMeta - RelocPosBase - RelocPkg - RelocName - RelocType - RelocObj - RelocObjExt - RelocObjDict - RelocBody - - numRelocs = iota -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go deleted file mode 100644 index ad26d3b28ca..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import "fmt" - -func assert(b bool) { - if !b { - panic("assertion failed") - } -} - -func errorf(format string, args ...interface{}) { - panic(fmt.Errorf(format, args...)) -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go deleted file mode 100644 index 5bd51ef7170..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "fmt" - "strings" -) - -// fmtFrames formats a backtrace for reporting reader/writer desyncs. -func fmtFrames(pcs ...uintptr) []string { - res := make([]string, 0, len(pcs)) - walkFrames(pcs, func(file string, line int, name string, offset uintptr) { - // Trim package from function name. It's just redundant noise. - name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") - - res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) - }) - return res -} - -type frameVisitor func(file string, line int, name string, offset uintptr) - -// SyncMarker is an enum type that represents markers that may be -// written to export data to ensure the reader and writer stay -// synchronized. -type SyncMarker int - -//go:generate stringer -type=SyncMarker -trimprefix=Sync - -const ( - _ SyncMarker = iota - - // Public markers (known to go/types importers). - - // Low-level coding markers. - SyncEOF - SyncBool - SyncInt64 - SyncUint64 - SyncString - SyncValue - SyncVal - SyncRelocs - SyncReloc - SyncUseReloc - - // Higher-level object and type markers. - SyncPublic - SyncPos - SyncPosBase - SyncObject - SyncObject1 - SyncPkg - SyncPkgDef - SyncMethod - SyncType - SyncTypeIdx - SyncTypeParamNames - SyncSignature - SyncParams - SyncParam - SyncCodeObj - SyncSym - SyncLocalIdent - SyncSelector - - // Private markers (only known to cmd/compile). 
- SyncPrivate - - SyncFuncExt - SyncVarExt - SyncTypeExt - SyncPragma - - SyncExprList - SyncExprs - SyncExpr - SyncExprType - SyncAssign - SyncOp - SyncFuncLit - SyncCompLit - - SyncDecl - SyncFuncBody - SyncOpenScope - SyncCloseScope - SyncCloseAnotherScope - SyncDeclNames - SyncDeclName - - SyncStmts - SyncBlockStmt - SyncIfStmt - SyncForStmt - SyncSwitchStmt - SyncRangeStmt - SyncCaseClause - SyncCommClause - SyncSelectStmt - SyncDecls - SyncLabeledStmt - SyncUseObjLocal - SyncAddLocal - SyncLinkname - SyncStmt1 - SyncStmtsEnd - SyncLabel - SyncOptLabel -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go deleted file mode 100644 index 4a5b0ca5f2f..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ /dev/null @@ -1,89 +0,0 @@ -// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. - -package pkgbits - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[SyncEOF-1] - _ = x[SyncBool-2] - _ = x[SyncInt64-3] - _ = x[SyncUint64-4] - _ = x[SyncString-5] - _ = x[SyncValue-6] - _ = x[SyncVal-7] - _ = x[SyncRelocs-8] - _ = x[SyncReloc-9] - _ = x[SyncUseReloc-10] - _ = x[SyncPublic-11] - _ = x[SyncPos-12] - _ = x[SyncPosBase-13] - _ = x[SyncObject-14] - _ = x[SyncObject1-15] - _ = x[SyncPkg-16] - _ = x[SyncPkgDef-17] - _ = x[SyncMethod-18] - _ = x[SyncType-19] - _ = x[SyncTypeIdx-20] - _ = x[SyncTypeParamNames-21] - _ = x[SyncSignature-22] - _ = x[SyncParams-23] - _ = x[SyncParam-24] - _ = x[SyncCodeObj-25] - _ = x[SyncSym-26] - _ = x[SyncLocalIdent-27] - _ = x[SyncSelector-28] - _ = x[SyncPrivate-29] - _ = x[SyncFuncExt-30] - _ = x[SyncVarExt-31] - _ = x[SyncTypeExt-32] - _ = x[SyncPragma-33] - _ = x[SyncExprList-34] - _ = x[SyncExprs-35] - _ = x[SyncExpr-36] - _ = x[SyncExprType-37] - _ = x[SyncAssign-38] - _ = x[SyncOp-39] - _ = x[SyncFuncLit-40] - _ = x[SyncCompLit-41] - _ = x[SyncDecl-42] - _ = x[SyncFuncBody-43] - _ = x[SyncOpenScope-44] - _ = x[SyncCloseScope-45] - _ = x[SyncCloseAnotherScope-46] - _ = x[SyncDeclNames-47] - _ = x[SyncDeclName-48] - _ = x[SyncStmts-49] - _ = x[SyncBlockStmt-50] - _ = x[SyncIfStmt-51] - _ = x[SyncForStmt-52] - _ = x[SyncSwitchStmt-53] - _ = x[SyncRangeStmt-54] - _ = x[SyncCaseClause-55] - _ = x[SyncCommClause-56] - _ = x[SyncSelectStmt-57] - _ = x[SyncDecls-58] - _ = x[SyncLabeledStmt-59] - _ = x[SyncUseObjLocal-60] - _ = x[SyncAddLocal-61] - _ = x[SyncLinkname-62] - _ = x[SyncStmt1-63] - _ = x[SyncStmtsEnd-64] - _ = x[SyncLabel-65] - _ = x[SyncOptLabel-66] -} - -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" - -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 
416, 424, 432, 437, 445, 450, 458} - -func (i SyncMarker) String() string { - i -= 1 - if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { - return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] -} diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index 7e638ec24fc..00000000000 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. - // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - type tokenFile118 struct { - _ *token.FileSet // deleted in go1.19 - tokenFile119 - } - - type uP = unsafe.Pointer - switch unsafe.Sizeof(*file) { - case unsafe.Sizeof(tokenFile118{}): - var ptr *tokenFile118 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - case unsafe.Sizeof(tokenFile119{}): - var ptr *tokenFile119 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - default: - panic("unexpected token.File size") - } -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. 
- out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go deleted file mode 100644 index cdab9885314..00000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeparams contains common utilities for writing tools that interact -// with generic Go code, as introduced with Go 1.18. -// -// Many of the types and functions in this package are proxies for the new APIs -// introduced in the standard library with Go 1.18. For example, the -// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec -// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go -// versions older than 1.18 these helpers are implemented as stubs, allowing -// users of this package to write code that handles generic constructs inline, -// even if the Go version being used to compile does not support generics. -// -// Additionally, this package contains common utilities for working with the -// new generic constructs, to supplement the standard library APIs. Notably, -// the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. -// -// An external version of these APIs is available in the -// golang.org/x/exp/typeparams module. -package typeparams - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" -) - -// UnpackIndexExpr extracts data from AST nodes that represent index -// expressions. -// -// For an ast.IndexExpr, the resulting indices slice will contain exactly one -// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable -// number of index expressions. 
-// -// For nodes that don't represent index expressions, the first return value of -// UnpackIndexExpr will be nil. -func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { - switch e := n.(type) { - case *ast.IndexExpr: - return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *ast.IndexListExpr: - return e.X, e.Lbrack, e.Indices, e.Rbrack - } - return nil, token.NoPos, nil, token.NoPos -} - -// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on -// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 -// will panic. -func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { - switch len(indices) { - case 0: - panic("empty indices") - case 1: - return &ast.IndexExpr{ - X: x, - Lbrack: lbrack, - Index: indices[0], - Rbrack: rbrack, - } - default: - return &ast.IndexListExpr{ - X: x, - Lbrack: lbrack, - Indices: indices, - Rbrack: rbrack, - } - } -} - -// IsTypeParam reports whether t is a type parameter. -func IsTypeParam(t types.Type) bool { - _, ok := t.(*types.TypeParam) - return ok -} - -// OriginMethod returns the origin method associated with the method fn. -// For methods on a non-generic receiver base type, this is just -// fn. However, for methods with a generic receiver, OriginMethod returns the -// corresponding method in the method set of the origin type. -// -// As a special case, if fn is not a method (has no receiver), OriginMethod -// returns fn. -func OriginMethod(fn *types.Func) *types.Func { - recv := fn.Type().(*types.Signature).Recv() - if recv == nil { - return fn - } - base := recv.Type() - p, isPtr := base.(*types.Pointer) - if isPtr { - base = p.Elem() - } - named, isNamed := base.(*types.Named) - if !isNamed { - // Receiver is a *types.Interface. - return fn - } - if named.TypeParams().Len() == 0 { - // Receiver base has no type parameters, so we can avoid the lookup below. - return fn - } - orig := named.Origin() - gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) - - // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: - // package p - // type T *int - // func (*T) f() {} - // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. - // Here we make them consistent by force. - // (The go/types bug is general, but this workaround is reached only - // for generic T thanks to the early return above.) - if gfn == nil { - mset := types.NewMethodSet(types.NewPointer(orig)) - for i := 0; i < mset.Len(); i++ { - m := mset.At(i) - if m.Obj().Id() == fn.Id() { - gfn = m.Obj() - break - } - } - } - - // In golang/go#61196, we observe another crash, this time inexplicable. - if gfn == nil { - panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) - } - - return gfn.(*types.Func) -} - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. 
-// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. - - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := VN.TypeParams() - ttparams := TN.TypeParams() - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. - if ctxt == nil { - ctxt = types.NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := types.Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := types.Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go deleted file mode 100644 index 7ea8840eab7..00000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "go/types" -) - -// CoreType returns the core type of T or nil if T does not have a core type. -// -// See https://go.dev/ref/spec#Core_types for the definition of a core type. -func CoreType(T types.Type) types.Type { - U := T.Underlying() - if _, ok := U.(*types.Interface); !ok { - return U // for non-interface types, - } - - terms, err := _NormalTerms(U) - if len(terms) == 0 || err != nil { - // len(terms) -> empty type set of interface. - // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. - return nil // no core type. - } - - U = terms[0].Type().Underlying() - var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) - for identical = 1; identical < len(terms); identical++ { - if !types.Identical(U, terms[identical].Type().Underlying()) { - break - } - } - - if identical == len(terms) { - // https://go.dev/ref/spec#Core_types - // "There is a single type U which is the underlying type of all types in the type set of T" - return U - } - ch, ok := U.(*types.Chan) - if !ok { - return nil // no core type as identical < len(terms) and U is not a channel. 
- } - // https://go.dev/ref/spec#Core_types - // "the type chan E if T contains only bidirectional channels, or the type chan<- E or - // <-chan E depending on the direction of the directional channels present." - for chans := identical; chans < len(terms); chans++ { - curr, ok := terms[chans].Type().Underlying().(*types.Chan) - if !ok { - return nil - } - if !types.Identical(ch.Elem(), curr.Elem()) { - return nil // channel elements are not identical. - } - if ch.Dir() == types.SendRecv { - // ch is bidirectional. We can safely always use curr's direction. - ch = curr - } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { - // ch and curr are not bidirectional and not the same direction. - return nil - } - } - return ch -} - -// _NormalTerms returns a slice of terms representing the normalized structural -// type restrictions of a type, if any. -// -// For all types other than *types.TypeParam, *types.Interface, and -// *types.Union, this is just a single term with Tilde() == false and -// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see -// below. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration type -// T[P interface{~int; m()}] int the structural restriction of the type -// parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// _NormalTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, _NormalTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the type is -// invalid, exceeds complexity bounds, or has an empty type set. In the latter -// case, _NormalTerms returns ErrEmptyTypeSet. -// -// _NormalTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func _NormalTerms(typ types.Type) ([]*types.Term, error) { - switch typ := typ.(type) { - case *types.TypeParam: - return StructuralTerms(typ) - case *types.Union: - return UnionTermSet(typ) - case *types.Interface: - return InterfaceTermSet(typ) - default: - return []*types.Term{types.NewTerm(false, typ)}, nil - } -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go deleted file mode 100644 index 93c80fdc96c..00000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2021 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "errors" - "fmt" - "go/types" - "os" - "strings" -) - -//go:generate go run copytermlist.go - -const debug = false - -var ErrEmptyTypeSet = errors.New("empty type set") - -// StructuralTerms returns a slice of terms representing the normalized -// structural type restrictions of a type parameter, if any. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration -// -// type T[P interface{~int; m()}] int -// -// the structural restriction of the type parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// StructuralTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, StructuralTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the -// constraint interface is invalid, exceeds complexity bounds, or has an empty -// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. -// -// StructuralTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { - constraint := tparam.Constraint() - if constraint == nil { - return nil, fmt.Errorf("%s has nil constraint", tparam) - } - iface, _ := constraint.Underlying().(*types.Interface) - if iface == nil { - return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) - } - return InterfaceTermSet(iface) -} - -// InterfaceTermSet computes the normalized terms for a constraint interface, -// returning an error if the term set cannot be computed or is empty. In the -// latter case, the error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { - return computeTermSet(iface) -} - -// UnionTermSet computes the normalized terms for a union, returning an error -// if the term set cannot be computed or is empty. In the latter case, the -// error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. 
-func UnionTermSet(union *types.Union) ([]*types.Term, error) { - return computeTermSet(union) -} - -func computeTermSet(typ types.Type) ([]*types.Term, error) { - tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) - if err != nil { - return nil, err - } - if tset.terms.isEmpty() { - return nil, ErrEmptyTypeSet - } - if tset.terms.isAll() { - return nil, nil - } - var terms []*types.Term - for _, term := range tset.terms { - terms = append(terms, types.NewTerm(term.tilde, term.typ)) - } - return terms, nil -} - -// A termSet holds the normalized set of terms for a given type. -// -// The name termSet is intentionally distinct from 'type set': a type set is -// all types that implement a type (and includes method restrictions), whereas -// a term set just represents the structural restrictions on a type. -type termSet struct { - complete bool - terms termlist -} - -func indentf(depth int, format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) -} - -func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { - if t == nil { - panic("nil type") - } - - if debug { - indentf(depth, "%s", t.String()) - defer func() { - if err != nil { - indentf(depth, "=> %s", err) - } else { - indentf(depth, "=> %s", res.terms.String()) - } - }() - } - - const maxTermCount = 100 - if tset, ok := seen[t]; ok { - if !tset.complete { - return nil, fmt.Errorf("cycle detected in the declaration of %s", t) - } - return tset, nil - } - - // Mark the current type as seen to avoid infinite recursion. - tset := new(termSet) - defer func() { - tset.complete = true - }() - seen[t] = tset - - switch u := t.Underlying().(type) { - case *types.Interface: - // The term set of an interface is the intersection of the term sets of its - // embedded types. - tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*types.TypeParam); ok { - return nil, fmt.Errorf("invalid embedded type %T", embedded) - } - tset2, err := computeTermSetInternal(embedded, seen, depth+1) - if err != nil { - return nil, err - } - tset.terms = tset.terms.intersect(tset2.terms) - } - case *types.Union: - // The term set of a union is the union of term sets of its terms. - tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) - var terms termlist - switch t.Type().Underlying().(type) { - case *types.Interface: - tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) - if err != nil { - return nil, err - } - terms = tset2.terms - case *types.TypeParam, *types.Union: - // A stand-alone type parameter or union is not permitted as union - // term. - return nil, fmt.Errorf("invalid union term %T", t) - default: - if t.Type() == types.Typ[types.Invalid] { - continue - } - terms = termlist{{t.Tilde(), t.Type()}} - } - tset.terms = tset.terms.union(terms) - if len(tset.terms) > maxTermCount { - return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) - } - } - case *types.TypeParam: - panic("unreachable") - default: - // For all other types, the term set is just a single non-tilde term - // holding the type itself. - if u != types.Typ[types.Invalid] { - tset.terms = termlist{{false, t}} - } - } - return tset, nil -} - -// under is a facade for the go/types internal function of the same name. It is -// used by typeterm.go. 
-func under(t types.Type) types.Type { - return t.Underlying() -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go deleted file mode 100644 index cbd12f80131..00000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. - -package typeparams - -import ( - "bytes" - "go/types" -) - -// A termlist represents the type set represented by the union -// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. -// A termlist is in normal form if all terms are disjoint. -// termlist operations don't require the operands to be in -// normal form. -type termlist []*term - -// allTermlist represents the set of all types. -// It is in normal form. -var allTermlist = termlist{new(term)} - -// String prints the termlist exactly (without normalization). -func (xl termlist) String() string { - if len(xl) == 0 { - return "∅" - } - var buf bytes.Buffer - for i, x := range xl { - if i > 0 { - buf.WriteString(" | ") - } - buf.WriteString(x.String()) - } - return buf.String() -} - -// isEmpty reports whether the termlist xl represents the empty set of types. -func (xl termlist) isEmpty() bool { - // If there's a non-nil term, the entire list is not empty. - // If the termlist is in normal form, this requires at most - // one iteration. - for _, x := range xl { - if x != nil { - return false - } - } - return true -} - -// isAll reports whether the termlist xl represents the set of all types. -func (xl termlist) isAll() bool { - // If there's a 𝓤 term, the entire list is 𝓤. - // If the termlist is in normal form, this requires at most - // one iteration. - for _, x := range xl { - if x != nil && x.typ == nil { - return true - } - } - return false -} - -// norm returns the normal form of xl. -func (xl termlist) norm() termlist { - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - used := make([]bool, len(xl)) - var rl termlist - for i, xi := range xl { - if xi == nil || used[i] { - continue - } - for j := i + 1; j < len(xl); j++ { - xj := xl[j] - if xj == nil || used[j] { - continue - } - if u1, u2 := xi.union(xj); u2 == nil { - // If we encounter a 𝓤 term, the entire list is 𝓤. - // Exit early. - // (Note that this is not just an optimization; - // if we continue, we may end up with a 𝓤 term - // and other terms and the result would not be - // in normal form.) - if u1.typ == nil { - return allTermlist - } - xi = u1 - used[j] = true // xj is now unioned into xi - ignore it in future iterations - } - } - rl = append(rl, xi) - } - return rl -} - -// union returns the union xl ∪ yl. -func (xl termlist) union(yl termlist) termlist { - return append(xl, yl...).norm() -} - -// intersect returns the intersection xl ∩ yl. -func (xl termlist) intersect(yl termlist) termlist { - if xl.isEmpty() || yl.isEmpty() { - return nil - } - - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - var rl termlist - for _, x := range xl { - for _, y := range yl { - if r := x.intersect(y); r != nil { - rl = append(rl, r) - } - } - } - return rl.norm() -} - -// equal reports whether xl and yl represent the same type set. 
-func (xl termlist) equal(yl termlist) bool { - // TODO(gri) this should be more efficient - return xl.subsetOf(yl) && yl.subsetOf(xl) -} - -// includes reports whether t ∈ xl. -func (xl termlist) includes(t types.Type) bool { - for _, x := range xl { - if x.includes(t) { - return true - } - } - return false -} - -// supersetOf reports whether y ⊆ xl. -func (xl termlist) supersetOf(y *term) bool { - for _, x := range xl { - if y.subsetOf(x) { - return true - } - } - return false -} - -// subsetOf reports whether xl ⊆ yl. -func (xl termlist) subsetOf(yl termlist) bool { - if yl.isEmpty() { - return xl.isEmpty() - } - - // each term x of xl must be a subset of yl - for _, x := range xl { - if !yl.supersetOf(x) { - return false // x is not a subset yl - } - } - return true -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go deleted file mode 100644 index 7350bb702a1..00000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. - -package typeparams - -import "go/types" - -// A term describes elementary type sets: -// -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -type term struct { - tilde bool // valid if typ != nil - typ types.Type -} - -func (x *term) String() string { - switch { - case x == nil: - return "∅" - case x.typ == nil: - return "𝓤" - case x.tilde: - return "~" + x.typ.String() - default: - return x.typ.String() - } -} - -// equal reports whether x and y represent the same type set. -func (x *term) equal(y *term) bool { - // easy cases - switch { - case x == nil || y == nil: - return x == y - case x.typ == nil || y.typ == nil: - return x.typ == y.typ - } - // ∅ ⊂ x, y ⊂ 𝓤 - - return x.tilde == y.tilde && types.Identical(x.typ, y.typ) -} - -// union returns the union x ∪ y: zero, one, or two non-nil terms. -func (x *term) union(y *term) (_, _ *term) { - // easy cases - switch { - case x == nil && y == nil: - return nil, nil // ∅ ∪ ∅ == ∅ - case x == nil: - return y, nil // ∅ ∪ y == y - case y == nil: - return x, nil // x ∪ ∅ == x - case x.typ == nil: - return x, nil // 𝓤 ∪ y == 𝓤 - case y.typ == nil: - return y, nil // x ∪ 𝓤 == 𝓤 - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return x, y // x ∪ y == (x, y) if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∪ ~t == ~t - // ~t ∪ T == ~t - // T ∪ ~t == ~t - // T ∪ T == T - if x.tilde || !y.tilde { - return x, nil - } - return y, nil -} - -// intersect returns the intersection x ∩ y. -func (x *term) intersect(y *term) *term { - // easy cases - switch { - case x == nil || y == nil: - return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ - case x.typ == nil: - return y // 𝓤 ∩ y == y - case y.typ == nil: - return x // x ∩ 𝓤 == x - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return nil // x ∩ y == ∅ if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∩ ~t == ~t - // ~t ∩ T == T - // T ∩ ~t == T - // T ∩ T == T - if !x.tilde || y.tilde { - return x - } - return y -} - -// includes reports whether t ∈ x. 
-func (x *term) includes(t types.Type) bool { - // easy cases - switch { - case x == nil: - return false // t ∈ ∅ == false - case x.typ == nil: - return true // t ∈ 𝓤 == true - } - // ∅ ⊂ x ⊂ 𝓤 - - u := t - if x.tilde { - u = under(u) - } - return types.Identical(x.typ, u) -} - -// subsetOf reports whether x ⊆ y. -func (x *term) subsetOf(y *term) bool { - // easy cases - switch { - case x == nil: - return true // ∅ ⊆ y == true - case y == nil: - return false // x ⊆ ∅ == false since x != ∅ - case y.typ == nil: - return true // x ⊆ 𝓤 == true - case x.typ == nil: - return false // 𝓤 ⊆ y == false since y != 𝓤 - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return false // x ⊆ y == false if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ⊆ ~t == true - // ~t ⊆ T == false - // T ⊆ ~t == true - // T ⊆ T == true - return !x.tilde || y.tilde -} - -// disjoint reports whether x ∩ y == ∅. -// x.typ and y.typ must not be nil. -func (x *term) disjoint(y *term) bool { - if debug && (x.typ == nil || y.typ == nil) { - panic("invalid argument(s)") - } - ux := x.typ - if y.tilde { - ux = under(ux) - } - uy := y.typ - if x.tilde { - uy = under(uy) - } - return !types.Identical(ux, uy) -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go deleted file mode 100644 index 07484073a57..00000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ /dev/null @@ -1,1560 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typesinternal - -//go:generate stringer -type=ErrorCode - -type ErrorCode int - -// This file defines the error codes that can be produced during type-checking. -// Collectively, these codes provide an identifier that may be used to -// implement special handling for certain types of errors. -// -// Error codes should be fine-grained enough that the exact nature of the error -// can be easily determined, but coarse enough that they are not an -// implementation detail of the type checking algorithm. As a rule-of-thumb, -// errors should be considered equivalent if there is a theoretical refactoring -// of the type checker in which they are emitted in exactly one place. For -// example, the type checker emits different error messages for "too many -// arguments" and "too few arguments", but one can imagine an alternative type -// checker where this check instead just emits a single "wrong number of -// arguments", so these errors should have the same code. -// -// Error code names should be as brief as possible while retaining accuracy and -// distinctiveness. In most cases names should start with an adjective -// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), -// and end with a noun identifying the relevant language object. For example, -// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the -// convention that "bad" implies a problem with syntax, and "invalid" implies a -// problem with types. - -const ( - // InvalidSyntaxTree occurs if an invalid syntax tree is provided - // to the type checker. It should never happen. - InvalidSyntaxTree ErrorCode = -1 -) - -const ( - _ ErrorCode = iota - - // Test is reserved for errors that only apply while in self-test mode. - Test - - /* package names */ - - // BlankPkgName occurs when a package name is the blank identifier "_". 
- // - // Per the spec: - // "The PackageName must not be the blank identifier." - BlankPkgName - - // MismatchedPkgName occurs when a file's package name doesn't match the - // package name already established by other files. - MismatchedPkgName - - // InvalidPkgUse occurs when a package identifier is used outside of a - // selector expression. - // - // Example: - // import "fmt" - // - // var _ = fmt - InvalidPkgUse - - /* imports */ - - // BadImportPath occurs when an import path is not valid. - BadImportPath - - // BrokenImport occurs when importing a package fails. - // - // Example: - // import "amissingpackage" - BrokenImport - - // ImportCRenamed occurs when the special import "C" is renamed. "C" is a - // pseudo-package, and must not be renamed. - // - // Example: - // import _ "C" - ImportCRenamed - - // UnusedImport occurs when an import is unused. - // - // Example: - // import "fmt" - // - // func main() {} - UnusedImport - - /* initialization */ - - // InvalidInitCycle occurs when an invalid cycle is detected within the - // initialization graph. - // - // Example: - // var x int = f() - // - // func f() int { return x } - InvalidInitCycle - - /* decls */ - - // DuplicateDecl occurs when an identifier is declared multiple times. - // - // Example: - // var x = 1 - // var x = 2 - DuplicateDecl - - // InvalidDeclCycle occurs when a declaration cycle is not valid. - // - // Example: - // import "unsafe" - // - // type T struct { - // a [n]int - // } - // - // var n = unsafe.Sizeof(T{}) - InvalidDeclCycle - - // InvalidTypeCycle occurs when a cycle in type definitions results in a - // type that is not well-defined. - // - // Example: - // import "unsafe" - // - // type T [unsafe.Sizeof(T{})]int - InvalidTypeCycle - - /* decls > const */ - - // InvalidConstInit occurs when a const declaration has a non-constant - // initializer. - // - // Example: - // var x int - // const _ = x - InvalidConstInit - - // InvalidConstVal occurs when a const value cannot be converted to its - // target type. - // - // TODO(findleyr): this error code and example are not very clear. Consider - // removing it. - // - // Example: - // const _ = 1 << "hello" - InvalidConstVal - - // InvalidConstType occurs when the underlying type in a const declaration - // is not a valid constant type. - // - // Example: - // const c *int = 4 - InvalidConstType - - /* decls > var (+ other variable assignment codes) */ - - // UntypedNilUse occurs when the predeclared (untyped) value nil is used to - // initialize a variable declared without an explicit type. - // - // Example: - // var x = nil - UntypedNilUse - - // WrongAssignCount occurs when the number of values on the right-hand side - // of an assignment or or initialization expression does not match the number - // of variables on the left-hand side. - // - // Example: - // var x = 1, 2 - WrongAssignCount - - // UnassignableOperand occurs when the left-hand side of an assignment is - // not assignable. - // - // Example: - // func f() { - // const c = 1 - // c = 2 - // } - UnassignableOperand - - // NoNewVar occurs when a short variable declaration (':=') does not declare - // new variables. - // - // Example: - // func f() { - // x := 1 - // x := 2 - // } - NoNewVar - - // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does - // not have single-valued left-hand or right-hand side. 
- // - // Per the spec: - // "In assignment operations, both the left- and right-hand expression lists - // must contain exactly one single-valued expression" - // - // Example: - // func f() int { - // x, y := 1, 2 - // x, y += 1 - // return x + y - // } - MultiValAssignOp - - // InvalidIfaceAssign occurs when a value of type T is used as an - // interface, but T does not implement a method of the expected interface. - // - // Example: - // type I interface { - // f() - // } - // - // type T int - // - // var x I = T(1) - InvalidIfaceAssign - - // InvalidChanAssign occurs when a chan assignment is invalid. - // - // Per the spec, a value x is assignable to a channel type T if: - // "x is a bidirectional channel value, T is a channel type, x's type V and - // T have identical element types, and at least one of V or T is not a - // defined type." - // - // Example: - // type T1 chan int - // type T2 chan int - // - // var x T1 - // // Invalid assignment because both types are named - // var _ T2 = x - InvalidChanAssign - - // IncompatibleAssign occurs when the type of the right-hand side expression - // in an assignment cannot be assigned to the type of the variable being - // assigned. - // - // Example: - // var x []int - // var _ int = x - IncompatibleAssign - - // UnaddressableFieldAssign occurs when trying to assign to a struct field - // in a map value. - // - // Example: - // func f() { - // m := make(map[string]struct{i int}) - // m["foo"].i = 42 - // } - UnaddressableFieldAssign - - /* decls > type (+ other type expression codes) */ - - // NotAType occurs when the identifier used as the underlying type in a type - // declaration or the right-hand side of a type alias does not denote a type. - // - // Example: - // var S = 2 - // - // type T S - NotAType - - // InvalidArrayLen occurs when an array length is not a constant value. - // - // Example: - // var n = 3 - // var _ = [n]int{} - InvalidArrayLen - - // BlankIfaceMethod occurs when a method name is '_'. - // - // Per the spec: - // "The name of each explicitly specified method must be unique and not - // blank." - // - // Example: - // type T interface { - // _(int) - // } - BlankIfaceMethod - - // IncomparableMapKey occurs when a map key type does not support the == and - // != operators. - // - // Per the spec: - // "The comparison operators == and != must be fully defined for operands of - // the key type; thus the key type must not be a function, map, or slice." - // - // Example: - // var x map[T]int - // - // type T []int - IncomparableMapKey - - // InvalidIfaceEmbed occurs when a non-interface type is embedded in an - // interface. - // - // Example: - // type T struct {} - // - // func (T) m() - // - // type I interface { - // T - // } - InvalidIfaceEmbed - - // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, - // and T itself is itself a pointer, an unsafe.Pointer, or an interface. - // - // Per the spec: - // "An embedded field must be specified as a type name T or as a pointer to - // a non-interface type name *T, and T itself may not be a pointer type." - // - // Example: - // type T *int - // - // type S struct { - // *T - // } - InvalidPtrEmbed - - /* decls > func and method */ - - // BadRecv occurs when a method declaration does not have exactly one - // receiver parameter. - // - // Example: - // func () _() {} - BadRecv - - // InvalidRecv occurs when a receiver type expression is not of the form T - // or *T, or T is a pointer type. 
- // - // Example: - // type T struct {} - // - // func (**T) m() {} - InvalidRecv - - // DuplicateFieldAndMethod occurs when an identifier appears as both a field - // and method name. - // - // Example: - // type T struct { - // m int - // } - // - // func (T) m() {} - DuplicateFieldAndMethod - - // DuplicateMethod occurs when two methods on the same receiver type have - // the same name. - // - // Example: - // type T struct {} - // func (T) m() {} - // func (T) m(i int) int { return i } - DuplicateMethod - - /* decls > special */ - - // InvalidBlank occurs when a blank identifier is used as a value or type. - // - // Per the spec: - // "The blank identifier may appear as an operand only on the left-hand side - // of an assignment." - // - // Example: - // var x = _ - InvalidBlank - - // InvalidIota occurs when the predeclared identifier iota is used outside - // of a constant declaration. - // - // Example: - // var x = iota - InvalidIota - - // MissingInitBody occurs when an init function is missing its body. - // - // Example: - // func init() - MissingInitBody - - // InvalidInitSig occurs when an init function declares parameters or - // results. - // - // Example: - // func init() int { return 1 } - InvalidInitSig - - // InvalidInitDecl occurs when init is declared as anything other than a - // function. - // - // Example: - // var init = 1 - InvalidInitDecl - - // InvalidMainDecl occurs when main is declared as anything other than a - // function, in a main package. - InvalidMainDecl - - /* exprs */ - - // TooManyValues occurs when a function returns too many values for the - // expression context in which it is used. - // - // Example: - // func ReturnTwo() (int, int) { - // return 1, 2 - // } - // - // var x = ReturnTwo() - TooManyValues - - // NotAnExpr occurs when a type expression is used where a value expression - // is expected. - // - // Example: - // type T struct {} - // - // func f() { - // T - // } - NotAnExpr - - /* exprs > const */ - - // TruncatedFloat occurs when a float constant is truncated to an integer - // value. - // - // Example: - // var _ int = 98.6 - TruncatedFloat - - // NumericOverflow occurs when a numeric constant overflows its target type. - // - // Example: - // var x int8 = 1000 - NumericOverflow - - /* exprs > operation */ - - // UndefinedOp occurs when an operator is not defined for the type(s) used - // in an operation. - // - // Example: - // var c = "a" - "b" - UndefinedOp - - // MismatchedTypes occurs when operand types are incompatible in a binary - // operation. - // - // Example: - // var a = "hello" - // var b = 1 - // var c = a - b - MismatchedTypes - - // DivByZero occurs when a division operation is provable at compile - // time to be a division by zero. - // - // Example: - // const divisor = 0 - // var x int = 1/divisor - DivByZero - - // NonNumericIncDec occurs when an increment or decrement operator is - // applied to a non-numeric value. - // - // Example: - // func f() { - // var c = "c" - // c++ - // } - NonNumericIncDec - - /* exprs > ptr */ - - // UnaddressableOperand occurs when the & operator is applied to an - // unaddressable expression. - // - // Example: - // var x = &1 - UnaddressableOperand - - // InvalidIndirection occurs when a non-pointer value is indirected via the - // '*' operator. - // - // Example: - // var x int - // var y = *x - InvalidIndirection - - /* exprs > [] */ - - // NonIndexableOperand occurs when an index operation is applied to a value - // that cannot be indexed. 
- // - // Example: - // var x = 1 - // var y = x[1] - NonIndexableOperand - - // InvalidIndex occurs when an index argument is not of integer type, - // negative, or out-of-bounds. - // - // Example: - // var s = [...]int{1,2,3} - // var x = s[5] - // - // Example: - // var s = []int{1,2,3} - // var _ = s[-1] - // - // Example: - // var s = []int{1,2,3} - // var i string - // var _ = s[i] - InvalidIndex - - // SwappedSliceIndices occurs when constant indices in a slice expression - // are decreasing in value. - // - // Example: - // var _ = []int{1,2,3}[2:1] - SwappedSliceIndices - - /* operators > slice */ - - // NonSliceableOperand occurs when a slice operation is applied to a value - // whose type is not sliceable, or is unaddressable. - // - // Example: - // var x = [...]int{1, 2, 3}[:1] - // - // Example: - // var x = 1 - // var y = 1[:1] - NonSliceableOperand - - // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is - // applied to a string. - // - // Example: - // var s = "hello" - // var x = s[1:2:3] - InvalidSliceExpr - - /* exprs > shift */ - - // InvalidShiftCount occurs when the right-hand side of a shift operation is - // either non-integer, negative, or too large. - // - // Example: - // var ( - // x string - // y int = 1 << x - // ) - InvalidShiftCount - - // InvalidShiftOperand occurs when the shifted operand is not an integer. - // - // Example: - // var s = "hello" - // var x = s << 2 - InvalidShiftOperand - - /* exprs > chan */ - - // InvalidReceive occurs when there is a channel receive from a value that - // is either not a channel, or is a send-only channel. - // - // Example: - // func f() { - // var x = 1 - // <-x - // } - InvalidReceive - - // InvalidSend occurs when there is a channel send to a value that is not a - // channel, or is a receive-only channel. - // - // Example: - // func f() { - // var x = 1 - // x <- "hello!" - // } - InvalidSend - - /* exprs > literal */ - - // DuplicateLitKey occurs when an index is duplicated in a slice, array, or - // map literal. - // - // Example: - // var _ = []int{0:1, 0:2} - // - // Example: - // var _ = map[string]int{"a": 1, "a": 2} - DuplicateLitKey - - // MissingLitKey occurs when a map literal is missing a key expression. - // - // Example: - // var _ = map[string]int{1} - MissingLitKey - - // InvalidLitIndex occurs when the key in a key-value element of a slice or - // array literal is not an integer constant. - // - // Example: - // var i = 0 - // var x = []string{i: "world"} - InvalidLitIndex - - // OversizeArrayLit occurs when an array literal exceeds its length. - // - // Example: - // var _ = [2]int{1,2,3} - OversizeArrayLit - - // MixedStructLit occurs when a struct literal contains a mix of positional - // and named elements. - // - // Example: - // var _ = struct{i, j int}{i: 1, 2} - MixedStructLit - - // InvalidStructLit occurs when a positional struct literal has an incorrect - // number of values. - // - // Example: - // var _ = struct{i, j int}{1,2,3} - InvalidStructLit - - // MissingLitField occurs when a struct literal refers to a field that does - // not exist on the struct type. - // - // Example: - // var _ = struct{i int}{j: 2} - MissingLitField - - // DuplicateLitField occurs when a struct literal contains duplicated - // fields. - // - // Example: - // var _ = struct{i int}{i: 1, i: 2} - DuplicateLitField - - // UnexportedLitField occurs when a positional struct literal implicitly - // assigns an unexported field of an imported type. 
- UnexportedLitField - - // InvalidLitField occurs when a field name is not a valid identifier. - // - // Example: - // var _ = struct{i int}{1: 1} - InvalidLitField - - // UntypedLit occurs when a composite literal omits a required type - // identifier. - // - // Example: - // type outer struct{ - // inner struct { i int } - // } - // - // var _ = outer{inner: {1}} - UntypedLit - - // InvalidLit occurs when a composite literal expression does not match its - // type. - // - // Example: - // type P *struct{ - // x int - // } - // var _ = P {} - InvalidLit - - /* exprs > selector */ - - // AmbiguousSelector occurs when a selector is ambiguous. - // - // Example: - // type E1 struct { i int } - // type E2 struct { i int } - // type T struct { E1; E2 } - // - // var x T - // var _ = x.i - AmbiguousSelector - - // UndeclaredImportedName occurs when a package-qualified identifier is - // undeclared by the imported package. - // - // Example: - // import "go/types" - // - // var _ = types.NotAnActualIdentifier - UndeclaredImportedName - - // UnexportedName occurs when a selector refers to an unexported identifier - // of an imported package. - // - // Example: - // import "reflect" - // - // type _ reflect.flag - UnexportedName - - // UndeclaredName occurs when an identifier is not declared in the current - // scope. - // - // Example: - // var x T - UndeclaredName - - // MissingFieldOrMethod occurs when a selector references a field or method - // that does not exist. - // - // Example: - // type T struct {} - // - // var x = T{}.f - MissingFieldOrMethod - - /* exprs > ... */ - - // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is - // not valid. - // - // Example: - // var _ = map[int][...]int{0: {}} - BadDotDotDotSyntax - - // NonVariadicDotDotDot occurs when a "..." is used on the final argument to - // a non-variadic function. - // - // Example: - // func printArgs(s []string) { - // for _, a := range s { - // println(a) - // } - // } - // - // func f() { - // s := []string{"a", "b", "c"} - // printArgs(s...) - // } - NonVariadicDotDotDot - - // MisplacedDotDotDot occurs when a "..." is used somewhere other than the - // final argument to a function call. - // - // Example: - // func printArgs(args ...int) { - // for _, a := range args { - // println(a) - // } - // } - // - // func f() { - // a := []int{1,2,3} - // printArgs(0, a...) - // } - MisplacedDotDotDot - - // InvalidDotDotDotOperand occurs when a "..." operator is applied to a - // single-valued operand. - // - // Example: - // func printArgs(args ...int) { - // for _, a := range args { - // println(a) - // } - // } - // - // func f() { - // a := 1 - // printArgs(a...) - // } - // - // Example: - // func args() (int, int) { - // return 1, 2 - // } - // - // func printArgs(args ...int) { - // for _, a := range args { - // println(a) - // } - // } - // - // func g() { - // printArgs(args()...) - // } - InvalidDotDotDotOperand - - // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in - // function. - // - // Example: - // var s = []int{1, 2, 3} - // var l = len(s...) - InvalidDotDotDot - - /* exprs > built-in */ - - // UncalledBuiltin occurs when a built-in function is used as a - // function-valued expression, instead of being called. - // - // Per the spec: - // "The built-in functions do not have standard Go types, so they can only - // appear in call expressions; they cannot be used as function values." 
- // - // Example: - // var _ = copy - UncalledBuiltin - - // InvalidAppend occurs when append is called with a first argument that is - // not a slice. - // - // Example: - // var _ = append(1, 2) - InvalidAppend - - // InvalidCap occurs when an argument to the cap built-in function is not of - // supported type. - // - // See https://golang.org/ref/spec#Lengthand_capacity for information on - // which underlying types are supported as arguments to cap and len. - // - // Example: - // var s = 2 - // var x = cap(s) - InvalidCap - - // InvalidClose occurs when close(...) is called with an argument that is - // not of channel type, or that is a receive-only channel. - // - // Example: - // func f() { - // var x int - // close(x) - // } - InvalidClose - - // InvalidCopy occurs when the arguments are not of slice type or do not - // have compatible type. - // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more - // information on the type requirements for the copy built-in. - // - // Example: - // func f() { - // var x []int - // y := []int64{1,2,3} - // copy(x, y) - // } - InvalidCopy - - // InvalidComplex occurs when the complex built-in function is called with - // arguments with incompatible types. - // - // Example: - // var _ = complex(float32(1), float64(2)) - InvalidComplex - - // InvalidDelete occurs when the delete built-in function is called with a - // first argument that is not a map. - // - // Example: - // func f() { - // m := "hello" - // delete(m, "e") - // } - InvalidDelete - - // InvalidImag occurs when the imag built-in function is called with an - // argument that does not have complex type. - // - // Example: - // var _ = imag(int(1)) - InvalidImag - - // InvalidLen occurs when an argument to the len built-in function is not of - // supported type. - // - // See https://golang.org/ref/spec#Lengthand_capacity for information on - // which underlying types are supported as arguments to cap and len. - // - // Example: - // var s = 2 - // var x = len(s) - InvalidLen - - // SwappedMakeArgs occurs when make is called with three arguments, and its - // length argument is larger than its capacity argument. - // - // Example: - // var x = make([]int, 3, 2) - SwappedMakeArgs - - // InvalidMake occurs when make is called with an unsupported type argument. - // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for - // information on the types that may be created using make. - // - // Example: - // var x = make(int) - InvalidMake - - // InvalidReal occurs when the real built-in function is called with an - // argument that does not have complex type. - // - // Example: - // var _ = real(int(1)) - InvalidReal - - /* exprs > assertion */ - - // InvalidAssert occurs when a type assertion is applied to a - // value that is not of interface type. - // - // Example: - // var x = 1 - // var _ = x.(float64) - InvalidAssert - - // ImpossibleAssert occurs for a type assertion x.(T) when the value x of - // interface cannot have dynamic type T, due to a missing or mismatching - // method on T. - // - // Example: - // type T int - // - // func (t *T) m() int { return int(*t) } - // - // type I interface { m() int } - // - // var x I - // var _ = x.(T) - ImpossibleAssert - - /* exprs > conversion */ - - // InvalidConversion occurs when the argument type cannot be converted to the - // target. - // - // See https://golang.org/ref/spec#Conversions for the rules of - // convertibility. 
- // - // Example: - // var x float64 - // var _ = string(x) - InvalidConversion - - // InvalidUntypedConversion occurs when an there is no valid implicit - // conversion from an untyped value satisfying the type constraints of the - // context in which it is used. - // - // Example: - // var _ = 1 + "" - InvalidUntypedConversion - - /* offsetof */ - - // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument - // that is not a selector expression. - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Offsetof(x) - BadOffsetofSyntax - - // InvalidOffsetof occurs when unsafe.Offsetof is called with a method - // selector, rather than a field selector, or when the field is embedded via - // a pointer. - // - // Per the spec: - // - // "If f is an embedded field, it must be reachable without pointer - // indirections through fields of the struct. " - // - // Example: - // import "unsafe" - // - // type T struct { f int } - // type S struct { *T } - // var s S - // var _ = unsafe.Offsetof(s.f) - // - // Example: - // import "unsafe" - // - // type S struct{} - // - // func (S) m() {} - // - // var s S - // var _ = unsafe.Offsetof(s.m) - InvalidOffsetof - - /* control flow > scope */ - - // UnusedExpr occurs when a side-effect free expression is used as a - // statement. Such a statement has no effect. - // - // Example: - // func f(i int) { - // i*i - // } - UnusedExpr - - // UnusedVar occurs when a variable is declared but unused. - // - // Example: - // func f() { - // x := 1 - // } - UnusedVar - - // MissingReturn occurs when a function with results is missing a return - // statement. - // - // Example: - // func f() int {} - MissingReturn - - // WrongResultCount occurs when a return statement returns an incorrect - // number of values. - // - // Example: - // func ReturnOne() int { - // return 1, 2 - // } - WrongResultCount - - // OutOfScopeResult occurs when the name of a value implicitly returned by - // an empty return statement is shadowed in a nested scope. - // - // Example: - // func factor(n int) (i int) { - // for i := 2; i < n; i++ { - // if n%i == 0 { - // return - // } - // } - // return 0 - // } - OutOfScopeResult - - /* control flow > if */ - - // InvalidCond occurs when an if condition is not a boolean expression. - // - // Example: - // func checkReturn(i int) { - // if i { - // panic("non-zero return") - // } - // } - InvalidCond - - /* control flow > for */ - - // InvalidPostDecl occurs when there is a declaration in a for-loop post - // statement. - // - // Example: - // func f() { - // for i := 0; i < 10; j := 0 {} - // } - InvalidPostDecl - - // InvalidChanRange occurs when a send-only channel used in a range - // expression. - // - // Example: - // func sum(c chan<- int) { - // s := 0 - // for i := range c { - // s += i - // } - // } - InvalidChanRange - - // InvalidIterVar occurs when two iteration variables are used while ranging - // over a channel. - // - // Example: - // func f(c chan int) { - // for k, v := range c { - // println(k, v) - // } - // } - InvalidIterVar - - // InvalidRangeExpr occurs when the type of a range expression is not array, - // slice, string, map, or channel. - // - // Example: - // func f(i int) { - // for j := range i { - // println(j) - // } - // } - InvalidRangeExpr - - /* control flow > switch */ - - // MisplacedBreak occurs when a break statement is not within a for, switch, - // or select statement of the innermost function definition. 
- // - // Example: - // func f() { - // break - // } - MisplacedBreak - - // MisplacedContinue occurs when a continue statement is not within a for - // loop of the innermost function definition. - // - // Example: - // func sumeven(n int) int { - // proceed := func() { - // continue - // } - // sum := 0 - // for i := 1; i <= n; i++ { - // if i % 2 != 0 { - // proceed() - // } - // sum += i - // } - // return sum - // } - MisplacedContinue - - // MisplacedFallthrough occurs when a fallthrough statement is not within an - // expression switch. - // - // Example: - // func typename(i interface{}) string { - // switch i.(type) { - // case int64: - // fallthrough - // case int: - // return "int" - // } - // return "unsupported" - // } - MisplacedFallthrough - - // DuplicateCase occurs when a type or expression switch has duplicate - // cases. - // - // Example: - // func printInt(i int) { - // switch i { - // case 1: - // println("one") - // case 1: - // println("One") - // } - // } - DuplicateCase - - // DuplicateDefault occurs when a type or expression switch has multiple - // default clauses. - // - // Example: - // func printInt(i int) { - // switch i { - // case 1: - // println("one") - // default: - // println("One") - // default: - // println("1") - // } - // } - DuplicateDefault - - // BadTypeKeyword occurs when a .(type) expression is used anywhere other - // than a type switch. - // - // Example: - // type I interface { - // m() - // } - // var t I - // var _ = t.(type) - BadTypeKeyword - - // InvalidTypeSwitch occurs when .(type) is used on an expression that is - // not of interface type. - // - // Example: - // func f(i int) { - // switch x := i.(type) {} - // } - InvalidTypeSwitch - - // InvalidExprSwitch occurs when a switch expression is not comparable. - // - // Example: - // func _() { - // var a struct{ _ func() } - // switch a /* ERROR cannot switch on a */ { - // } - // } - InvalidExprSwitch - - /* control flow > select */ - - // InvalidSelectCase occurs when a select case is not a channel send or - // receive. - // - // Example: - // func checkChan(c <-chan int) bool { - // select { - // case c: - // return true - // default: - // return false - // } - // } - InvalidSelectCase - - /* control flow > labels and jumps */ - - // UndeclaredLabel occurs when an undeclared label is jumped to. - // - // Example: - // func f() { - // goto L - // } - UndeclaredLabel - - // DuplicateLabel occurs when a label is declared more than once. - // - // Example: - // func f() int { - // L: - // L: - // return 1 - // } - DuplicateLabel - - // MisplacedLabel occurs when a break or continue label is not on a for, - // switch, or select statement. - // - // Example: - // func f() { - // L: - // a := []int{1,2,3} - // for _, e := range a { - // if e > 10 { - // break L - // } - // println(a) - // } - // } - MisplacedLabel - - // UnusedLabel occurs when a label is declared but not used. - // - // Example: - // func f() { - // L: - // } - UnusedLabel - - // JumpOverDecl occurs when a label jumps over a variable declaration. - // - // Example: - // func f() int { - // goto L - // x := 2 - // L: - // x++ - // return x - // } - JumpOverDecl - - // JumpIntoBlock occurs when a forward jump goes to a label inside a nested - // block. - // - // Example: - // func f(x int) { - // goto L - // if x > 0 { - // L: - // print("inside block") - // } - // } - JumpIntoBlock - - /* control flow > calls */ - - // InvalidMethodExpr occurs when a pointer method is called but the argument - // is not addressable. 
- // - // Example: - // type T struct {} - // - // func (*T) m() int { return 1 } - // - // var _ = T.m(T{}) - InvalidMethodExpr - - // WrongArgCount occurs when too few or too many arguments are passed by a - // function call. - // - // Example: - // func f(i int) {} - // var x = f() - WrongArgCount - - // InvalidCall occurs when an expression is called that is not of function - // type. - // - // Example: - // var x = "x" - // var y = x() - InvalidCall - - /* control flow > suspended */ - - // UnusedResults occurs when a restricted expression-only built-in function - // is suspended via go or defer. Such a suspension discards the results of - // these side-effect free built-in functions, and therefore is ineffectual. - // - // Example: - // func f(a []int) int { - // defer len(a) - // return i - // } - UnusedResults - - // InvalidDefer occurs when a deferred expression is not a function call, - // for example if the expression is a type conversion. - // - // Example: - // func f(i int) int { - // defer int32(i) - // return i - // } - InvalidDefer - - // InvalidGo occurs when a go expression is not a function call, for example - // if the expression is a type conversion. - // - // Example: - // func f(i int) int { - // go int32(i) - // return i - // } - InvalidGo - - // All codes below were added in Go 1.17. - - /* decl */ - - // BadDecl occurs when a declaration has invalid syntax. - BadDecl - - // RepeatedDecl occurs when an identifier occurs more than once on the left - // hand side of a short variable declaration. - // - // Example: - // func _() { - // x, y, y := 1, 2, 3 - // } - RepeatedDecl - - /* unsafe */ - - // InvalidUnsafeAdd occurs when unsafe.Add is called with a - // length argument that is not of integer type. - // - // Example: - // import "unsafe" - // - // var p unsafe.Pointer - // var _ = unsafe.Add(p, float64(1)) - InvalidUnsafeAdd - - // InvalidUnsafeSlice occurs when unsafe.Slice is called with a - // pointer argument that is not of pointer type or a length argument - // that is not of integer type, negative, or out of bounds. - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(x, 1) - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(&x, float64(1)) - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(&x, -1) - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(&x, uint64(1) << 63) - InvalidUnsafeSlice - - // All codes below were added in Go 1.18. - - /* features */ - - // UnsupportedFeature occurs when a language feature is used that is not - // supported at this Go version. - UnsupportedFeature - - /* type params */ - - // NotAGenericType occurs when a non-generic type is used where a generic - // type is expected: in type or function instantiation. - // - // Example: - // type T int - // - // var _ T[int] - NotAGenericType - - // WrongTypeArgCount occurs when a type or function is instantiated with an - // incorrent number of type arguments, including when a generic type or - // function is used without instantiation. - // - // Errors inolving failed type inference are assigned other error codes. - // - // Example: - // type T[p any] int - // - // var _ T[int, string] - // - // Example: - // func f[T any]() {} - // - // var x = f - WrongTypeArgCount - - // CannotInferTypeArgs occurs when type or function type argument inference - // fails to infer all type arguments. 
- // - // Example: - // func f[T any]() {} - // - // func _() { - // f() - // } - // - // Example: - // type N[P, Q any] struct{} - // - // var _ N[int] - CannotInferTypeArgs - - // InvalidTypeArg occurs when a type argument does not satisfy its - // corresponding type parameter constraints. - // - // Example: - // type T[P ~int] struct{} - // - // var _ T[string] - InvalidTypeArg // arguments? InferenceFailed - - // InvalidInstanceCycle occurs when an invalid cycle is detected - // within the instantiation graph. - // - // Example: - // func f[T any]() { f[*T]() } - InvalidInstanceCycle - - // InvalidUnion occurs when an embedded union or approximation element is - // not valid. - // - // Example: - // type _ interface { - // ~int | interface{ m() } - // } - InvalidUnion - - // MisplacedConstraintIface occurs when a constraint-type interface is used - // outside of constraint position. - // - // Example: - // type I interface { ~int } - // - // var _ I - MisplacedConstraintIface - - // InvalidMethodTypeParams occurs when methods have type parameters. - // - // It cannot be encountered with an AST parsed using go/parser. - InvalidMethodTypeParams - - // MisplacedTypeParam occurs when a type parameter is used in a place where - // it is not permitted. - // - // Example: - // type T[P any] P - // - // Example: - // type T[P any] struct{ *P } - MisplacedTypeParam - - // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with - // an argument that is not of slice type. It also occurs if it is used - // in a package compiled for a language version before go1.20. - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.SliceData(x) - InvalidUnsafeSliceData - - // InvalidUnsafeString occurs when unsafe.String is called with - // a length argument that is not of integer type, negative, or - // out of bounds. It also occurs if it is used in a package - // compiled for a language version before go1.20. - // - // Example: - // import "unsafe" - // - // var b [10]byte - // var _ = unsafe.String(&b[0], -1) - InvalidUnsafeString - - // InvalidUnsafeStringData occurs if it is used in a package - // compiled for a language version before go1.20. - _ // not used anymore - -) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go deleted file mode 100644 index 15ecf7c5ded..00000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. - -package typesinternal - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidSyntaxTree - -1] - _ = x[Test-1] - _ = x[BlankPkgName-2] - _ = x[MismatchedPkgName-3] - _ = x[InvalidPkgUse-4] - _ = x[BadImportPath-5] - _ = x[BrokenImport-6] - _ = x[ImportCRenamed-7] - _ = x[UnusedImport-8] - _ = x[InvalidInitCycle-9] - _ = x[DuplicateDecl-10] - _ = x[InvalidDeclCycle-11] - _ = x[InvalidTypeCycle-12] - _ = x[InvalidConstInit-13] - _ = x[InvalidConstVal-14] - _ = x[InvalidConstType-15] - _ = x[UntypedNilUse-16] - _ = x[WrongAssignCount-17] - _ = x[UnassignableOperand-18] - _ = x[NoNewVar-19] - _ = x[MultiValAssignOp-20] - _ = x[InvalidIfaceAssign-21] - _ = x[InvalidChanAssign-22] - _ = x[IncompatibleAssign-23] - _ = x[UnaddressableFieldAssign-24] - _ = x[NotAType-25] - _ = x[InvalidArrayLen-26] - _ = x[BlankIfaceMethod-27] - _ = x[IncomparableMapKey-28] - _ = x[InvalidIfaceEmbed-29] - _ = x[InvalidPtrEmbed-30] - _ = x[BadRecv-31] - _ = x[InvalidRecv-32] - _ = x[DuplicateFieldAndMethod-33] - _ = x[DuplicateMethod-34] - _ = x[InvalidBlank-35] - _ = x[InvalidIota-36] - _ = x[MissingInitBody-37] - _ = x[InvalidInitSig-38] - _ = x[InvalidInitDecl-39] - _ = x[InvalidMainDecl-40] - _ = x[TooManyValues-41] - _ = x[NotAnExpr-42] - _ = x[TruncatedFloat-43] - _ = x[NumericOverflow-44] - _ = x[UndefinedOp-45] - _ = x[MismatchedTypes-46] - _ = x[DivByZero-47] - _ = x[NonNumericIncDec-48] - _ = x[UnaddressableOperand-49] - _ = x[InvalidIndirection-50] - _ = x[NonIndexableOperand-51] - _ = x[InvalidIndex-52] - _ = x[SwappedSliceIndices-53] - _ = x[NonSliceableOperand-54] - _ = x[InvalidSliceExpr-55] - _ = x[InvalidShiftCount-56] - _ = x[InvalidShiftOperand-57] - _ = x[InvalidReceive-58] - _ = x[InvalidSend-59] - _ = x[DuplicateLitKey-60] - _ = x[MissingLitKey-61] - _ = x[InvalidLitIndex-62] - _ = x[OversizeArrayLit-63] - _ = x[MixedStructLit-64] - _ = x[InvalidStructLit-65] - _ = x[MissingLitField-66] - _ = x[DuplicateLitField-67] - _ = x[UnexportedLitField-68] - _ = x[InvalidLitField-69] - _ = x[UntypedLit-70] - _ = x[InvalidLit-71] - _ = x[AmbiguousSelector-72] - _ = x[UndeclaredImportedName-73] - _ = x[UnexportedName-74] - _ = x[UndeclaredName-75] - _ = x[MissingFieldOrMethod-76] - _ = x[BadDotDotDotSyntax-77] - _ = x[NonVariadicDotDotDot-78] - _ = x[MisplacedDotDotDot-79] - _ = x[InvalidDotDotDotOperand-80] - _ = x[InvalidDotDotDot-81] - _ = x[UncalledBuiltin-82] - _ = x[InvalidAppend-83] - _ = x[InvalidCap-84] - _ = x[InvalidClose-85] - _ = x[InvalidCopy-86] - _ = x[InvalidComplex-87] - _ = x[InvalidDelete-88] - _ = x[InvalidImag-89] - _ = x[InvalidLen-90] - _ = x[SwappedMakeArgs-91] - _ = x[InvalidMake-92] - _ = x[InvalidReal-93] - _ = x[InvalidAssert-94] - _ = x[ImpossibleAssert-95] - _ = x[InvalidConversion-96] - _ = x[InvalidUntypedConversion-97] - _ = x[BadOffsetofSyntax-98] - _ = x[InvalidOffsetof-99] - _ = x[UnusedExpr-100] - _ = x[UnusedVar-101] - _ = x[MissingReturn-102] - _ = x[WrongResultCount-103] - _ = x[OutOfScopeResult-104] - _ = x[InvalidCond-105] - _ = x[InvalidPostDecl-106] - _ = x[InvalidChanRange-107] - _ = x[InvalidIterVar-108] - _ = x[InvalidRangeExpr-109] - _ = x[MisplacedBreak-110] - _ = x[MisplacedContinue-111] - _ = x[MisplacedFallthrough-112] - _ = x[DuplicateCase-113] - _ = x[DuplicateDefault-114] - _ = x[BadTypeKeyword-115] - _ = x[InvalidTypeSwitch-116] - _ = x[InvalidExprSwitch-117] - _ = x[InvalidSelectCase-118] - _ = x[UndeclaredLabel-119] - _ = x[DuplicateLabel-120] - _ = x[MisplacedLabel-121] - _ = x[UnusedLabel-122] - _ = x[JumpOverDecl-123] - _ = x[JumpIntoBlock-124] - _ = x[InvalidMethodExpr-125] - _ = 
x[WrongArgCount-126] - _ = x[InvalidCall-127] - _ = x[UnusedResults-128] - _ = x[InvalidDefer-129] - _ = x[InvalidGo-130] - _ = x[BadDecl-131] - _ = x[RepeatedDecl-132] - _ = x[InvalidUnsafeAdd-133] - _ = x[InvalidUnsafeSlice-134] - _ = x[UnsupportedFeature-135] - _ = x[NotAGenericType-136] - _ = x[WrongTypeArgCount-137] - _ = x[CannotInferTypeArgs-138] - _ = x[InvalidTypeArg-139] - _ = x[InvalidInstanceCycle-140] - _ = x[InvalidUnion-141] - _ = x[MisplacedConstraintIface-142] - _ = x[InvalidMethodTypeParams-143] - _ = x[MisplacedTypeParam-144] - _ = x[InvalidUnsafeSliceData-145] - _ = x[InvalidUnsafeString-146] -} - -const ( - _ErrorCode_name_0 = "InvalidSyntaxTree" - _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" -) - -var ( - _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} -) - -func (i ErrorCode) String() string { - switch { - case i == -1: - return _ErrorCode_name_0 - case 1 <= i && i <= 146: - i -= 1 - return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] - default: - return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go deleted file mode 100644 index ce7d4351b22..00000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typesinternal provides access to internal go/types APIs that are not -// yet exported. -package typesinternal - -import ( - "go/token" - "go/types" - "reflect" - "unsafe" -) - -func SetUsesCgo(conf *types.Config) bool { - v := reflect.ValueOf(conf).Elem() - - f := v.FieldByName("go115UsesCgo") - if !f.IsValid() { - f = v.FieldByName("UsesCgo") - if !f.IsValid() { - return false - } - } - - addr := unsafe.Pointer(f.UnsafeAddr()) - *(*bool)(addr) = true - - return true -} - -// ReadGo116ErrorData extracts additional information from types.Error values -// generated by Go version 1.16 and later: the error code, start position, and -// end position. If all positions are valid, start <= err.Pos <= end. -// -// If the data could not be read, the final result parameter will be false. -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { - var data [3]int - // By coincidence all of these fields are ints, which simplifies things. - v := reflect.ValueOf(err) - for i, name := range []string{"go116code", "go116start", "go116end"} { - f := v.FieldByName(name) - if !f.IsValid() { - return 0, 0, 0, false - } - data[i] = int(f.Int()) - } - return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true -} - -var SetGoVersion = func(conf *types.Config, version string) bool { return false } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go deleted file mode 100644 index a42b072a67d..00000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typesinternal - -import ( - "go/types" -) - -func init() { - SetGoVersion = func(conf *types.Config, version string) bool { - conf.GoVersion = version - return true - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go deleted file mode 100644 index bbabcd22e94..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/gover.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This is a fork of internal/gover for use by x/tools until -// go1.21 and earlier are no longer supported by x/tools. 
- -package versions - -import "strings" - -// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] -// The numbers are the original decimal strings to avoid integer overflows -// and since there is very little actual math. (Probably overflow doesn't matter in practice, -// but at the time this code was written, there was an existing test that used -// go1.99999999999, which does not fit in an int on 32-bit platforms. -// The "big decimal" representation avoids the problem entirely.) -type gover struct { - major string // decimal - minor string // decimal or "" - patch string // decimal or "" - kind string // "", "alpha", "beta", "rc" - pre string // decimal or "" -} - -// compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as toolchain versions. -// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". -// Malformed versions compare less than well-formed versions and equal to each other. -// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". -func compare(x, y string) int { - vx := parse(x) - vy := parse(y) - - if c := cmpInt(vx.major, vy.major); c != 0 { - return c - } - if c := cmpInt(vx.minor, vy.minor); c != 0 { - return c - } - if c := cmpInt(vx.patch, vy.patch); c != 0 { - return c - } - if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc - return c - } - if c := cmpInt(vx.pre, vy.pre); c != 0 { - return c - } - return 0 -} - -// lang returns the Go language version. For example, lang("1.2.3") == "1.2". -func lang(x string) string { - v := parse(x) - if v.minor == "" || v.major == "1" && v.minor == "0" { - return v.major - } - return v.major + "." + v.minor -} - -// isValid reports whether the version x is valid. -func isValid(x string) bool { - return parse(x) != gover{} -} - -// parse parses the Go version string x into a version. -// It returns the zero version if x is malformed. -func parse(x string) gover { - var v gover - - // Parse major version. - var ok bool - v.major, x, ok = cutInt(x) - if !ok { - return gover{} - } - if x == "" { - // Interpret "1" as "1.0.0". - v.minor = "0" - v.patch = "0" - return v - } - - // Parse . before minor version. - if x[0] != '.' { - return gover{} - } - - // Parse minor version. - v.minor, x, ok = cutInt(x[1:]) - if !ok { - return gover{} - } - if x == "" { - // Patch missing is same as "0" for older versions. - // Starting in Go 1.21, patch missing is different from explicit .0. - if cmpInt(v.minor, "21") < 0 { - v.patch = "0" - } - return v - } - - // Parse patch if present. - if x[0] == '.' { - v.patch, x, ok = cutInt(x[1:]) - if !ok || x != "" { - // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). - // Allowing them would be a bit confusing because we already have: - // 1.21 < 1.21rc1 - // But a prerelease of a patch would have the opposite effect: - // 1.21.3rc1 < 1.21.3 - // We've never needed them before, so let's not start now. - return gover{} - } - return v - } - - // Parse prerelease. - i := 0 - for i < len(x) && (x[i] < '0' || '9' < x[i]) { - if x[i] < 'a' || 'z' < x[i] { - return gover{} - } - i++ - } - if i == 0 { - return gover{} - } - v.kind, x = x[:i], x[i:] - if x == "" { - return v - } - v.pre, x, ok = cutInt(x) - if !ok || x != "" { - return gover{} - } - - return v -} - -// cutInt scans the leading decimal number at the start of x to an integer -// and returns that value and the rest of the string. 
-func cutInt(x string) (n, rest string, ok bool) { - i := 0 - for i < len(x) && '0' <= x[i] && x[i] <= '9' { - i++ - } - if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero - return "", "", false - } - return x[:i], x[i:], true -} - -// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. -// (Copied from golang.org/x/mod/semver's compareInt.) -func cmpInt(x, y string) int { - if x == y { - return 0 - } - if len(x) < len(y) { - return -1 - } - if len(x) > len(y) { - return +1 - } - if x < y { - return -1 - } else { - return +1 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go deleted file mode 100644 index 562eef21fa2..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -import ( - "go/types" -) - -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() - } - return "" -} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index a7b79207aee..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersions always reports the a file's Go version as the -// zero version at this Go version. -func FileVersions(info *types.Info, file *ast.File) string { return "" } - -// InitFileVersions is a noop at this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index 7b9ba89a822..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersions maps a file to the file's semantic Go version. -// The reported version is the zero version if a version cannot be determined. -func FileVersions(info *types.Info, file *ast.File) string { - return info.FileVersions[file] -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go deleted file mode 100644 index e16f6c33a52..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/versions.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// Note: If we use build tags to use go/versions when go >=1.22, -// we run into go.dev/issue/53737. Under some operations users would see an -// import of "go/versions" even if they would not compile the file. -// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include -// For this reason, this library just a clone of go/versions for the moment. - -// Lang returns the Go language version for version x. -// If x is not a valid version, Lang returns the empty string. -// For example: -// -// Lang("go1.21rc2") = "go1.21" -// Lang("go1.21.2") = "go1.21" -// Lang("go1.21") = "go1.21" -// Lang("go1") = "go1" -// Lang("bad") = "" -// Lang("1.21") = "" -func Lang(x string) string { - v := lang(stripGo(x)) - if v == "" { - return "" - } - return x[:2+len(v)] // "go"+v without allocation -} - -// Compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as Go versions. -// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". -// Invalid versions, including the empty string, compare less than -// valid versions and equal to each other. -// The language version "go1.21" compares less than the -// release candidate and eventual releases "go1.21rc1" and "go1.21.0". -// Custom toolchain suffixes are ignored during comparison: -// "go1.21.0" and "go1.21.0-bigcorp" are equal. -func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } - -// IsValid reports whether the version x is valid. -func IsValid(x string) bool { return isValid(stripGo(x)) } - -// stripGo converts from a "go1.21" version to a "1.21" version. -// If v does not start with "go", stripGo returns the empty string (a known invalid version). -func stripGo(v string) string { - if len(v) < 2 || v[:2] != "go" { - return "" - } - return v[2:] -} diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 52d530d7ad0..712fef4d0fb 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) (s string) { +func str(x any) string { if v, ok := x.(fmt.Stringer); ok { - return fmt.Sprint(v) + return v.String() } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 08476ad1fe1..11b106182db 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,13 +25,7 @@ import ( "strconv" ) -// A Code is a status code defined according to the [gRPC documentation]. -// -// Only the codes defined as consts in this package are valid codes. Do not use -// other code values. Behavior of other codes is implementation-specific and -// interoperability between implementations is not guaranteed. -// -// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. 
type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34edf9..877b7cd21af 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,25 +44,10 @@ func (t TLSInfo) AuthType() string { return "tls" } -// cipherSuiteLookup returns the string version of a TLS cipher suite ID. -func cipherSuiteLookup(cipherSuiteID uint16) string { - for _, s := range tls.CipherSuites() { - if s.ID == cipherSuiteID { - return s.Name - } - } - for _, s := range tls.InsecureCipherSuites() { - if s.ID == cipherSuiteID { - return s.Name - } - } - return fmt.Sprintf("unknown ID: %v", cipherSuiteID) -} - // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup(t.State.CipherSuite), + StandardName: cipherSuiteLookup[t.State.CipherSuite], } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -153,39 +138,10 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -// The following cipher suites are forbidden for use with HTTP/2 by -// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A -var tls12ForbiddenCipherSuites = map[uint16]struct{}{ - tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, -} - // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) - // If the user did not configure a MinVersion and did not configure a - // MaxVersion < 1.2, use MinVersion=1.2, which is required by - // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 - } - // If the user did not configure CipherSuites, use all "secure" cipher - // suites reported by the TLS package, but remove some explicitly forbidden - // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { - for _, cs := range tls.CipherSuites() { - if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) - } - } - } return tc } @@ -249,3 +205,32 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", +} diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go deleted file mode 100644 index 7f7044e1731..00000000000 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial - // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - - // RecvBufferPool is implemented by the grpc package and returns a server - // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption -) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 6c7ea6a5336..c8a8c76d628 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -57,7 +57,7 @@ var ( // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. @@ -68,11 +68,11 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. CanonicalString any // func (codes.Code) string - // IsRegisteredMethod returns whether the passed in method is registered as - // a method on the server. - IsRegisteredMethod any // func(*grpc.Server, string) bool - // ServerFromContext returns the server from the context. - ServerFromContext any // func(context.Context) *grpc.Server + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -175,27 +175,6 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. GRPCResolverSchemeExtraMetadata string = "xds" - - // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) - - // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. - ExitIdleModeForTesting any // func(*grpc.ClientConn) error - - ChannelzTurnOffForTesting func() - - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. 
- TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - - // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton - // to invoke resource not found for a resource type name and resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. - FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 03ef2fedd5c..4cf85cad9f8 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,34 +43,6 @@ type Status struct { s *spb.Status } -// NewWithProto returns a new status including details from statusProto. This -// is meant to be used by the gRPC library only. -func NewWithProto(code codes.Code, message string, statusProto []string) *Status { - if len(statusProto) != 1 { - // No grpc-status-details bin header, or multiple; just ignore. - return &Status{s: &spb.Status{Code: int32(code), Message: message}} - } - st := &spb.Status{} - if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { - // Probably not a google.rpc.Status proto; do not provide details. - return &Status{s: &spb.Status{Code: int32(code), Message: message}} - } - if st.Code == int32(code) { - // The codes match between the grpc-status header and the - // grpc-status-details-bin header; use the full details proto. - return &Status{s: st} - } - return &Status{ - s: &spb.Status{ - Code: int32(codes.Internal), - Message: fmt.Sprintf( - "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", - code, message, st, - ), - }, - } -} - // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go deleted file mode 100644 index 4f347edd423..00000000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build !unix && !windows - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" -) - -// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. 
-func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{} -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go deleted file mode 100644 index 078137b7fd7..00000000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build unix - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on -// the underlying connection with OS default values for keepalive parameters. -// -// TODO: Once https://github.com/golang/go/issues/62254 lands, and the -// appropriate Go version becomes less than our least supported Go version, we -// should look into using the new API to make things more straightforward. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{ - // Setting a negative value here prevents the Go stdlib from overriding - // the values of TCP keepalive time and interval. It also prevents the - // Go stdlib from enabling TCP keepalives by default. - KeepAlive: time.Duration(-1), - // This method is called after the underlying network socket is created, - // but before dialing the socket (or calling its connect() method). The - // combination of unconditionally enabling TCP keepalives here, and - // disabling the overriding of TCP keepalive parameters by setting the - // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go deleted file mode 100644 index fd7d43a8907..00000000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build windows - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package internal - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on -// the underlying connection with OS default values for keepalive parameters. -// -// TODO: Once https://github.com/golang/go/issues/62254 lands, and the -// appropriate Go version becomes less than our least supported Go version, we -// should look into using the new API to make things more straightforward. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{ - // Setting a negative value here prevents the Go stdlib from overriding - // the values of TCP keepalive time and interval. It also prevents the - // Go stdlib from enabling TCP keepalives by default. - KeepAlive: time.Duration(-1), - // This method is called after the underlying network socket is created, - // but before dialing the socket (or calling its connect() method). The - // combination of unconditionally enabling TCP keepalives here, and - // disabling the overriding of TCP keepalive parameters by setting the - // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb79b..804be887de0 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,116 +136,3 @@ func (a *AddressMap) Values() []any { } return ret } - -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} - -// EndpointMap is a map of endpoints to arbitrary values keyed on only the -// unordered set of address strings within an endpoint. This map is not thread -// safe, thus it is unsafe to access concurrently. Must be created via -// NewEndpointMap; do not construct directly. -type EndpointMap struct { - endpoints map[*endpointNode]any -} - -// NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), - } -} - -// Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true - } - return nil, false -} - -// Set updates or adds the value to the address in the map. -func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return - } - em.endpoints[&en] = value -} - -// Len returns the number of entries in the map. 
-func (em *EndpointMap) Len() int { - return len(em.endpoints) -} - -// Keys returns a slice of all current map keys, as endpoints specifying the -// addresses present in the endpoint keys, in which uniqueness is determined by -// the unordered set of addresses. Thus, endpoint information returned is not -// the full endpoint data (drops duplicated addresses and attributes) but can be -// used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { - ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) - } - return ret -} - -// Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) - for _, val := range em.endpoints { - ret = append(ret, val) - } - return ret -} - -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - -// Delete removes the specified endpoint from the map. -func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } -} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index adf89dd9cfe..11384e228e5 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -240,6 +240,11 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -281,11 +286,6 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } -// String returns a string representation of Target. -func (t Target) String() string { - return t.URL.String() -} - // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -314,13 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// AuthorityOverrider is implemented by Builders that wish to override the -// default authority for the ClientConn. -// By default, the authority used is target.Endpoint(). -type AuthorityOverrider interface { - // OverrideAuthority returns the authority to use for a ClientConn with the - // given target. The implementation must generate it without blocking, - // typically in line, and must keep it unchanged. 
- OverrideAuthority(Target) string -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0f2410f3840..6115583a81f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,22 +1,19 @@ # dario.cat/mergo v1.0.0 ## explicit; go 1.13 dario.cat/mergo -# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 +# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 ## explicit; go 1.16 # github.com/Masterminds/goutils v1.1.1 ## explicit github.com/Masterminds/goutils -# github.com/Masterminds/semver v1.5.0 -## explicit -github.com/Masterminds/semver -# github.com/Masterminds/semver/v3 v3.2.0 +# github.com/Masterminds/semver/v3 v3.2.1 ## explicit; go 1.18 github.com/Masterminds/semver/v3 # github.com/Masterminds/sprig/v3 v3.2.3 ## explicit; go 1.13 github.com/Masterminds/sprig/v3 -# github.com/Microsoft/go-winio v0.6.1 -## explicit; go 1.17 +# github.com/Microsoft/go-winio v0.6.2 +## explicit; go 1.21 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/backuptar github.com/Microsoft/go-winio/internal/fs @@ -38,8 +35,8 @@ github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion github.com/Microsoft/hcsshim/pkg/ociwclayer -# github.com/ProtonMail/go-crypto v1.0.0 -## explicit; go 1.13 +# github.com/ProtonMail/go-crypto v1.1.0-alpha.2 +## explicit; go 1.17 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool github.com/ProtonMail/go-crypto/eax @@ -50,6 +47,8 @@ github.com/ProtonMail/go-crypto/openpgp/aes/keywrap github.com/ProtonMail/go-crypto/openpgp/armor github.com/ProtonMail/go-crypto/openpgp/ecdh github.com/ProtonMail/go-crypto/openpgp/ecdsa +github.com/ProtonMail/go-crypto/openpgp/ed25519 +github.com/ProtonMail/go-crypto/openpgp/ed448 github.com/ProtonMail/go-crypto/openpgp/eddsa github.com/ProtonMail/go-crypto/openpgp/elgamal github.com/ProtonMail/go-crypto/openpgp/errors @@ -58,6 +57,8 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc github.com/ProtonMail/go-crypto/openpgp/internal/encoding github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k +github.com/ProtonMail/go-crypto/openpgp/x25519 +github.com/ProtonMail/go-crypto/openpgp/x448 # github.com/StackExchange/wmi v1.2.1 ## explicit; go 1.13 github.com/StackExchange/wmi @@ -81,8 +82,8 @@ github.com/cavaliergopher/grab/v3/pkg/bps # github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/cloudflare/circl v1.3.7 -## explicit; go 1.19 +# github.com/cloudflare/circl v1.3.8 +## explicit; go 1.21 github.com/cloudflare/circl/dh/x25519 github.com/cloudflare/circl/dh/x448 github.com/cloudflare/circl/ecc/goldilocks @@ -106,7 +107,7 @@ github.com/containerd/containerd/errdefs github.com/containerd/containerd/log github.com/containerd/containerd/pkg/epoch github.com/containerd/containerd/pkg/userns -# github.com/containerd/continuity v0.4.3 +# github.com/containerd/continuity v0.4.2 ## explicit; go 1.19 github.com/containerd/continuity/fs github.com/containerd/continuity/sysx @@ -117,17 +118,17 @@ github.com/containerd/log ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil -# github.com/cpuguy83/go-md2man/v2 v2.0.2 +# github.com/cpuguy83/go-md2man/v2 v2.0.3 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/cyphar/filepath-securejoin v0.2.4 +# github.com/cyphar/filepath-securejoin v0.2.5 ## explicit; go 1.13 
github.com/cyphar/filepath-securejoin # github.com/denisbrodbeck/machineid v1.0.1 ## explicit github.com/denisbrodbeck/machineid -# github.com/diskfs/go-diskfs v1.3.0 -## explicit; go 1.16 +# github.com/diskfs/go-diskfs v1.4.0 +## explicit; go 1.19 github.com/diskfs/go-diskfs github.com/diskfs/go-diskfs/disk github.com/diskfs/go-diskfs/filesystem @@ -154,7 +155,7 @@ github.com/docker/cli/cli/config/types github.com/docker/distribution/digestset github.com/docker/distribution/reference github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v24.0.9+incompatible +# github.com/docker/docker v24.0.0+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -187,6 +188,9 @@ github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units +# github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab +## explicit; go 1.11 +github.com/elliotwutingfeng/asciiset # github.com/emirpasic/gods v1.18.1 ## explicit; go 1.2 github.com/emirpasic/gods/containers @@ -215,7 +219,7 @@ github.com/go-git/go-billy/v5/helper/polyfill github.com/go-git/go-billy/v5/memfs github.com/go-git/go-billy/v5/osfs github.com/go-git/go-billy/v5/util -# github.com/go-git/go-git/v5 v5.11.0 +# github.com/go-git/go-git/v5 v5.12.0 ## explicit; go 1.19 github.com/go-git/go-git/v5 github.com/go-git/go-git/v5/config @@ -263,7 +267,7 @@ github.com/go-git/go-git/v5/utils/merkletrie/internal/frame github.com/go-git/go-git/v5/utils/merkletrie/noder github.com/go-git/go-git/v5/utils/sync github.com/go-git/go-git/v5/utils/trace -# github.com/go-logr/logr v1.4.1 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -271,9 +275,12 @@ github.com/go-logr/logr/funcr ## explicit; go 1.12 github.com/go-ole/go-ole github.com/go-ole/go-ole/oleutil -# github.com/go-task/slim-sprig v2.20.0+incompatible +# github.com/go-task/slim-sprig/v3 v3.0.0 +## explicit; go 1.20 +github.com/go-task/slim-sprig/v3 +# github.com/gofrs/flock v0.8.1 ## explicit -github.com/go-task/slim-sprig +github.com/gofrs/flock # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/gogoproto @@ -322,7 +329,7 @@ github.com/google/go-containerregistry/pkg/v1/remote/transport github.com/google/go-containerregistry/pkg/v1/stream github.com/google/go-containerregistry/pkg/v1/tarball github.com/google/go-containerregistry/pkg/v1/types -# github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de +# github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 ## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -358,11 +365,11 @@ github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/itchyny/gojq v0.12.12 -## explicit; go 1.18 +# github.com/itchyny/gojq v0.12.16 +## explicit; go 1.20 github.com/itchyny/gojq -# github.com/itchyny/timefmt-go v0.1.5 -## explicit; go 1.17 +# github.com/itchyny/timefmt-go v0.1.6 +## explicit; go 1.20 github.com/itchyny/timefmt-go # github.com/jaypipes/ghw v0.9.1-0.20220511134554-dac2f19e1c76 ## explicit; go 1.15 @@ -402,8 +409,8 @@ github.com/kendru/darwin/go/depgraph # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.17.4 -## explicit; go 1.19 +# github.com/klauspost/compress v1.16.5 +## explicit; go 1.18 
github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -414,6 +421,9 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/magiconair/properties v1.8.7 ## explicit; go 1.19 github.com/magiconair/properties +# github.com/mauromorales/xpasswd v0.4.0 +## explicit; go 1.20 +github.com/mauromorales/xpasswd/pkg/users # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure @@ -426,18 +436,13 @@ github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/moby/moby v25.0.2+incompatible -## explicit -github.com/moby/moby/libnetwork/resolvconf -# github.com/moby/sys/mountinfo v0.7.1 -## explicit; go 1.16 # github.com/moby/sys/sequential v0.5.0 ## explicit; go 1.17 github.com/moby/sys/sequential -# github.com/mudler/entities v0.0.0-20220905203055-68348bae0f49 -## explicit; go 1.12 +# github.com/mudler/entities v0.8.0 +## explicit; go 1.21 github.com/mudler/entities/pkg/entities -# github.com/onsi/ginkgo/v2 v2.15.0 +# github.com/onsi/ginkgo/v2 v2.19.0 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -459,8 +464,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.30.0 -## explicit; go 1.18 +# github.com/onsi/gomega v1.33.1 +## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -474,14 +479,14 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0-rc6 +# github.com/opencontainers/image-spec v1.1.0-rc3 ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 # github.com/packethost/packngo v0.31.0 ## explicit; go 1.16 github.com/packethost/packngo/metadata -# github.com/pelletier/go-toml/v2 v2.0.7 +# github.com/pelletier/go-toml/v2 v2.0.6 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters @@ -491,10 +496,13 @@ github.com/pelletier/go-toml/v2/unstable # github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee ## explicit github.com/phayes/permbits -# github.com/pierrec/lz4 v2.6.1+incompatible -## explicit -github.com/pierrec/lz4 -github.com/pierrec/lz4/internal/xxh32 +# github.com/pierrec/lz4/v4 v4.1.21 +## explicit; go 1.14 +github.com/pierrec/lz4/v4 +github.com/pierrec/lz4/v4/internal/lz4block +github.com/pierrec/lz4/v4/internal/lz4errors +github.com/pierrec/lz4/v4/internal/lz4stream +github.com/pierrec/lz4/v4/internal/xxh32 # github.com/pjbgf/sha1cd v0.3.0 ## explicit; go 1.19 github.com/pjbgf/sha1cd @@ -509,8 +517,9 @@ github.com/pkg/xattr # github.com/rancher-sandbox/linuxkit v1.0.2 ## explicit; go 1.18 github.com/rancher-sandbox/linuxkit/providers -# github.com/rancher/yip v1.4.10 -## explicit; go 1.20 +# github.com/rancher/yip v1.9.2 +## explicit; go 1.21 +github.com/rancher/yip/pkg/dag github.com/rancher/yip/pkg/executor github.com/rancher/yip/pkg/logger github.com/rancher/yip/pkg/plugins @@ -520,39 +529,33 @@ github.com/rancher/yip/pkg/utils # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 -# github.com/samber/lo v1.37.0 -## explicit; go 1.18 -github.com/samber/lo # github.com/sanity-io/litter v1.5.5 ## 
explicit; go 1.16 github.com/sanity-io/litter # github.com/satori/go.uuid v1.2.0 ## explicit github.com/satori/go.uuid -# github.com/sergi/go-diff v1.3.1 -## explicit; go 1.12 -github.com/sergi/go-diff/diffmatchpatch -# github.com/shopspring/decimal v1.3.1 +# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 ## explicit; go 1.13 +github.com/sergi/go-diff/diffmatchpatch +# github.com/shopspring/decimal v1.4.0 +## explicit; go 1.10 github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/skeema/knownhosts v1.2.1 +# github.com/skeema/knownhosts v1.2.2 ## explicit; go 1.17 github.com/skeema/knownhosts -# github.com/spectrocloud-labs/herd v0.4.2 -## explicit; go 1.19 -github.com/spectrocloud-labs/herd -# github.com/spf13/afero v1.9.5 +# github.com/spf13/afero v1.9.3 ## explicit; go 1.16 github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/spf13/cast v1.5.0 -## explicit; go 1.18 +# github.com/spf13/cast v1.6.0 +## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.7.0 +# github.com/spf13/cobra v1.8.0 ## explicit; go 1.15 github.com/spf13/cobra github.com/spf13/cobra/doc @@ -576,16 +579,16 @@ github.com/spf13/viper/internal/encoding/yaml # github.com/subosito/gotenv v1.4.2 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tredoe/osutil/v2 v2.0.0-rc.16 +# github.com/tredoe/osutil v1.5.0 ## explicit; go 1.16 -github.com/tredoe/osutil/v2/userutil/crypt -github.com/tredoe/osutil/v2/userutil/crypt/common -github.com/tredoe/osutil/v2/userutil/crypt/sha512_crypt +github.com/tredoe/osutil/user/crypt +github.com/tredoe/osutil/user/crypt/common +github.com/tredoe/osutil/user/crypt/sha512_crypt # github.com/twpayne/go-vfs/v4 v4.3.0 ## explicit; go 1.19 github.com/twpayne/go-vfs/v4 github.com/twpayne/go-vfs/v4/vfst -# github.com/ulikunitz/xz v0.5.11 +# github.com/ulikunitz/xz v0.5.12 ## explicit; go 1.12 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash @@ -598,8 +601,8 @@ github.com/vbatts/tar-split/archive/tar ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl -# github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f -## explicit; go 1.12 +# github.com/vishvananda/netns v0.0.4 +## explicit; go 1.17 github.com/vishvananda/netns # github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 ## explicit; go 1.12 @@ -608,14 +611,11 @@ github.com/vmware/vmw-guestinfo/message github.com/vmware/vmw-guestinfo/rpcout github.com/vmware/vmw-guestinfo/rpcvmx github.com/vmware/vmw-guestinfo/vmcheck -# github.com/willdonnelly/passwd v0.0.0-20141013001024-7935dab3074c -## explicit -github.com/willdonnelly/passwd # github.com/xanzy/ssh-agent v0.3.3 ## explicit; go 1.16 github.com/xanzy/ssh-agent -# github.com/zcalusic/sysinfo v0.9.5 -## explicit; go 1.17 +# github.com/zcalusic/sysinfo v1.0.2 +## explicit; go 1.21 github.com/zcalusic/sysinfo github.com/zcalusic/sysinfo/cpuid # go.opencensus.io v0.24.0 @@ -625,7 +625,7 @@ go.opencensus.io/internal go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.21.0 +# golang.org/x/crypto v0.23.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -647,13 +647,7 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 -## explicit; go 1.18 
-golang.org/x/exp/constraints -# golang.org/x/mod v0.14.0 -## explicit; go 1.18 -golang.org/x/mod/semver -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.25.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -661,17 +655,17 @@ golang.org/x/net/html/atom golang.org/x/net/html/charset golang.org/x/net/internal/socks golang.org/x/net/proxy -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.18.0 +# golang.org/x/sys v0.20.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.15.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -691,35 +685,18 @@ golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.17.0 -## explicit; go 1.18 -golang.org/x/tools/cmd/stringer +# golang.org/x/tools v0.21.0 +## explicit; go 1.19 +golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector -golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/go/packages -golang.org/x/tools/go/types/objectpath -golang.org/x/tools/internal/event -golang.org/x/tools/internal/event/core -golang.org/x/tools/internal/event/keys -golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/event/tag -golang.org/x/tools/internal/gcimporter -golang.org/x/tools/internal/gocommand -golang.org/x/tools/internal/packagesinternal -golang.org/x/tools/internal/pkgbits -golang.org/x/tools/internal/tokeninternal -golang.org/x/tools/internal/typeparams -golang.org/x/tools/internal/typesinternal -golang.org/x/tools/internal/versions -# golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 -## explicit; go 1.18 +# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 +## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.61.0 +# google.golang.org/grpc v1.58.3 ## explicit; go 1.19 google.golang.org/grpc/attributes google.golang.org/grpc/codes @@ -801,9 +778,3 @@ k8s.io/mount-utils k8s.io/utils/exec k8s.io/utils/io k8s.io/utils/keymutex -# pault.ag/go/modprobe v0.1.2 -## explicit; go 1.15 -pault.ag/go/modprobe -# pault.ag/go/topsort v0.0.0-20160530003732-f98d2ad46e1a -## explicit -pault.ag/go/topsort diff --git a/vendor/pault.ag/go/modprobe/.gitignore b/vendor/pault.ag/go/modprobe/.gitignore deleted file mode 100644 index 1377554ebea..00000000000 --- a/vendor/pault.ag/go/modprobe/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp diff --git a/vendor/pault.ag/go/modprobe/LICENSE b/vendor/pault.ag/go/modprobe/LICENSE deleted file mode 100644 index 4bf00c71b97..00000000000 --- a/vendor/pault.ag/go/modprobe/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2018, Paul R. 
Tagliamonte - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/pault.ag/go/modprobe/README.md b/vendor/pault.ag/go/modprobe/README.md deleted file mode 100644 index b68d1aa9765..00000000000 --- a/vendor/pault.ag/go/modprobe/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# go-modprobe - -Load an unload Linux kernel modules using the Linux module syscalls. - -This package is Linux specific. Loading a module uses the `finit` variant, -which allows loading of modules by a file descriptor, rather than having to -load an ELF into the process memory before loading. - -The ability to load and unload modules is dependent on either the `CAP_SYS_MODULE` -capability, or running as root. Care should be taken to understand what security -implications this has on processes that use this library. - -## Setting the capability on a binary using this package - -``` -$ sudo setcap cap_sys_module+ep /path/to/binary -``` diff --git a/vendor/pault.ag/go/modprobe/dep.go b/vendor/pault.ag/go/modprobe/dep.go deleted file mode 100644 index 5cc68a2fc23..00000000000 --- a/vendor/pault.ag/go/modprobe/dep.go +++ /dev/null @@ -1,98 +0,0 @@ -package modprobe - -import ( - "bufio" - "os" - "strings" - - "pault.ag/go/topsort" -) - -// Dependencies takes a path to a .ko file, determine what modules will have to -// be present before loading that module, and return those modules as a slice -// of strings. -func Dependencies(path string) ([]string, error) { - deps, err := loadDependencies() - if err != nil { - return nil, err - } - return deps.Load(path) -} - -// simple container type that stores a mapping from an element to elements -// that it depends on. -type dependencies map[string][]string - -// top level loading of the dependency tree. this will start a network -// walk the dep tree, load them into the network, and return a topological -// sort of the modules. -func (d dependencies) Load(name string) ([]string, error) { - network := topsort.NewNetwork() - if err := d.load(name, network); err != nil { - return nil, err - } - - order, err := network.Sort() - if err != nil { - return nil, err - } - - ret := []string{} - for _, node := range order { - ret = append(ret, node.Name) - } - return ret, nil -} - -// add a specific dependency to the network, and recurse on the leafs. 
-func (d dependencies) load(name string, network *topsort.Network) error { - if network.Get(name) != nil { - return nil - } - network.AddNode(name, nil) - - for _, dep := range d[name] { - if err := d.load(dep, network); err != nil { - return err - } - if err := network.AddEdge(dep, name); err != nil { - return err - } - } - - return nil -} - -// get a dependency map from the running kernel's modules.dep file -func loadDependencies() (dependencies, error) { - path := modulePath("modules.dep") - - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - - deps := map[string][]string{} - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - chunks := strings.SplitN(scanner.Text(), ":", 2) - depString := strings.TrimSpace(chunks[1]) - if len(depString) == 0 { - continue - } - - ret := []string{} - for _, dep := range strings.Split(depString, " ") { - ret = append(ret, modulePath(dep)) - } - deps[modulePath(chunks[0])] = ret - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - return deps, nil -} diff --git a/vendor/pault.ag/go/modprobe/docs.go b/vendor/pault.ag/go/modprobe/docs.go deleted file mode 100644 index 53e4edc0424..00000000000 --- a/vendor/pault.ag/go/modprobe/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package modprobe allows users to load and unload Linux kernel modules by -// calling the relevent Linux syscalls. -package modprobe diff --git a/vendor/pault.ag/go/modprobe/elf.go b/vendor/pault.ag/go/modprobe/elf.go deleted file mode 100644 index 7a48c58ab70..00000000000 --- a/vendor/pault.ag/go/modprobe/elf.go +++ /dev/null @@ -1,134 +0,0 @@ -package modprobe - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "debug/elf" - - "golang.org/x/sys/unix" -) - -var ( - // get the root directory for the kernel modules. If this line panics, - // it's because getModuleRoot has failed to get the uname of the running - // kernel (likely a non-POSIX system, but maybe a broken kernel?) - moduleRoot = getModuleRoot() -) - -// Get the module root (/lib/modules/$(uname -r)/) -func getModuleRoot() string { - uname := unix.Utsname{} - if err := unix.Uname(&uname); err != nil { - panic(err) - } - - i := 0 - for ; uname.Release[i] != 0; i++ { - } - - return filepath.Join( - "/lib/modules", - string(uname.Release[:i]), - ) -} - -// Get a path relitive to the module root directory. -func modulePath(path string) string { - return filepath.Join(moduleRoot, path) -} - -// ResolveName will, given a module name (such as `g_ether`) return an absolute -// path to the .ko that provides that module. -func ResolveName(name string) (string, error) { - paths, err := generateMap() - if err != nil { - return "", err - } - - fsPath := paths[name] - if !strings.HasPrefix(fsPath, moduleRoot) { - return "", fmt.Errorf("Module isn't in the module directory") - } - - return fsPath, nil -} - -// Open every single kernel module under the kernel module directory -// (/lib/modules/$(uname -r)/), and parse the ELF headers to extract the -// module name. -func generateMap() (map[string]string, error) { - return elfMap(moduleRoot) -} - -// Open every single kernel module under the root, and parse the ELF headers to -// extract the module name. 
-func elfMap(root string) (map[string]string, error) { - ret := map[string]string{} - - err := filepath.Walk( - root, - func(path string, info os.FileInfo, err error) error { - if !info.Mode().IsRegular() { - return nil - } - fd, err := os.Open(path) - if err != nil { - return err - } - defer fd.Close() - name, err := Name(fd) - if err != nil { - /* For now, let's just ignore that and avoid adding to it */ - return nil - } - - ret[name] = path - return nil - }) - - if err != nil { - return nil, err - } - - return ret, nil -} - -// Name will, given a file descriptor to a Kernel Module (.ko file), parse the -// binary to get the module name. For instance, given a handle to the file at -// `kernel/drivers/usb/gadget/legacy/g_ether.ko`, return `g_ether`. -func Name(file *os.File) (string, error) { - f, err := elf.NewFile(file) - if err != nil { - return "", err - } - - syms, err := f.Symbols() - if err != nil { - return "", err - } - - for _, sym := range syms { - if strings.Compare(sym.Name, "__this_module") == 0 { - section := f.Sections[sym.Section] - data, err := section.Data() - if err != nil { - return "", err - } - - if len(data) < 25 { - return "", fmt.Errorf("modprobe: data is short, __this_module is '%s'", data) - } - - data = data[24:] - i := 0 - for ; data[i] != 0x00; i++ { - } - return string(data[:i]), nil - } - } - - return "", fmt.Errorf("No name found. Is this a .ko or just an ELF?") -} diff --git a/vendor/pault.ag/go/modprobe/load.go b/vendor/pault.ag/go/modprobe/load.go deleted file mode 100644 index 404e78bc98e..00000000000 --- a/vendor/pault.ag/go/modprobe/load.go +++ /dev/null @@ -1,41 +0,0 @@ -package modprobe - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Load will, given a short module name (such as `g_ether`), determine where -// the kernel module is located, determine any dependencies, and load all -// required modules. -func Load(module, params string) error { - path, err := ResolveName(module) - if err != nil { - return err - } - - order, err := Dependencies(path) - if err != nil { - return err - } - - paramList := make([]string, len(order)) - paramList[len(order)-1] = params - - for i, module := range order { - fd, err := os.Open(module) - if err != nil { - return err - } - /* not doing a defer since we're in a loop */ - param := paramList[i] - if err := Init(fd, param); err != nil && err != unix.EEXIST { - fd.Close() - return err - } - fd.Close() - } - - return nil -} diff --git a/vendor/pault.ag/go/modprobe/modprobe.go b/vendor/pault.ag/go/modprobe/modprobe.go deleted file mode 100644 index 50dc4d204d5..00000000000 --- a/vendor/pault.ag/go/modprobe/modprobe.go +++ /dev/null @@ -1,37 +0,0 @@ -package modprobe - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Init will use the provide .ko file's os.File (created with os.Open or -// similar), to load that kernel module into the running kernel. This may error -// out for a number of reasons, such as no permission (either setcap -// CAP_SYS_MODULE or run as root), the .ko being for the wrong kernel, or the -// file not being a module at all. -// -// Any arguments to the module may be passed through `params`, such as -// `file=/root/data/backing_file`. -func Init(file *os.File, params string) error { - return unix.FinitModule(int(file.Fd()), params, 0) -} - -// InitWithFlags will preform an Init, but allow the passing of flags to the -// syscall. 
The `flags` parameter is a bit mask value created by ORing together -// zero or more of the following flags: -// -// MODULE_INIT_IGNORE_MODVERSIONS - Ignore symbol version hashes -// MODULE_INIT_IGNORE_VERMAGIC - Ignore kernel version magic. -// -// Both flags are defined in the golang.org/x/sys/unix package. -func InitWithFlags(file *os.File, params string, flags int) error { - return unix.FinitModule(int(file.Fd()), params, flags) -} - -// Remove will unload a loaded kernel module. If no such module is loaded, or if -// the module can not be unloaded, this function will return an error. -func Remove(name string) error { - return unix.DeleteModule(name, 0) -} diff --git a/vendor/pault.ag/go/topsort/LICENSE b/vendor/pault.ag/go/topsort/LICENSE deleted file mode 100644 index 263ff1739ec..00000000000 --- a/vendor/pault.ag/go/topsort/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) Paul R. Tagliamonte , 2015 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/pault.ag/go/topsort/README.md b/vendor/pault.ag/go/topsort/README.md deleted file mode 100644 index 939649941cd..00000000000 --- a/vendor/pault.ag/go/topsort/README.md +++ /dev/null @@ -1,49 +0,0 @@ -topsort -======= - -This package provides a handy interface to do a topological sort of -some data in a pretty lightweight way. - -Example -------- - -```go -package main - -import ( - "fmt" - - "pault.ag/go/topsort" -) - -func main() { - network := topsort.NewNetwork() - - network.AddNode("watch tv while eating", nil) - network.AddNode("make dinner", nil) - network.AddNode("clean my kitchen", nil) - - /* Right, so the order of operations is next */ - - network.AddEdge("clean my kitchen", "make dinner") - // I need to clean the kitchen before I make dinner. - - network.AddEdge("make dinner", "watch tv while eating") - // Need to make dinner before I can eat it. - - nodes, err := network.Sort() - if err != nil { - panic(err) - } - - for _, step := range nodes { - fmt.Printf(" -> %s\n", step.Name) - } - /* Output is: - * - * -> clean my kitchen - * -> make dinner - * -> watch tv while eating - */ -} -``` diff --git a/vendor/pault.ag/go/topsort/topsort.go b/vendor/pault.ag/go/topsort/topsort.go deleted file mode 100644 index 27002307503..00000000000 --- a/vendor/pault.ag/go/topsort/topsort.go +++ /dev/null @@ -1,160 +0,0 @@ -/* {{{ Copyright (c) Paul R. 
Tagliamonte , 2015 - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. }}} */ - -package topsort - -import ( - "errors" -) - -// Network Helpers {{{ - -type Network struct { - nodes map[string]*Node - order []string -} - -func NewNetwork() *Network { - return &Network{ - nodes: map[string]*Node{}, - order: []string{}, - } -} - -func (tn *Network) Sort() ([]*Node, error) { - nodes := make([]*Node, 0) - for _, key := range tn.order { - nodes = append(nodes, tn.nodes[key]) - } - return sortNodes(nodes) -} - -func (tn *Network) Get(name string) *Node { - return tn.nodes[name] -} - -func (tn *Network) AddNode(name string, value interface{}) *Node { - node := Node{ - Name: name, - Value: value, - InboundEdges: make([]*Node, 0), - OutboundEdges: make([]*Node, 0), - Marked: false, - } - - if _, ok := tn.nodes[name]; !ok { - tn.order = append(tn.order, name) - } - tn.nodes[name] = &node - return &node -} - -// }}} - -// Node Helpers {{{ - -type Node struct { - Name string - Value interface{} - OutboundEdges []*Node - InboundEdges []*Node - Marked bool -} - -func (node *Node) IsCanidate() bool { - for _, edge := range node.InboundEdges { - /* for each node, let's check if they're all marked */ - if !edge.Marked { - return false - } - } - return true -} - -func (tn *Network) AddEdge(from string, to string) error { - fromNode := tn.Get(from) - toNode := tn.Get(to) - - if fromNode == nil || toNode == nil { - return errors.New("Either the root or target node doesn't exist") - } - - toNode.InboundEdges = append(toNode.InboundEdges, fromNode) - fromNode.OutboundEdges = append(fromNode.OutboundEdges, toNode) - - return nil -} - -func (tn *Network) AddEdgeIfExists(from string, to string) { - tn.AddEdge(from, to) -} - -// }}} - -// Sort Helpers {{{ - -func sortSingleNodes(nodes []*Node) ([]*Node, error) { - ret := make([]*Node, 0) - hasUnprunedNodes := false - - for _, node := range nodes { - if node.Marked { - continue /* Already output. 
*/ - } - - hasUnprunedNodes = true - - /* Otherwise, let's see if we can prune it */ - if node.IsCanidate() { - /* So, it has no deps and hasn't been marked; let's mark and - * output */ - node.Marked = true - ret = append(ret, node) - } - } - - if hasUnprunedNodes && len(ret) == 0 { - return nil, errors.New("Cycle detected :(") - } - - return ret, nil -} - -func sortNodes(nodes []*Node) (ret []*Node, err error) { - /* Reset Marked status of nodes so they're ready to sort */ - for _, node := range nodes { - node.Marked = false - } - for { - generation, err := sortSingleNodes(nodes) - if err != nil { - return nil, err - } - if len(generation) == 0 { - break - } - ret = append(ret, generation...) - } - return -} - -// }}} - -// vim: foldmethod=marker
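For context, the removed pault.ag/go/modprobe dependency loaded kernel modules through the finit_module(2) syscall, as its deleted Init function above shows. Below is a minimal, hypothetical sketch of that same call via golang.org/x/sys/unix; it is not the code that replaces the dependency in this change. The module path and kernel version are illustrative placeholders only — the removed package resolved real paths and their ordering from /lib/modules/$(uname -r)/modules.dep before loading.

```go
// Minimal sketch (not part of this change) of loading a kernel module by file
// descriptor, mirroring the Init function from the deleted vendor code above.
// Requires CAP_SYS_MODULE or root, per the removed package's README.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func loadModule(path, params string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// finit_module(2) loads the module referenced by the file descriptor,
	// so the ELF does not have to be read into process memory first.
	// EEXIST (module already loaded) is treated as success, as in the
	// removed vendor code.
	if err := unix.FinitModule(int(f.Fd()), params, 0); err != nil && err != unix.EEXIST {
		return fmt.Errorf("finit_module %s: %w", path, err)
	}
	return nil
}

func main() {
	// Hypothetical module path; real callers would resolve the path and its
	// dependency order from modules.dep, as the removed package did.
	mod := "/lib/modules/6.1.0/kernel/drivers/usb/gadget/legacy/g_ether.ko"
	if err := loadModule(mod, ""); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```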