diff --git a/.github/ISSUE_TEMPLATE/maintainer_change.yml b/.github/ISSUE_TEMPLATE/maintainer_change.yml new file mode 100644 index 0000000000..29dd62c090 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/maintainer_change.yml @@ -0,0 +1,57 @@ +name: Add or remove maintainer +description: To be used when making changes to project maintainers. +labels: ["kind/support"] +body: + - type: markdown + attributes: + value: | + Use this form to make changes to the maintainers of CAPA + - type: input + id: github_handle + attributes: + label: GitHub handle + description: The GitHub handle of the maintainer that changes will be made to. + validations: + required: true + - type: dropdown + id: action + attributes: + label: Action + description: What action is being performed to the maintainers + options: + - Add + - Remove + validations: + required: true + - type: input + id: merge_date + attributes: + label: Merge date + description: Enter the date when the changes can merge. The date should be 7 days or after the next CAPA office hours (whichever is longer) + validations: + required: true + - type: textarea + id: reason + attributes: + label: Reason for change + description: What is the reason this change is being made + validations: + required: true + - type: textarea + id: tasks + attributes: + label: 🖱️Tasks + value: | + ```[tasklist] + ## Tasks to be done + - [ ] Update **cluster-api-aws-maintainers** section in [OWNER_ALIASES](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/OWNERS_ALIASES) in CAPA repo + - [ ] Create a PR with the change and add `/hold` + - [ ] Announce the change in the CAPA slack channel + - [ ] Add PSA in the notes doc for the next CAPA office hours + - [ ] After the lazy consensus review period remove the hold + - [ ] Update **cluster-api-provider-aws-maintainers** team in [this file](https://github.com/kubernetes/org/blob/main/config/kubernetes-sigs/sig-cluster-lifecycle/teams.yaml) + - [ ] Update **approvers** for CAPA jobs [here](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api-provider-aws/OWNERS) + - [ ] Update **approvers** from CAPA image promotion [here](https://github.com/kubernetes/k8s.io/blob/main/registry.k8s.io/images/k8s-staging-cluster-api-aws/OWNERS) + - [ ] Update **k8s-infra-staging-cluster-api-aws@kubernetes.io** [here](https://github.com/kubernetes/k8s.io/blob/main/groups/sig-cluster-lifecycle/groups.yaml) + - [ ] Update **sig-cluster-lifecycle-cluster-api-aws-alerts@kubernetes.io** [here](https://github.com/kubernetes/k8s.io/blob/main/groups/sig-cluster-lifecycle/groups.yaml) + ``` diff --git a/.github/ISSUE_TEMPLATE/reviewer_change.yml b/.github/ISSUE_TEMPLATE/reviewer_change.yml new file mode 100644 index 0000000000..7a44377d48 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/reviewer_change.yml @@ -0,0 +1,53 @@ +name: Add or remove reviewer +description: To be used when making changes to project reviewers. See [contributing guide(https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/CONTRIBUTING.md)] for details of the projects ladder. +labels: ["kind/support"] +body: + - type: markdown + attributes: + value: | + Use this form to make changes to the reviewers of CAPA + - type: input + id: github_handle + attributes: + label: GitHub handle + description: The GitHub handle of the reviewer that changes will be made to. 
+ validations: + required: true + - type: dropdown + id: action + attributes: + label: Action + description: What action is being performed to the reviewers + options: + - Add + - Remove + validations: + required: true + - type: input + id: merge_date + attributes: + label: Merge date + description: Enter the date when the changes can merge. The date should be 7 days or after the next CAPA office hours (whichever is longer) + validations: + required: true + - type: textarea + id: reason + attributes: + label: Reason for change + description: What is the reason this change is being made + validations: + required: true + - type: textarea + id: tasks + attributes: + label: 🖱️Tasks + value: | + ```[tasklist] + ## Tasks to be done + - [ ] Update **cluster-api-aws-reviewers** section in [OWNER_ALIASES](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/OWNERS_ALIASES) in CAPA repo + - [ ] Create a PR with the change and add `/hold` + - [ ] Announce the change in the CAPA slack channel + - [ ] Add PSA in the notes doc for the next CAPA office hours + - [ ] After the lazy consensus review period remove the hold + - [ ] Update **reviewers** for CAPA jobs [here](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api-provider-aws/OWNERS) + ``` \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a87a2f2f8f..a78f019a9b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,13 +1,31 @@ version: 2 updates: + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: ":seedling:" + labels: + - "kind/cleanup" + - "area/ci" + - "ok-to-test" + - "release-note-none" + + # Main Go module - package-ecosystem: "gomod" directory: "/" schedule: interval: "weekly" + day: "monday" commit-message: prefix: ":seedling:" labels: - "kind/cleanup" + - "area/dependency" + - "ok-to-test" + - "release-note-none" groups: dependencies: patterns: @@ -15,22 +33,36 @@ updates: ignore: # Ignore Cluster-API as its upgraded manually. - dependency-name: "sigs.k8s.io/cluster-api*" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] # Ignore controller-runtime as its upgraded manually. - dependency-name: "sigs.k8s.io/controller-runtime" - # Ignore k8s and its transitives modules as they are upgraded manually - # together with controller-runtime. + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + # Ignore k8s and its transitives modules as they are upgraded manually together with controller-runtime. - dependency-name: "k8s.io/*" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] - dependency-name: "go.etcd.io/*" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] - dependency-name: "google.golang.org/grpc" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + # Bumping the kustomize API independently can break compatibility with client-go as they share k8s.io/kube-openapi as a dependency. + - dependency-name: "sigs.k8s.io/kustomize/api" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + # Ignore openshift ROSA pkgs as its upgraded manually. 
+ - dependency-name: "github.com/openshift*" + update-types: [ "version-update:semver-major", "version-update:semver-minor", "version-update:semver-patch" ] - package-ecosystem: "docker" directory: "/" schedule: interval: "weekly" + day: "monday" commit-message: prefix: ":seedling:" labels: - "kind/cleanup" + - "area/dependency" + - "ok-to-test" + - "release-note-none" groups: dependencies: patterns: @@ -41,10 +73,14 @@ updates: directory: "/hack/tools" schedule: interval: "weekly" + day: "wednesday" commit-message: prefix: ":seedling:" labels: - "kind/cleanup" + - "area/dependency" + - "ok-to-test" + - "release-note-none" groups: dependencies: patterns: @@ -52,35 +88,33 @@ updates: ignore: # Ignore Cluster-API as its upgraded manually. - dependency-name: "sigs.k8s.io/cluster-api*" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] # Ignore controller-runtime as its upgraded manually. - dependency-name: "sigs.k8s.io/controller-runtime" - # Ignore k8s and its transitives modules as they are upgraded manually - # together with controller-runtime. + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + # Ignore k8s and its transitives modules as they are upgraded manually together with controller-runtime. - dependency-name: "k8s.io/*" - # Ignore controller-tools as its upgraded manually. - - dependency-name: "sigs.k8s.io/controller-tools" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + - dependency-name: "go.etcd.io/*" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + - dependency-name: "google.golang.org/grpc" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] + # Bumping the kustomize API independently can break compatibility with client-go as they share k8s.io/kube-openapi as a dependency. + - dependency-name: "sigs.k8s.io/kustomize/api" + update-types: [ "version-update:semver-major", "version-update:semver-minor" ] - package-ecosystem: "docker" directory: "/hack/tools" schedule: interval: "weekly" + day: "wednesday" commit-message: prefix: ":seedling:" labels: - "kind/cleanup" - groups: - dependencies: - patterns: - - "*" - - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "weekly" - commit-message: - prefix: ":seedling:" - labels: - - "kind/cleanup" + - "area/dependency" + - "ok-to-test" + - "release-note-none" groups: dependencies: patterns: diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml index 20a6fd7993..7672de99c9 100644 --- a/.github/workflows/dependabot.yml +++ b/.github/workflows/dependabot.yml @@ -13,6 +13,10 @@ on: description: 'Run code generation manually from GH CLI' required: true default: 'Make Generate' + +permissions: + contents: write # Allow actions to update dependabot PRs + jobs: build: name: Build @@ -21,7 +25,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: '1.22' id: go - name: Check out code into the Go module directory uses: actions/checkout@v4.1.1 diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml new file mode 100644 index 0000000000..86309ebeda --- /dev/null +++ b/.github/workflows/pr-golangci-lint.yaml @@ -0,0 +1,33 @@ +name: PR golangci-lint + +on: + pull_request: + types: [opened, edited, synchronize, reopened] + +# Remove all permissions from GITHUB_TOKEN except metadata. 
+permissions: {} + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + working-directory: + - "" + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # tag=v5.0.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: golangci-lint + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # tag=v6.0.1 + with: + version: v1.56.1 + args: --out-format=colored-line-number + working-directory: ${{matrix.working-directory}} diff --git a/.github/workflows/pr-verify.yml b/.github/workflows/pr-verify.yml index 0198b590bb..51e8acaaf3 100644 --- a/.github/workflows/pr-verify.yml +++ b/.github/workflows/pr-verify.yml @@ -4,6 +4,9 @@ on: pull_request_target: types: [opened, edited, synchronize, reopened] +permissions: + checks: write + jobs: verify: runs-on: ubuntu-latest diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000000..41d312832f --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,38 @@ +name: release + +on: + push: + tags: + - 'v*' + +permissions: + contents: write # required to write to github release. + +jobs: + release: + name: Create draft release + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.22' + - name: Set version info + run: | + echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV + echo "PREVIOUS_VERSION=$(git describe --abbrev=0 2> /dev/null)" >> $GITHUB_ENV + echo "RELEASE_BRANCH=release-$(echo ${GITHUB_REF_NAME} | grep -Eo '[0-9]\.[0-9]+')" >> $GITHUB_ENV + echo "RELEASE_TAG=${GITHUB_REF_NAME}" >> $GITHUB_ENV + - name: Run release + run: | + echo "Version is: $VERSION" + echo "Previous version is: $PREVIOUS_VERSION" + echo "Release branch is: $RELEASE_BRANCH" + echo "Release tag is: $RELEASE_TAG" + make release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 5c5e4e5642..ada3a863fa 100644 --- a/.gitignore +++ b/.gitignore @@ -57,6 +57,7 @@ junit.*.xml .DS_Store .tiltbuild +dist # test results _artifacts diff --git a/.golangci.yml b/.golangci.yml index bd41d21015..4f5d11a134 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,79 +1,126 @@ linters: - enable-all: true - disable: - - bidichk - - contextcheck - - cyclop - - dupl - - durationcheck - - errname - - errorlint - - exhaustive - - exhaustivestruct - - exhaustruct - - forcetypeassert - - forbidigo - - funlen - - gochecknoglobals - - gochecknoinits - - gocognit - - godox - - goerr113 - - gofumpt - - golint - - gomnd - - gomoddirectives - - gomodguard - - interfacer - - ireturn - - lll - - makezero - - maligned - - musttag - - nestif - - nilnil - - nlreturn - - nonamedreturns - - nosnakecase - - paralleltest - - promlinter - - scopelint - - sqlclosecheck - - tagliatelle - - tenv - - testpackage - - tparallel - - varnamelen - - wastedassign - - wrapcheck - - wsl - - deadcode - - ifshort - - structcheck - - varcheck - - interfacebloat + disable-all: true + enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - containedctx + - dogsled + - dupword + - durationcheck + - errcheck + - errchkjson + - exportloopref + - gci + - ginkgolinter + - goconst + - gocritic + - godot + 
- gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - importas + - ineffassign + - loggercheck + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - nosprintfhostport + - prealloc + - predeclared + - revive + - rowserrcheck + - staticcheck + - stylecheck + - thelper + - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + - whitespace linters-settings: - # Restrict revive to exported. - revive: - # see https://github.com/mgechev/revive#available-rules for details. - ignore-generated-header: true - severity: warning - rules: - - name: exported - severity: warning gci: sections: - standard - default - prefix(sigs.k8s.io/cluster-api) ginkgolinter: - # Suppress the wrong length assertion warning. - suppress-len-assertion: true - # Suppress the wrong nil assertion warning. - suppress-nil-assertion: false - # Suppress the wrong error assertion warning. - suppress-err-assertion: true + forbid-focus-container: true + suppress-len-assertion: true # Suppress the wrong length assertion warning. + suppress-nil-assertion: false # Suppress the wrong nil assertion warning. + suppress-err-assertion: true # Suppress the wrong error assertion warning. + gocritic: + enabled-tags: + - diagnostic + - experimental + - performance + disabled-checks: + - appendAssign + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - evalOrder + - ifElseChain + - octalLiteral + - regexpSimplify + - sloppyReassign + - truncateCmp + - typeDefFirst + - unnamedResult + - unnecessaryDefer + - whyNoLint + - wrapperFunc + - rangeValCopy + - hugeParam + - filepathJoin + - emptyStringTest + godot: + # declarations - for top level declaration comments (default); + # toplevel - for top level comments; + # all - for all comments. + scope: toplevel + exclude: + - '^ \+.*' + - '^ ANCHOR.*' + revive: + rules: + # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unreachable-code + - name: redefines-builtin-id + # + # Rules in addition to the recommended configuration above. + # + - name: bool-literal-in-expr + - name: constant-logical-expr + goconst: + ignore-tests: true gosec: excludes: - G307 # Deferring unsafe method "Close" on type "\*os.File" @@ -159,10 +206,14 @@ linters-settings: alias: apimachinerytypes - pkg: "sigs.k8s.io/cluster-api/exp/api/v1beta1" alias: expclusterv1 + nolintlint: + allow-unused: false + allow-leading-space: false + require-specific: true staticcheck: - go: "1.21" + go: "1.22" stylecheck: - go: "1.21" + go: "1.22" depguard: rules: main: @@ -178,7 +229,6 @@ issues: # List of regexps of issue texts to exclude, empty list by default. 
exclude: - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) - - "exported: exported (const|function|method|type|var) (.+) should have comment or be unexported" - "exported: (func|type) name will be used as (.+) by other packages, and that stutters; consider calling this (.+)" - (G104|G107|G404|G505|ST1000) - "G108: Profiling endpoint is automatically exposed on /debug/pprof" @@ -188,6 +238,13 @@ issues: - "net/http.Get must not be called" exclude-rules: # Exclude revive's exported for certain packages and code, e.g. tests and fake. + - linters: + - revive + text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # Exclude some packages or code to require comments, for example test code, or fake clients. - linters: - revive text: exported (method|function|type|const) (.+) should have comment or be unexported @@ -229,6 +286,11 @@ issues: - revive text: "var-naming: don't use underscores in Go names; func (.+) should be (.+)" path: .*/defaults.go + # These directives allow the mock and gc packages to be imported with an underscore everywhere. + - linters: + - revive + text: "var-naming: don't use an underscore in package name" + path: .*/.*(mock|gc_).*/.+\.go # Disable unparam "always receives" which might not be really # useful when building libraries. - linters: diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000000..bd352cfbbf --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,64 @@ +builds: +# clusterctl-aws +- id: "clusterctl-aws" + main: ./cmd/clusterawsadm + binary: bin/clusterctl-aws + env: + - CGO_ENABLED=0 + ldflags: + - -s -w + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMajor={{.Major}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMinor={{.Minor}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitVersion={{.Version}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitCommit={{.Commit}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitTreeState={{.GitTreeState}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.buildDate={{.Date}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd/version.CLIName=clusterctl-aws' + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + +# clusterawsadm +- id: "clusterawsadm" + main: ./cmd/clusterawsadm + binary: bin/clusterawsadm + env: + - CGO_ENABLED=0 + ldflags: + - -s -w + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMajor={{.Major}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitMinor={{.Minor}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitVersion={{.Version}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitCommit={{.Commit}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.gitTreeState={{.GitTreeState}}' + - -X 'sigs.k8s.io/cluster-api-provider-aws/v2/version.buildDate={{.Date}}' + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + +archives: +- id: clusterctl-aws + builds: + - clusterctl-aws + name_template: "clusterctl-aws-{{ .Os }}-{{ .Arch }}" + format: binary +- id: clusterawsadm + builds: + - clusterawsadm + name_template: "clusterawsadm-{{ .Os }}-{{ .Arch }}" + format: binary + +release: + discussion_category_name: General + extra_files: + - glob: 
./templates/*.yaml + - glob: ./out/* + draft: true diff --git a/Makefile b/Makefile index 070b0970a1..ef0bb24e20 100644 --- a/Makefile +++ b/Makefile @@ -20,8 +20,8 @@ include $(ROOT_DIR_RELATIVE)/common.mk # https://suva.sh/posts/well-documented-makefiles # Go -GO_VERSION ?=1.21.5 -GO_CONTAINER_IMAGE ?= public.ecr.aws/docker/library/golang:$(GO_VERSION) +GO_VERSION ?=1.22.6 +GO_CONTAINER_IMAGE ?= golang:$(GO_VERSION) # Directories. ARTIFACTS ?= $(REPO_ROOT)/_artifacts @@ -46,8 +46,10 @@ E2E_CONF_PATH ?= $(E2E_DATA_DIR)/e2e_conf.yaml E2E_EKS_CONF_PATH ?= $(E2E_DATA_DIR)/e2e_eks_conf.yaml KUBETEST_CONF_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/conformance.yaml) EXP_DIR := exp +GORELEASER_CONFIG := .goreleaser.yaml # Binaries. +GO_INSTALL := ./scripts/go_install.sh GO_APIDIFF_BIN := $(BIN_DIR)/go-apidiff GO_APIDIFF := $(TOOLS_DIR)/$(GO_APIDIFF_BIN) CLUSTERCTL := $(BIN_DIR)/clusterctl @@ -58,7 +60,10 @@ DEFAULTER_GEN := $(TOOLS_BIN_DIR)/defaulter-gen ENVSUBST := $(TOOLS_BIN_DIR)/envsubst GH := $(TOOLS_BIN_DIR)/gh GOJQ := $(TOOLS_BIN_DIR)/gojq -GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint +GOLANGCI_LINT_BIN := golangci-lint +GOLANGCI_LINT_VER := $(shell cat .github/workflows/pr-golangci-lint.yaml | grep [[:space:]]version: | sed 's/.*version: //') +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER)) +GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint KIND := $(TOOLS_BIN_DIR)/kind KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize MOCKGEN := $(TOOLS_BIN_DIR)/mockgen @@ -66,6 +71,7 @@ SSM_PLUGIN := $(TOOLS_BIN_DIR)/session-manager-plugin YQ := $(TOOLS_BIN_DIR)/yq KPROMO := $(TOOLS_BIN_DIR)/kpromo RELEASE_NOTES := $(TOOLS_BIN_DIR)/release-notes +GORELEASER := $(TOOLS_BIN_DIR)/goreleaser CLUSTERAWSADM_SRCS := $(call rwildcard,.,cmd/clusterawsadm/*.*) @@ -85,7 +91,7 @@ endif # Release variables STAGING_REGISTRY ?= gcr.io/k8s-staging-cluster-api-aws -STAGING_BUCKET ?= artifacts.k8s-staging-cluster-api-aws.appspot.com +STAGING_BUCKET ?= k8s-staging-cluster-api-aws BUCKET ?= $(STAGING_BUCKET) PROD_REGISTRY := registry.k8s.io/cluster-api-aws REGISTRY ?= $(STAGING_REGISTRY) @@ -143,7 +149,7 @@ EKS_SOURCE_TEMPLATE ?= eks/cluster-template-eks-control-plane-only.yaml # set up `setup-envtest` to install kubebuilder dependency export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.28.3 -SETUP_ENVTEST_VER := v0.0.0-20230131074648-f5014c077fc3 +SETUP_ENVTEST_VER := v0.0.0-20240531134648-6636df17d67b SETUP_ENVTEST_BIN := setup-envtest SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER)) SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest @@ -290,6 +296,9 @@ generate-go-apis: ## Alias for .build/generate-go-apis .PHONY: modules +$(GOLANGCI_LINT): # Build golangci-lint from tools folder. + GOBIN=$(abspath $(TOOLS_BIN_DIR)) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) + .PHONY: lint lint: $(GOLANGCI_LINT) ## Lint codebase $(GOLANGCI_LINT) run -v --fast=false $(GOLANGCI_LINT_EXTRA_ARGS) @@ -494,9 +503,9 @@ check-release-tag: ## Check if the release tag is set @if [ -z "${RELEASE_TAG}" ]; then echo "RELEASE_TAG is not set"; exit 1; fi @if ! 
[ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi -.PHONY: create-gh-release -create-gh-release:$(GH) ## Create release on Github - $(GH) release create $(VERSION) -d -F $(RELEASE_DIR)/CHANGELOG.md -t $(VERSION) -R $(GH_REPO) +.PHONY: check-release-branch +check-release-branch: ## Check if the release branch is set + @if [ -z "${RELEASE_BRANCH}" ]; then echo "RELEASE_BRANCH is not set"; exit 1; fi .PHONY: compiled-manifest compiled-manifest: $(RELEASE_DIR) $(KUSTOMIZE) ## Compile the manifest files @@ -558,13 +567,12 @@ list-image: ## List images for RELEASE_TAG gcloud container images list-tags $(STAGING_REGISTRY)/$(IMAGE) --filter="tags=('$(RELEASE_TAG)')" --format=json .PHONY: release -release: clean-release check-release-tag $(RELEASE_DIR) ## Builds and push container images using the latest git tag for the commit. +release: clean-release check-release-tag check-release-branch $(RELEASE_DIR) $(GORELEASER) ## Builds and push container images using the latest git tag for the commit. git checkout "${RELEASE_TAG}" $(MAKE) release-changelog - $(MAKE) release-binaries CORE_CONTROLLER_IMG=$(PROD_REGISTRY)/$(CORE_IMAGE_NAME) $(MAKE) release-manifests - $(MAKE) release-templates $(MAKE) release-policies + $(GORELEASER) release --config $(GORELEASER_CONFIG) --release-notes $(RELEASE_DIR)/CHANGELOG.md --clean release-policies: $(RELEASE_POLICIES) ## Release policies @@ -591,36 +599,15 @@ release-manifests: ## Release manifest files .PHONY: release-changelog release-changelog: $(RELEASE_NOTES) check-release-tag check-previous-release-tag check-github-token $(RELEASE_DIR) - $(RELEASE_NOTES) --debug --org $(GH_ORG_NAME) --repo $(GH_REPO_NAME) --start-sha $(shell git rev-list -n 1 ${PREVIOUS_VERSION}) --end-sha $(shell git rev-list -n 1 ${RELEASE_TAG}) --output $(RELEASE_DIR)/CHANGELOG.md --go-template go-template:$(REPO_ROOT)/hack/changelog.tpl --dependencies=true + $(RELEASE_NOTES) --debug --org $(GH_ORG_NAME) --repo $(GH_REPO_NAME) --start-sha $(shell git rev-list -n 1 ${PREVIOUS_VERSION}) --end-sha $(shell git rev-list -n 1 ${RELEASE_TAG}) --output $(RELEASE_DIR)/CHANGELOG.md --go-template go-template:$(REPO_ROOT)/hack/changelog.tpl --dependencies=false --branch=${RELEASE_BRANCH} --required-author="" .PHONY: promote-images promote-images: $(KPROMO) $(YQ) $(KPROMO) pr --project cluster-api-aws --tag $(RELEASE_TAG) --reviewers "$(shell ./hack/get-project-maintainers.sh ${YQ})" --fork $(USER_FORK) --image cluster-api-aws-controller .PHONY: release-binaries -release-binaries: ## Builds the binaries to publish with a release - RELEASE_BINARY=./cmd/clusterawsadm GOOS=linux GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=./cmd/clusterawsadm GOOS=linux GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=./cmd/clusterawsadm GOOS=darwin GOARCH=amd64 $(MAKE) release-binary - RELEASE_BINARY=./cmd/clusterawsadm GOOS=darwin GOARCH=arm64 $(MAKE) release-binary - RELEASE_BINARY=./cmd/clusterawsadm GOOS=windows GOARCH=amd64 EXT=.exe $(MAKE) release-binary - RELEASE_BINARY=./cmd/clusterawsadm GOOS=windows GOARCH=arm64 EXT=.exe $(MAKE) release-binary - -.PHONY: release-binary -release-binary: $(RELEASE_DIR) versions.mk build-toolchain ## Release binary - docker run \ - --rm \ - -e CGO_ENABLED=0 \ - -e GOOS=$(GOOS) \ - -e GOARCH=$(GOARCH) \ - --mount=source=gocache,target=/go/pkg/mod \ - --mount=source=gocache,target=/root/.cache/go-build \ - -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \ - -w /workspace \ - 
$(TOOLCHAIN_IMAGE) \ - git config --global --add safe.directory /workspace; \ - go build -ldflags '$(LDFLAGS) -extldflags "-static"' \ - -o $(RELEASE_DIR)/$(notdir $(RELEASE_BINARY))-$(GOOS)-$(GOARCH)$(EXT) $(RELEASE_BINARY) +release-binaries: $(GORELEASER) ## Builds only the binaries, not a release. + $(GORELEASER) build --config $(GORELEASER_CONFIG) --snapshot --clean .PHONY: release-staging release-staging: ## Builds and push container images and manifests to the staging bucket. @@ -642,18 +629,12 @@ release-staging-nightly: ## Tags and push container images to the staging bucket release-alias-tag: # Adds the tag to the last build tag. gcloud container images add-tag -q $(CORE_CONTROLLER_IMG):$(TAG) $(CORE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) -.PHONY: release-templates -release-templates: $(RELEASE_DIR) ## Generate release templates - cp templates/cluster-template*.yaml $(RELEASE_DIR)/ - .PHONY: upload-staging-artifacts upload-staging-artifacts: ## Upload release artifacts to the staging bucket + # Example manifest location: https://storage.googleapis.com/k8s-staging-cluster-api-aws/components/nightly_main_20240425/infrastructure-components.yaml + # Please note that these files are deleted after a certain period, at the time of this writing 60 days after file creation. gsutil cp $(RELEASE_DIR)/* gs://$(BUCKET)/components/$(RELEASE_ALIAS_TAG) -.PHONY: upload-gh-artifacts -upload-gh-artifacts: $(GH) ## Upload artifacts to Github release - $(GH) release upload $(VERSION) -R $(GH_REPO) --clobber $(RELEASE_DIR)/* - IMAGE_PATCH_DIR := $(ARTIFACTS)/image-patch $(IMAGE_PATCH_DIR): $(ARTIFACTS) diff --git a/OWNERS b/OWNERS index 5d3e834178..e70b70835c 100644 --- a/OWNERS +++ b/OWNERS @@ -3,7 +3,6 @@ approvers: - sig-cluster-lifecycle-leads - cluster-api-admins - - cluster-api-maintainers - cluster-api-aws-admins - cluster-api-aws-maintainers @@ -11,6 +10,7 @@ reviewers: - cluster-api-aws-admins - cluster-api-aws-maintainers - cluster-api-aws-reviewers + - cluster-api-maintainers emeritus_approvers: - AverageMarcus @@ -21,3 +21,4 @@ emeritus_approvers: - rudoi - sedefsavas - Skarlso + - vincepri diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 61e20308a9..3efa847ddb 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -7,10 +7,11 @@ aliases: - neolit123 - vincepri cluster-api-admins: - - CecileRobertMichon + - fabriziopandini + - sbueringer - vincepri cluster-api-maintainers: - - CecileRobertMichon + - chrischdi - enxebre - fabriziopandini - killianmuldoon @@ -22,11 +23,11 @@ aliases: - richardcase - Ankitasw - dlipovetsky - - vincepri + - nrb + - AndiDog cluster-api-aws-reviewers: - luthermonson - cnmcavoy - - nrb - faiq - fiunchinho - - AndiDog + - damdo diff --git a/README.md b/README.md index b62fde62c3..0c7730b73f 100644 --- a/README.md +++ b/README.md @@ -187,10 +187,9 @@ Thank you to all contributors and a special thanks to our current maintainers & | [@richardcase](https://github.com/richardcase) (from 2020-12-04) | [@cnmcavoy](https://github.com/cnmcavoy) (from 2023-10-16) | | [@Ankitasw](https://github.com/Ankitasw) (from 2022-10-19) | [@AverageMarcus](https://github.com/AverageMarcus) (from 2022-10-19) | | [@dlipovetsky](https://github.com/dlipovetsky) (from 2021-10-31) | [@luthermonson](https://github.com/luthermonson ) (from 2023-03-08) | -| [@vincepri](https://github.com/vincepri) (og & from 2023-10-16) | [@nrb](https://github.com/nrb) (from 2023-10-16) | -| | [@faiq](https://github.com/faiq) (from 2023-10-16) | -| | [@fiunchinho](https://github.com/fiunchinho) (from 2023-11-6) | 
-| | [@AndiDog](https://github.com/AndiDog) (from 2023-12-13) | +| [@nrb](https://github.com/nrb) (from 2024-05-24) | [@faiq](https://github.com/faiq) (from 2023-10-16) | +| [@AndiDog](https://github.com/AndiDog) (from 2023-12-13) | [@fiunchinho](https://github.com/fiunchinho) (from 2023-11-6) | +| | [@damdo](https://github.com/damdo) (from 2023-03-01) | and the previous/emeritus maintainers & reviewers: @@ -203,7 +202,7 @@ and the previous/emeritus maintainers & reviewers: | [@rudoi](https://github.com/rudoi) | [@michaelbeaumont](https://github.com/michaelbeaumont) | | [@sedefsavas](https://github.com/sedefsavas) | [@sethp-nr](https://github.com/sethp-nr) | | [@Skarlso](https://github.com/Skarlso) | [@shivi28](https://github.com/shivi28) | -| | [@dthorsen](https://github.com/dthorsen) | +| [@vincepri](https://github.com/vincepri) | [@dthorsen](https://github.com/dthorsen) | | | [@pydctw](https://github.com/pydctw) | All the CAPA contributors: diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go index a3763d22c8..954fea7a4c 100644 --- a/api/v1beta1/awscluster_conversion.go +++ b/api/v1beta1/awscluster_conversion.go @@ -56,7 +56,10 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { if restored.Status.Bastion != nil { dst.Status.Bastion.InstanceMetadataOptions = restored.Status.Bastion.InstanceMetadataOptions dst.Status.Bastion.PlacementGroupName = restored.Status.Bastion.PlacementGroupName + dst.Status.Bastion.PlacementGroupPartition = restored.Status.Bastion.PlacementGroupPartition dst.Status.Bastion.PrivateDNSName = restored.Status.Bastion.PrivateDNSName + dst.Status.Bastion.PublicIPOnLaunch = restored.Status.Bastion.PublicIPOnLaunch + dst.Status.Bastion.CapacityReservationID = restored.Status.Bastion.CapacityReservationID } dst.Spec.Partition = restored.Spec.Partition @@ -101,15 +104,35 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.NetworkSpec.VPC.EmptyRoutesDefaultVPCSecurityGroup = restored.Spec.NetworkSpec.VPC.EmptyRoutesDefaultVPCSecurityGroup dst.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch = restored.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch + dst.Spec.NetworkSpec.VPC.CarrierGatewayID = restored.Spec.NetworkSpec.VPC.CarrierGatewayID + dst.Spec.NetworkSpec.VPC.SubnetSchema = restored.Spec.NetworkSpec.VPC.SubnetSchema + dst.Spec.NetworkSpec.VPC.SecondaryCidrBlocks = restored.Spec.NetworkSpec.VPC.SecondaryCidrBlocks - // Restore SubnetSpec.ResourceID field, if any. - for _, subnet := range restored.Spec.NetworkSpec.Subnets { - if len(subnet.ResourceID) == 0 { - continue + if restored.Spec.NetworkSpec.VPC.ElasticIPPool != nil { + if dst.Spec.NetworkSpec.VPC.ElasticIPPool == nil { + dst.Spec.NetworkSpec.VPC.ElasticIPPool = &infrav2.ElasticIPPool{} + } + if restored.Spec.NetworkSpec.VPC.ElasticIPPool.PublicIpv4Pool != nil { + dst.Spec.NetworkSpec.VPC.ElasticIPPool.PublicIpv4Pool = restored.Spec.NetworkSpec.VPC.ElasticIPPool.PublicIpv4Pool } + if restored.Spec.NetworkSpec.VPC.ElasticIPPool.PublicIpv4PoolFallBackOrder != nil { + dst.Spec.NetworkSpec.VPC.ElasticIPPool.PublicIpv4PoolFallBackOrder = restored.Spec.NetworkSpec.VPC.ElasticIPPool.PublicIpv4PoolFallBackOrder + } + } + + // Restore SubnetSpec.ResourceID, SubnetSpec.ParentZoneName, and SubnetSpec.ZoneType fields, if any. 
+ for _, subnet := range restored.Spec.NetworkSpec.Subnets { for i, dstSubnet := range dst.Spec.NetworkSpec.Subnets { if dstSubnet.ID == subnet.ID { - dstSubnet.ResourceID = subnet.ResourceID + if len(subnet.ResourceID) > 0 { + dstSubnet.ResourceID = subnet.ResourceID + } + if subnet.ParentZoneName != nil { + dstSubnet.ParentZoneName = subnet.ParentZoneName + } + if subnet.ZoneType != nil { + dstSubnet.ZoneType = subnet.ZoneType + } dstSubnet.DeepCopyInto(&dst.Spec.NetworkSpec.Subnets[i]) } } @@ -150,6 +173,7 @@ func restoreIPAMPool(restored, dst *infrav2.IPAMPool) { func restoreControlPlaneLoadBalancer(restored, dst *infrav2.AWSLoadBalancerSpec) { dst.Name = restored.Name dst.HealthCheckProtocol = restored.HealthCheckProtocol + dst.HealthCheck = restored.HealthCheck dst.LoadBalancerType = restored.LoadBalancerType dst.DisableHostsRewrite = restored.DisableHostsRewrite dst.PreserveClientIP = restored.PreserveClientIP diff --git a/api/v1beta1/awscluster_types.go b/api/v1beta1/awscluster_types.go index 0e06987b4b..ddb1d2cd5a 100644 --- a/api/v1beta1/awscluster_types.go +++ b/api/v1beta1/awscluster_types.go @@ -207,6 +207,7 @@ type AWSClusterStatus struct { Conditions clusterv1.Conditions `json:"conditions,omitempty"` } +// S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. type S3Bucket struct { // ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed // to read control-plane node bootstrap data from S3 Bucket. diff --git a/api/v1beta1/awsclustertemplate_types.go b/api/v1beta1/awsclustertemplate_types.go index 404da0b88a..07e2cf4039 100644 --- a/api/v1beta1/awsclustertemplate_types.go +++ b/api/v1beta1/awsclustertemplate_types.go @@ -53,6 +53,7 @@ func init() { SchemeBuilder.Register(&AWSClusterTemplate{}, &AWSClusterTemplateList{}) } +// AWSClusterTemplateResource defines the desired state of AWSClusterTemplate. type AWSClusterTemplateResource struct { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata diff --git a/api/v1beta1/awsmachine_conversion.go b/api/v1beta1/awsmachine_conversion.go index 55856591a9..6044416cdf 100644 --- a/api/v1beta1/awsmachine_conversion.go +++ b/api/v1beta1/awsmachine_conversion.go @@ -38,8 +38,21 @@ func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Ignition = restored.Spec.Ignition dst.Spec.InstanceMetadataOptions = restored.Spec.InstanceMetadataOptions dst.Spec.PlacementGroupName = restored.Spec.PlacementGroupName + dst.Spec.PlacementGroupPartition = restored.Spec.PlacementGroupPartition dst.Spec.PrivateDNSName = restored.Spec.PrivateDNSName dst.Spec.SecurityGroupOverrides = restored.Spec.SecurityGroupOverrides + dst.Spec.CapacityReservationID = restored.Spec.CapacityReservationID + if restored.Spec.ElasticIPPool != nil { + if dst.Spec.ElasticIPPool == nil { + dst.Spec.ElasticIPPool = &infrav1.ElasticIPPool{} + } + if restored.Spec.ElasticIPPool.PublicIpv4Pool != nil { + dst.Spec.ElasticIPPool.PublicIpv4Pool = restored.Spec.ElasticIPPool.PublicIpv4Pool + } + if restored.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder != nil { + dst.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder = restored.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder + } + } return nil } @@ -87,8 +100,21 @@ func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.Ignition = restored.Spec.Template.Spec.Ignition dst.Spec.Template.Spec.InstanceMetadataOptions = restored.Spec.Template.Spec.InstanceMetadataOptions dst.Spec.Template.Spec.PlacementGroupName = restored.Spec.Template.Spec.PlacementGroupName + dst.Spec.Template.Spec.PlacementGroupPartition = restored.Spec.Template.Spec.PlacementGroupPartition dst.Spec.Template.Spec.PrivateDNSName = restored.Spec.Template.Spec.PrivateDNSName dst.Spec.Template.Spec.SecurityGroupOverrides = restored.Spec.Template.Spec.SecurityGroupOverrides + dst.Spec.Template.Spec.CapacityReservationID = restored.Spec.Template.Spec.CapacityReservationID + if restored.Spec.Template.Spec.ElasticIPPool != nil { + if dst.Spec.Template.Spec.ElasticIPPool == nil { + dst.Spec.Template.Spec.ElasticIPPool = &infrav1.ElasticIPPool{} + } + if restored.Spec.Template.Spec.ElasticIPPool.PublicIpv4Pool != nil { + dst.Spec.Template.Spec.ElasticIPPool.PublicIpv4Pool = restored.Spec.Template.Spec.ElasticIPPool.PublicIpv4Pool + } + if restored.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder != nil { + dst.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder = restored.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder + } + } return nil } diff --git a/api/v1beta1/conversion_test.go b/api/v1beta1/conversion_test.go index 7579d59aa8..24aa530ac2 100644 --- a/api/v1beta1/conversion_test.go +++ b/api/v1beta1/conversion_test.go @@ -19,9 +19,8 @@ package v1beta1 import ( "testing" - . "github.com/onsi/gomega" - fuzz "github.com/google/gofuzz" + . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" @@ -38,7 +37,7 @@ func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { func AWSMachineFuzzer(obj *AWSMachine, c fuzz.Continue) { c.FuzzNoCustom(obj) - + // AWSMachine.Spec.FailureDomain, AWSMachine.Spec.Subnet.ARN and AWSMachine.Spec.AdditionalSecurityGroups.ARN has been removed in v1beta2, so setting it to nil in order to avoid v1beta1 --> v1beta2 --> v1beta1 round trip errors. 
if obj.Spec.Subnet != nil { obj.Spec.Subnet.ARN = nil @@ -54,7 +53,7 @@ func AWSMachineFuzzer(obj *AWSMachine, c fuzz.Continue) { func AWSMachineTemplateFuzzer(obj *AWSMachineTemplate, c fuzz.Continue) { c.FuzzNoCustom(obj) - + // AWSMachineTemplate.Spec.Template.Spec.FailureDomain, AWSMachineTemplate.Spec.Template.Spec.Subnet.ARN and AWSMachineTemplate.Spec.Template.Spec.AdditionalSecurityGroups.ARN has been removed in v1beta2, so setting it to nil in order to avoid v1beta1 --> v1beta2 --> v1beta round trip errors. if obj.Spec.Template.Spec.Subnet != nil { obj.Spec.Template.Spec.Subnet.ARN = nil @@ -81,16 +80,16 @@ func TestFuzzyConversion(t *testing.T) { })) t.Run("for AWSMachine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1beta2.AWSMachine{}, - Spoke: &AWSMachine{}, + Scheme: scheme, + Hub: &v1beta2.AWSMachine{}, + Spoke: &AWSMachine{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for AWSMachineTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1beta2.AWSMachineTemplate{}, - Spoke: &AWSMachineTemplate{}, + Scheme: scheme, + Hub: &v1beta2.AWSMachineTemplate{}, + Spoke: &AWSMachineTemplate{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 6fab23cc8a..a85eaf08d3 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -1235,6 +1235,7 @@ func autoConvert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in * out.CrossZoneLoadBalancing = in.CrossZoneLoadBalancing out.Subnets = *(*[]string)(unsafe.Pointer(&in.Subnets)) out.HealthCheckProtocol = (*ClassicELBProtocol)(unsafe.Pointer(in.HealthCheckProtocol)) + // WARNING: in.HealthCheck requires manual conversion: does not exist in peer-type out.AdditionalSecurityGroups = *(*[]string)(unsafe.Pointer(&in.AdditionalSecurityGroups)) // WARNING: in.AdditionalListeners requires manual conversion: does not exist in peer-type // WARNING: in.IngressRules requires manual conversion: does not exist in peer-type @@ -1388,6 +1389,7 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags)) out.IAMInstanceProfile = in.IAMInstanceProfile out.PublicIP = (*bool)(unsafe.Pointer(in.PublicIP)) + // WARNING: in.ElasticIPPool requires manual conversion: does not exist in peer-type if in.AdditionalSecurityGroups != nil { in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups *out = make([]AWSResourceReference, len(*in)) @@ -1428,8 +1430,10 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW } out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions)) // WARNING: in.PlacementGroupName requires manual conversion: does not exist in peer-type + // WARNING: in.PlacementGroupPartition requires manual conversion: does not exist in peer-type out.Tenancy = in.Tenancy // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type + // WARNING: in.CapacityReservationID requires manual conversion: does not exist in peer-type return nil } @@ -1938,6 +1942,8 @@ func Convert_v1beta1_Ignition_To_v1beta2_Ignition(in *Ignition, out *v1beta2.Ign func autoConvert_v1beta2_Ignition_To_v1beta1_Ignition(in *v1beta2.Ignition, out *Ignition, s conversion.Scope) error { out.Version = in.Version // WARNING: in.StorageType requires manual conversion: does 
not exist in peer-type + // WARNING: in.Proxy requires manual conversion: does not exist in peer-type + // WARNING: in.TLS requires manual conversion: does not exist in peer-type return nil } @@ -1966,6 +1972,7 @@ func autoConvert_v1beta2_IngressRule_To_v1beta1_IngressRule(in *v1beta2.IngressR out.IPv6CidrBlocks = *(*[]string)(unsafe.Pointer(&in.IPv6CidrBlocks)) out.SourceSecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SourceSecurityGroupIDs)) // WARNING: in.SourceSecurityGroupRoles requires manual conversion: does not exist in peer-type + // WARNING: in.NatGatewaysIPsSource requires manual conversion: does not exist in peer-type return nil } @@ -2022,10 +2029,13 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out out.AvailabilityZone = in.AvailabilityZone out.SpotMarketOptions = (*SpotMarketOptions)(unsafe.Pointer(in.SpotMarketOptions)) // WARNING: in.PlacementGroupName requires manual conversion: does not exist in peer-type + // WARNING: in.PlacementGroupPartition requires manual conversion: does not exist in peer-type out.Tenancy = in.Tenancy out.VolumeIDs = *(*[]string)(unsafe.Pointer(&in.VolumeIDs)) // WARNING: in.InstanceMetadataOptions requires manual conversion: does not exist in peer-type // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type + // WARNING: in.PublicIPOnLaunch requires manual conversion: does not exist in peer-type + // WARNING: in.CapacityReservationID requires manual conversion: does not exist in peer-type return nil } @@ -2159,6 +2169,7 @@ func autoConvert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(in *v1beta2.S3Bucket, out out.NodesIAMInstanceProfiles = *(*[]string)(unsafe.Pointer(&in.NodesIAMInstanceProfiles)) // WARNING: in.PresignedURLDuration requires manual conversion: does not exist in peer-type out.Name = in.Name + // WARNING: in.BestEffortDeleteObjects requires manual conversion: does not exist in peer-type return nil } @@ -2257,6 +2268,8 @@ func autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *v1beta2.SubnetSpec out.RouteTableID = (*string)(unsafe.Pointer(in.RouteTableID)) out.NatGatewayID = (*string)(unsafe.Pointer(in.NatGatewayID)) out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags)) + // WARNING: in.ZoneType requires manual conversion: does not exist in peer-type + // WARNING: in.ParentZoneName requires manual conversion: does not exist in peer-type return nil } @@ -2287,6 +2300,7 @@ func Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(in *VPCSpec, out *v1beta2.VPCSpe func autoConvert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(in *v1beta2.VPCSpec, out *VPCSpec, s conversion.Scope) error { out.ID = in.ID out.CidrBlock = in.CidrBlock + // WARNING: in.SecondaryCidrBlocks requires manual conversion: does not exist in peer-type // WARNING: in.IPAMPool requires manual conversion: does not exist in peer-type if in.IPv6 != nil { in, out := &in.IPv6, &out.IPv6 @@ -2298,11 +2312,14 @@ func autoConvert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(in *v1beta2.VPCSpec, out *VP out.IPv6 = nil } out.InternetGatewayID = (*string)(unsafe.Pointer(in.InternetGatewayID)) + // WARNING: in.CarrierGatewayID requires manual conversion: does not exist in peer-type out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags)) out.AvailabilityZoneUsageLimit = (*int)(unsafe.Pointer(in.AvailabilityZoneUsageLimit)) out.AvailabilityZoneSelection = (*AZSelectionScheme)(unsafe.Pointer(in.AvailabilityZoneSelection)) // WARNING: in.EmptyRoutesDefaultVPCSecurityGroup requires manual conversion: does not exist in peer-type // WARNING: 
in.PrivateDNSHostnameTypeOnLaunch requires manual conversion: does not exist in peer-type + // WARNING: in.ElasticIPPool requires manual conversion: does not exist in peer-type + // WARNING: in.SubnetSchema requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1beta2/awscluster_types.go b/api/v1beta2/awscluster_types.go index 1df6c53b89..213ad99c56 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -166,13 +166,19 @@ type Bastion struct { AMI string `json:"ami,omitempty"` } +// LoadBalancerType defines the type of load balancer to use. type LoadBalancerType string var ( - LoadBalancerTypeClassic = LoadBalancerType("classic") - LoadBalancerTypeELB = LoadBalancerType("elb") - LoadBalancerTypeALB = LoadBalancerType("alb") - LoadBalancerTypeNLB = LoadBalancerType("nlb") + // LoadBalancerTypeClassic is the classic ELB type. + LoadBalancerTypeClassic = LoadBalancerType("classic") + // LoadBalancerTypeELB is the ELB type. + LoadBalancerTypeELB = LoadBalancerType("elb") + // LoadBalancerTypeALB is the ALB type. + LoadBalancerTypeALB = LoadBalancerType("alb") + // LoadBalancerTypeNLB is the NLB type. + LoadBalancerTypeNLB = LoadBalancerType("nlb") + // LoadBalancerTypeDisabled disables the load balancer. LoadBalancerTypeDisabled = LoadBalancerType("disabled") ) @@ -214,6 +220,10 @@ type AWSLoadBalancerSpec struct { // +optional HealthCheckProtocol *ELBProtocol `json:"healthCheckProtocol,omitempty"` + // HealthCheck sets custom health check configuration to the API target group. + // +optional + HealthCheck *TargetGroupHealthCheckAPISpec `json:"healthCheck,omitempty"` + // AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs // This is optional - if not provided new security groups will be created for the load balancer // +optional @@ -251,11 +261,16 @@ type AdditionalListenerSpec struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 Port int64 `json:"port"` + // Protocol sets the protocol for the additional listener. // Currently only TCP is supported. // +kubebuilder:validation:Enum=TCP // +kubebuilder:default=TCP Protocol ELBProtocol `json:"protocol,omitempty"` + + // HealthCheck sets the optional custom health check configuration to the API target group. + // +optional + HealthCheck *TargetGroupHealthCheckAdditionalSpec `json:"healthCheck,omitempty"` } // AWSClusterStatus defines the observed state of AWSCluster. @@ -268,6 +283,7 @@ type AWSClusterStatus struct { Conditions clusterv1.Conditions `json:"conditions,omitempty"` } +// S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. type S3Bucket struct { // ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed // to read control-plane node bootstrap data from S3 Bucket. @@ -293,6 +309,10 @@ type S3Bucket struct { // +kubebuilder:validation:MaxLength:=63 // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$` Name string `json:"name"` + + // BestEffortDeleteObjects defines whether access/permission errors during object deletion should be ignored. 
+ // +optional + BestEffortDeleteObjects *bool `json:"bestEffortDeleteObjects,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index 4e1a2dbb12..1cb8012282 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -153,6 +153,16 @@ func (r *AWSCluster) validateControlPlaneLoadBalancerUpdate(oldlb, newlb *AWSLoa ) } } else { + // A disabled Load Balancer has many implications that must be treated as immutable/ + // this is mostly used by externally managed Control Plane, and there's no need to support type changes. + // More info: https://kubernetes.slack.com/archives/CD6U2V71N/p1708983246100859?thread_ts=1708973478.410979&cid=CD6U2V71N + if (oldlb.LoadBalancerType == LoadBalancerTypeDisabled && newlb.LoadBalancerType != LoadBalancerTypeDisabled) || + (newlb.LoadBalancerType == LoadBalancerTypeDisabled && oldlb.LoadBalancerType != LoadBalancerTypeDisabled) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "type"), + newlb.Scheme, "field is immutable when created of disabled type"), + ) + } // If old scheme was not nil, the new scheme should be the same. if !cmp.Equal(oldlb.Scheme, newlb.Scheme) { allErrs = append(allErrs, @@ -238,6 +248,11 @@ func (r *AWSCluster) validateNetwork() field.ErrorList { if subnet.IsIPv6 || subnet.IPv6CidrBlock != "" { allErrs = append(allErrs, field.Invalid(field.NewPath("subnets"), r.Spec.NetworkSpec.Subnets, "IPv6 cannot be used with unmanaged clusters at this time.")) } + if subnet.ZoneType != nil && subnet.IsEdge() { + if subnet.ParentZoneName == nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("subnets"), r.Spec.NetworkSpec.Subnets, "ParentZoneName must be set when ZoneType is 'local-zone'.")) + } + } } if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPAMPool != nil { @@ -249,8 +264,30 @@ func (r *AWSCluster) validateNetwork() field.ErrorList { } for _, rule := range r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules { - if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) { - allErrs = append(allErrs, field.Invalid(field.NewPath("additionalControlPlaneIngressRules"), r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) + allErrs = append(allErrs, r.validateIngressRule(rule)...) 
+ } + + if r.Spec.NetworkSpec.VPC.ElasticIPPool != nil { + eipp := r.Spec.NetworkSpec.VPC.ElasticIPPool + if eipp.PublicIpv4Pool != nil { + if eipp.PublicIpv4PoolFallBackOrder == nil { + return append(allErrs, field.Invalid(field.NewPath("elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.NetworkSpec.VPC.ElasticIPPool, "publicIpv4PoolFallbackOrder must be set when publicIpv4Pool is defined.")) + } + awsPublicIpv4PoolPrefix := "ipv4pool-ec2-" + if !strings.HasPrefix(*eipp.PublicIpv4Pool, awsPublicIpv4PoolPrefix) { + return append(allErrs, field.Invalid(field.NewPath("elasticIpPool.publicIpv4Pool"), r.Spec.NetworkSpec.VPC.ElasticIPPool, fmt.Sprintf("publicIpv4Pool must start with %s.", awsPublicIpv4PoolPrefix))) + } + } + if eipp.PublicIpv4Pool == nil && eipp.PublicIpv4PoolFallBackOrder != nil { + return append(allErrs, field.Invalid(field.NewPath("elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.NetworkSpec.VPC.ElasticIPPool, "publicIpv4Pool must be set when publicIpv4PoolFallbackOrder is defined.")) + } + } + + secondaryCidrBlocks := r.Spec.NetworkSpec.VPC.SecondaryCidrBlocks + secondaryCidrBlocksField := field.NewPath("spec", "network", "vpc", "secondaryCidrBlocks") + for _, cidrBlock := range secondaryCidrBlocks { + if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.CidrBlock == cidrBlock.IPv4CidrBlock { + allErrs = append(allErrs, field.Invalid(secondaryCidrBlocksField, secondaryCidrBlocks, fmt.Sprintf("AWSCluster.spec.network.vpc.secondaryCidrBlocks must not contain the primary AWSCluster.spec.network.vpc.cidrBlock %v", r.Spec.NetworkSpec.VPC.CidrBlock))) } } @@ -292,9 +329,7 @@ func (r *AWSCluster) validateControlPlaneLBs() field.ErrorList { } for _, rule := range cp.IngressRules { - if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) - } + allErrs = append(allErrs, r.validateIngressRule(rule)...) 
} } @@ -336,11 +371,19 @@ func (r *AWSCluster) validateControlPlaneLBs() field.ErrorList { } } - for _, rule := range r.Spec.ControlPlaneLoadBalancer.IngressRules { + return allErrs +} + +func (r *AWSCluster) validateIngressRule(rule IngressRule) field.ErrorList { + var allErrs field.ErrorList + if rule.NatGatewaysIPsSource { + if rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil || rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("additionalControlPlaneIngressRules"), r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) + } + } else { if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) { allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) } } - return allErrs } diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index 85342552c6..5252054f7c 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -18,6 +18,7 @@ package v1beta2 import ( "context" + "fmt" "strings" "testing" "time" @@ -407,6 +408,59 @@ func TestAWSClusterValidateCreate(t *testing.T) { }, wantErr: true, }, + { + name: "rejects ingress rules with cidr block, source security group id, role and nat gateway IP source", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + IngressRules: []IngressRule{ + { + Protocol: SecurityGroupProtocolTCP, + IPv6CidrBlocks: []string{"test"}, + SourceSecurityGroupIDs: []string{"test"}, + SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion}, + NatGatewaysIPsSource: true, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "rejects ingress rules with source security role and nat gateway IP source", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + IngressRules: []IngressRule{ + { + Protocol: SecurityGroupProtocolTCP, + SourceSecurityGroupRoles: []SecurityGroupRole{SecurityGroupBastion}, + NatGatewaysIPsSource: true, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "rejects ingress rules with cidr block and nat gateway IP source", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + IngressRules: []IngressRule{ + { + Protocol: SecurityGroupProtocolTCP, + IPv6CidrBlocks: []string{"test"}, + NatGatewaysIPsSource: true, + }, + }, + }, + }, + }, + wantErr: true, + }, { name: "accepts ingress rules with cidr block", cluster: &AWSCluster{ @@ -423,6 +477,22 @@ func TestAWSClusterValidateCreate(t *testing.T) { }, wantErr: false, }, + { + name: "accepts ingress rules with nat gateway IPs source", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + IngressRules: []IngressRule{ + { + Protocol: SecurityGroupProtocolTCP, + NatGatewaysIPsSource: true, + }, + }, + }, + }, + }, + wantErr: false, + }, { name: "accepts ingress rules with source security group role", cluster: &AWSCluster{ @@ -597,7 +667,7 @@ func TestAWSClusterValidateCreate(t *testing.T) { g.Eventually(func() bool { err := testEnv.Get(ctx, key, c) return err == nil - }, 10*time.Second).Should(BeTrue()) + 
}, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", cluster.Name)) if tt.expect != nil { tt.expect(g, c.Spec.ControlPlaneLoadBalancer) @@ -607,12 +677,48 @@ func TestAWSClusterValidateCreate(t *testing.T) { } func TestAWSClusterValidateUpdate(t *testing.T) { - tests := []struct { + var tests = []struct { name string oldCluster *AWSCluster newCluster *AWSCluster wantErr bool }{ + { + name: "Control Plane LB type is immutable when switching from disabled to any", + oldCluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + newCluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + LoadBalancerType: LoadBalancerTypeClassic, + }, + }, + }, + wantErr: true, + }, + { + name: "Control Plane LB type is immutable when switching from any to disabled", + oldCluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + LoadBalancerType: LoadBalancerTypeClassic, + }, + }, + }, + newCluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, { name: "region is immutable", oldCluster: &AWSCluster{ @@ -956,6 +1062,7 @@ func TestAWSClusterDefaultCNIIngressRules(t *testing.T) { defaultVPCSpec := VPCSpec{ AvailabilityZoneUsageLimit: &AZUsageLimit, AvailabilityZoneSelection: &AZSelectionSchemeOrdered, + SubnetSchema: &SubnetSchemaPreferPrivate, } g := NewWithT(t) tests := []struct { diff --git a/api/v1beta2/awsclustertemplate_types.go b/api/v1beta2/awsclustertemplate_types.go index 333cb285c3..e0a827fa3d 100644 --- a/api/v1beta2/awsclustertemplate_types.go +++ b/api/v1beta2/awsclustertemplate_types.go @@ -54,6 +54,7 @@ func init() { SchemeBuilder.Register(&AWSClusterTemplate{}, &AWSClusterTemplateList{}) } +// AWSClusterTemplateResource defines the desired state of AWSClusterTemplateResource. type AWSClusterTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go index 10d8ce0dcb..39a649a0e5 100644 --- a/api/v1beta2/awsmachine_types.go +++ b/api/v1beta2/awsmachine_types.go @@ -113,6 +113,11 @@ type AWSMachineSpec struct { // +optional PublicIP *bool `json:"publicIP,omitempty"` + // ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool. + // + // +optional + ElasticIPPool *ElasticIPPool `json:"elasticIpPool,omitempty"` + // AdditionalSecurityGroups is an array of references to security groups that should be applied to the // instance. These security groups would be set in addition to any security groups defined // at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters @@ -172,6 +177,14 @@ type AWSMachineSpec struct { // +optional PlacementGroupName string `json:"placementGroupName,omitempty"` + // PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + // This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + // strategy set to partition. 
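+	// The 1-7 bounds below mirror AWS's documented maximum of seven partitions for a partition placement group.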
+ // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=7 + // +optional + PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"` + // Tenancy indicates if instance should run on shared or single-tenant hardware. // +optional // +kubebuilder:validation:Enum:=default;dedicated;host @@ -180,6 +193,10 @@ type AWSMachineSpec struct { // PrivateDNSName is the options for the instance hostname. // +optional PrivateDNSName *PrivateDNSName `json:"privateDnsName,omitempty"` + + // CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched. + // +optional + CapacityReservationID *string `json:"capacityReservationId,omitempty"` } // CloudInit defines options related to the bootstrapping systems where @@ -210,6 +227,7 @@ type CloudInit struct { } // Ignition defines options related to the bootstrapping systems where Ignition is used. +// For more information on Ignition configuration, see https://coreos.github.io/butane/specs/ type Ignition struct { // Version defines which version of Ignition will be used to generate bootstrap data. // @@ -237,6 +255,66 @@ type Ignition struct { // +kubebuilder:default="ClusterObjectStore" // +kubebuilder:validation:Enum:="ClusterObjectStore";"UnencryptedUserData" StorageType IgnitionStorageTypeOption `json:"storageType,omitempty"` + + // Proxy defines proxy settings for Ignition. + // Only valid for Ignition versions 3.1 and above. + // +optional + Proxy *IgnitionProxy `json:"proxy,omitempty"` + + // TLS defines TLS settings for Ignition. + // Only valid for Ignition versions 3.1 and above. + // +optional + TLS *IgnitionTLS `json:"tls,omitempty"` +} + +// IgnitionCASource defines the source of the certificate authority to use for Ignition. +// +kubebuilder:validation:MaxLength:=65536 +type IgnitionCASource string + +// IgnitionTLS defines TLS settings for Ignition. +type IgnitionTLS struct { + // CASources defines the list of certificate authorities to use for Ignition. + // The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates. + // Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme. + // + // +optional + // +kubebuilder:validation:MaxItems=64 + CASources []IgnitionCASource `json:"certificateAuthorities,omitempty"` +} + +// IgnitionNoProxy defines the list of domains to not proxy for Ignition. +// +kubebuilder:validation:MaxLength:=2048 +type IgnitionNoProxy string + +// IgnitionProxy defines proxy settings for Ignition. +type IgnitionProxy struct { + // HTTPProxy is the HTTP proxy to use for Ignition. + // A single URL that specifies the proxy server to use for HTTP and HTTPS requests, + // unless overridden by the HTTPSProxy or NoProxy options. + // +optional + HTTPProxy *string `json:"httpProxy,omitempty"` + + // HTTPSProxy is the HTTPS proxy to use for Ignition. + // A single URL that specifies the proxy server to use for HTTPS requests, + // unless overridden by the NoProxy option. + // +optional + HTTPSProxy *string `json:"httpsProxy,omitempty"` + + // NoProxy is the list of domains to not proxy for Ignition. + // Specifies a list of strings to hosts that should be excluded from proxying. + // + // Each value is represented by: + // - An IP address prefix (1.2.3.4) + // - An IP address prefix in CIDR notation (1.2.3.4/8) + // - A domain name + // - A domain name matches that name and all subdomains + // - A domain name with a leading . 
matches subdomains only + // - A special DNS label (*), indicates that no proxying should be done + // + // An IP address prefix and domain name can also include a literal port number (1.2.3.4:80). + // +optional + // +kubebuilder:validation:MaxItems=64 + NoProxy []IgnitionNoProxy `json:"noProxy,omitempty"` } // AWSMachineStatus defines the observed state of AWSMachine. diff --git a/api/v1beta2/awsmachine_webhook.go b/api/v1beta2/awsmachine_webhook.go index 2fe32083db..50af4f2211 100644 --- a/api/v1beta2/awsmachine_webhook.go +++ b/api/v1beta2/awsmachine_webhook.go @@ -17,11 +17,19 @@ limitations under the License. package v1beta2 import ( + "encoding/base64" + "fmt" + "net" + "net/url" + "strings" + "github.com/google/go-cmp/cmp" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -57,6 +65,7 @@ func (r *AWSMachine) ValidateCreate() (admission.Warnings, error) { allErrs = append(allErrs, r.validateSSHKeyName()...) allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + allErrs = append(allErrs, r.validateNetworkElasticIPPool()...) return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) } @@ -171,17 +180,132 @@ func (r *AWSMachine) ignitionEnabled() bool { func (r *AWSMachine) validateIgnitionAndCloudInit() field.ErrorList { var allErrs field.ErrorList + if !r.ignitionEnabled() { + return allErrs + } // Feature gate is not enabled but ignition is enabled then send a forbidden error. - if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) && r.ignitionEnabled() { + if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition"), "can be set only if the BootstrapFormatIgnition feature gate is enabled")) } - if r.ignitionEnabled() && r.cloudInitConfigured() { + // If ignition is enabled, cloudInit should not be configured. + if r.cloudInitConfigured() { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit"), "cannot be set if spec.ignition is set")) } + // Proxy and TLS are only valid for Ignition versions >= 3.1. + if r.Spec.Ignition.Version == "2.3" || r.Spec.Ignition.Version == "3.0" { + if r.Spec.Ignition.Proxy != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition", "proxy"), "cannot be set if spec.ignition.version is 2.3 or 3.0")) + } + if r.Spec.Ignition.TLS != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition", "tls"), "cannot be set if spec.ignition.version is 2.3 or 3.0")) + } + } + + allErrs = append(allErrs, r.validateIgnitionProxy()...) + allErrs = append(allErrs, r.validateIgnitionTLS()...) + + return allErrs +} + +func (r *AWSMachine) validateIgnitionProxy() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.Ignition.Proxy == nil { + return allErrs + } + + // Validate HTTPProxy. + if r.Spec.Ignition.Proxy.HTTPProxy != nil { + // Parse the url to check if it is valid. 
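+		// Note: url.Parse is lenient and accepts most strings, so this is a best-effort check;
+		// it rejects, for example, scheme-less values whose first path segment contains a colon
+		// (such as "*:80"), control characters, and malformed percent-encoding.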
+ _, err := url.Parse(*r.Spec.Ignition.Proxy.HTTPProxy) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "httpProxy"), *r.Spec.Ignition.Proxy.HTTPProxy, "invalid URL")) + } + } + + // Validate HTTPSProxy. + if r.Spec.Ignition.Proxy.HTTPSProxy != nil { + // Parse the url to check if it is valid. + _, err := url.Parse(*r.Spec.Ignition.Proxy.HTTPSProxy) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "httpsProxy"), *r.Spec.Ignition.Proxy.HTTPSProxy, "invalid URL")) + } + } + + // Validate NoProxy. + for _, noProxy := range r.Spec.Ignition.Proxy.NoProxy { + noProxy := string(noProxy) + // Validate here that the value `noProxy` is: + // - A domain name + // - A domain name matches that name and all subdomains + // - A domain name with a leading . matches subdomains only + + // A special DNS label (*). + if noProxy == "*" { + continue + } + // An IP address prefix (1.2.3.4). + if ip := net.ParseIP(noProxy); ip != nil { + continue + } + // An IP address prefix in CIDR notation (1.2.3.4/8). + if _, _, err := net.ParseCIDR(noProxy); err == nil { + continue + } + // An IP or domain name with a port. + if _, _, err := net.SplitHostPort(noProxy); err == nil { + continue + } + // A domain name. + if noProxy[0] == '.' { + // If it starts with a dot, it should be a domain name. + noProxy = noProxy[1:] + } + // Validate that the value matches DNS 1123. + if errs := validation.IsDNS1123Subdomain(noProxy); len(errs) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "noProxy"), noProxy, fmt.Sprintf("invalid noProxy value, please refer to the field documentation: %s", strings.Join(errs, "; ")))) + } + } + + return allErrs +} + +func (r *AWSMachine) validateIgnitionTLS() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.Ignition.TLS == nil { + return allErrs + } + + for _, source := range r.Spec.Ignition.TLS.CASources { + // Validate that source is RFC 2397 data URL. + u, err := url.Parse(string(source)) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid URL")) + } + + switch u.Scheme { + case "http", "https", "tftp", "s3", "arn", "gs": + // Valid schemes. + case "data": + // Validate that the data URL is base64 encoded. + i := strings.Index(u.Opaque, ",") + if i < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid data URL")) + } + // Validate that the data URL is base64 encoded. 
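+			// Everything after the first comma is assumed to be standard base64; RFC 2397 data
+			// URLs carrying URL-escaped plain text instead of base64 would be rejected here.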
+ if _, err := base64.StdEncoding.DecodeString(u.Opaque[i+1:]); err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid base64 encoding for data url")) + } + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "unsupported URL scheme")) + } + } + return allErrs } @@ -212,6 +336,31 @@ func (r *AWSMachine) validateRootVolume() field.ErrorList { return allErrs } +func (r *AWSMachine) validateNetworkElasticIPPool() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.ElasticIPPool == nil { + return allErrs + } + if !ptr.Deref(r.Spec.PublicIP, false) { + allErrs = append(allErrs, field.Required(field.NewPath("spec.elasticIpPool"), "publicIp must be set to 'true' to assign custom public IPv4 pools with elasticIpPool")) + } + eipp := r.Spec.ElasticIPPool + if eipp.PublicIpv4Pool != nil { + if eipp.PublicIpv4PoolFallBackOrder == nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.ElasticIPPool, "publicIpv4PoolFallbackOrder must be set when publicIpv4Pool is defined.")) + } + awsPublicIpv4PoolPrefix := "ipv4pool-ec2-" + if !strings.HasPrefix(*eipp.PublicIpv4Pool, awsPublicIpv4PoolPrefix) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.elasticIpPool.publicIpv4Pool"), r.Spec.ElasticIPPool, fmt.Sprintf("publicIpv4Pool must start with %s.", awsPublicIpv4PoolPrefix))) + } + } else if eipp.PublicIpv4PoolFallBackOrder != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.ElasticIPPool, "publicIpv4Pool must be set when publicIpv4PoolFallbackOrder is defined.")) + } + + return allErrs +} + func (r *AWSMachine) validateNonRootVolumes() field.ErrorList { var allErrs field.ErrorList diff --git a/api/v1beta2/awsmachine_webhook_test.go b/api/v1beta2/awsmachine_webhook_test.go index a2b6ecd607..80e7abc45a 100644 --- a/api/v1beta2/awsmachine_webhook_test.go +++ b/api/v1beta2/awsmachine_webhook_test.go @@ -24,8 +24,10 @@ import ( "github.com/aws/aws-sdk-go/aws" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api-provider-aws/v2/feature" utildefaulting "sigs.k8s.io/cluster-api/util/defaulting" ) @@ -248,9 +250,197 @@ func TestAWSMachineCreate(t *testing.T) { }, wantErr: true, }, + { + name: "ignition proxy and TLS can be from version 3.1", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "3.1", + Proxy: &IgnitionProxy{ + HTTPProxy: ptr.To("http://proxy.example.com:3128"), + }, + TLS: &IgnitionTLS{ + CASources: []IgnitionCASource{"s3://example.com/ca.pem"}, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "ignition tls with invalid CASources URL", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "3.1", + TLS: &IgnitionTLS{ + CASources: []IgnitionCASource{"data;;"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ignition proxy with valid URLs, and noproxy", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "3.1", + Proxy: &IgnitionProxy{ + HTTPProxy: ptr.To("http://proxy.example.com:3128"), + HTTPSProxy: ptr.To("https://proxy.example.com:3128"), + NoProxy: []IgnitionNoProxy{ + "10.0.0.1", // single ip + "example.com", // domain + ".example.com", // all subdomains + "example.com:3128", // domain with port + "10.0.0.1:3128", // ip with port + "10.0.0.0/8", // cidr block + "*", // no proxy wildcard + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "ignition proxy with invalid HTTPProxy URL", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "3.1", + Proxy: &IgnitionProxy{ + HTTPProxy: ptr.To("*:80"), + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ignition proxy with invalid HTTPSProxy URL", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "3.1", + Proxy: &IgnitionProxy{ + HTTPSProxy: ptr.To("*:80"), + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ignition proxy with invalid noproxy URL", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "3.1", + Proxy: &IgnitionProxy{ + NoProxy: []IgnitionNoProxy{"&"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "cannot use ignition proxy with version 2.3", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + Ignition: &Ignition{ + Version: "2.3.0", + Proxy: &IgnitionProxy{ + HTTPProxy: ptr.To("http://proxy.example.com:3128"), + }, + }, + }, + }, + wantErr: true, + }, + { + name: "create with valid BYOIPv4", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "type", + PublicIP: aws.Bool(true), + ElasticIPPool: &ElasticIPPool{ + PublicIpv4Pool: aws.String("ipv4pool-ec2-0123456789abcdef0"), + PublicIpv4PoolFallBackOrder: ptr.To(PublicIpv4PoolFallbackOrderAmazonPool), + }, + }, + }, + wantErr: false, + }, + { + name: "error when BYOIPv4 without fallback", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "type", + PublicIP: aws.Bool(true), + ElasticIPPool: &ElasticIPPool{ + PublicIpv4Pool: aws.String("ipv4pool-ec2-0123456789abcdef0"), + }, + }, + }, + wantErr: true, + }, + { + name: "error when BYOIPv4 without public ipv4 pool", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "type", + PublicIP: aws.Bool(true), + ElasticIPPool: &ElasticIPPool{ + 
PublicIpv4PoolFallBackOrder: ptr.To(PublicIpv4PoolFallbackOrderAmazonPool), + }, + }, + }, + wantErr: true, + }, + { + name: "error when BYOIPv4 with non-public IP set", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "type", + PublicIP: aws.Bool(false), + ElasticIPPool: &ElasticIPPool{ + PublicIpv4Pool: aws.String("ipv4pool-ec2-0123456789abcdef0"), + PublicIpv4PoolFallBackOrder: ptr.To(PublicIpv4PoolFallbackOrderAmazonPool), + }, + }, + }, + wantErr: true, + }, + { + name: "error when BYOIPv4 with invalid pool name", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "type", + PublicIP: aws.Bool(true), + ElasticIPPool: &ElasticIPPool{ + PublicIpv4Pool: aws.String("ipv4poolx-ec2-0123456789abcdef"), + PublicIpv4PoolFallBackOrder: ptr.To(PublicIpv4PoolFallbackOrderAmazonPool), + }, + }, + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.BootstrapFormatIgnition, true)() + machine := tt.machine.DeepCopy() machine.ObjectMeta = metav1.ObjectMeta{ GenerateName: "machine-", diff --git a/api/v1beta2/awsmachinetemplate_webhook.go b/api/v1beta2/awsmachinetemplate_webhook.go index 30dee37458..426a42882f 100644 --- a/api/v1beta2/awsmachinetemplate_webhook.go +++ b/api/v1beta2/awsmachinetemplate_webhook.go @@ -180,7 +180,7 @@ func (r *AWSMachineTemplateWebhook) ValidateCreate(_ context.Context, raw runtim var allErrs field.ErrorList obj, ok := raw.(*AWSMachineTemplate) if !ok { - return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a VSphereMachineTemplate but got a %T", raw)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", raw)) } spec := obj.Spec.Template.Spec diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index bfbb96c77a..604ef8e1d5 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -69,6 +69,14 @@ const ( EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed" ) +const ( + // CarrierGatewayReadyCondition reports on the successful reconciliation of carrier gateways. + // Only applicable to managed clusters. + CarrierGatewayReadyCondition clusterv1.ConditionType = "CarrierGatewayReady" + // CarrierGatewayFailedReason used when errors occur during carrier gateway reconciliation. + CarrierGatewayFailedReason = "CarrierGatewayFailed" +) + const ( // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways. // Only applicable to managed clusters. diff --git a/api/v1beta2/doc.go b/api/v1beta2/doc.go index 912b8f6556..4ed8bbddb8 100644 --- a/api/v1beta2/doc.go +++ b/api/v1beta2/doc.go @@ -17,5 +17,5 @@ limitations under the License. // +gencrdrefdocs:force // +groupName=infrastructure.cluster.x-k8s.io -// package v1beta2 contains the v1beta2 API implementation. +// Package v1beta2 contains the v1beta2 API implementation. package v1beta2 diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go index 7b92eca9fa..1d921ac08c 100644 --- a/api/v1beta2/groupversion_info.go +++ b/api/v1beta2/groupversion_info.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// package v1beta2 contains API Schema definitions for the infrastructure v1beta2 API group +// Package v1beta2 contains API Schema definitions for the infrastructure v1beta2 API group // +kubebuilder:object:generate=true // +groupName=infrastructure.cluster.x-k8s.io package v1beta2 diff --git a/api/v1beta2/network_types.go b/api/v1beta2/network_types.go index 55cb919cdc..969d2d8b77 100644 --- a/api/v1beta2/network_types.go +++ b/api/v1beta2/network_types.go @@ -20,6 +20,10 @@ import ( "fmt" "sort" "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "k8s.io/utils/ptr" ) const ( @@ -27,6 +31,23 @@ const ( DefaultAPIServerPort = 6443 // DefaultAPIServerPortString defines the API server port as a string for convenience. DefaultAPIServerPortString = "6443" + // DefaultAPIServerHealthCheckPath the API server health check path. + DefaultAPIServerHealthCheckPath = "/readyz" + // DefaultAPIServerHealthCheckIntervalSec the API server health check interval in seconds. + DefaultAPIServerHealthCheckIntervalSec = 10 + // DefaultAPIServerHealthCheckTimeoutSec the API server health check timeout in seconds. + DefaultAPIServerHealthCheckTimeoutSec = 5 + // DefaultAPIServerHealthThresholdCount the API server health check threshold count. + DefaultAPIServerHealthThresholdCount = 5 + // DefaultAPIServerUnhealthThresholdCount the API server unhealthy check threshold count. + DefaultAPIServerUnhealthThresholdCount = 3 + + // ZoneTypeAvailabilityZone defines the regular AWS zones in the Region. + ZoneTypeAvailabilityZone ZoneType = "availability-zone" + // ZoneTypeLocalZone defines the AWS zone type in Local Zone infrastructure. + ZoneTypeLocalZone ZoneType = "local-zone" + // ZoneTypeWavelengthZone defines the AWS zone type in Wavelength infrastructure. + ZoneTypeWavelengthZone ZoneType = "wavelength-zone" ) // NetworkStatus encapsulates AWS networking resources. @@ -94,18 +115,97 @@ var ( // TargetGroupHealthCheck defines health check settings for the target group. type TargetGroupHealthCheck struct { - Protocol *string `json:"protocol,omitempty"` - Path *string `json:"path,omitempty"` - Port *string `json:"port,omitempty"` - IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` - ThresholdCount *int64 `json:"thresholdCount,omitempty"` + Protocol *string `json:"protocol,omitempty"` + Path *string `json:"path,omitempty"` + Port *string `json:"port,omitempty"` + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + ThresholdCount *int64 `json:"thresholdCount,omitempty"` + UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` +} + +// TargetGroupHealthCheckAPISpec defines the optional health check settings for the API target group. +type TargetGroupHealthCheckAPISpec struct { + // The approximate amount of time, in seconds, between health checks of an individual + // target. + // +kubebuilder:validation:Minimum=5 + // +kubebuilder:validation:Maximum=300 + // +optional + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + + // The amount of time, in seconds, during which no response from a target means + // a failed health check. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=120 + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + + // The number of consecutive health check successes required before considering + // a target healthy. 
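+	// The 2-10 bounds below match the Elastic Load Balancing v2 limits for the healthy threshold count.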
+ // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + ThresholdCount *int64 `json:"thresholdCount,omitempty"` + + // The number of consecutive health check failures required before considering + // a target unhealthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` +} + +// TargetGroupHealthCheckAdditionalSpec defines the optional health check settings for the additional target groups. +type TargetGroupHealthCheckAdditionalSpec struct { + // The protocol to use to health check connect with the target. When not specified the Protocol + // will be the same of the listener. + // +kubebuilder:validation:Enum=TCP;HTTP;HTTPS + // +optional + Protocol *string `json:"protocol,omitempty"` + + // The port the load balancer uses when performing health checks for additional target groups. When + // not specified this value will be set for the same of listener port. + // +optional + Port *string `json:"port,omitempty"` + + // The destination for health checks on the targets when using the protocol HTTP or HTTPS, + // otherwise the path will be ignored. + // +optional + Path *string `json:"path,omitempty"` + // The approximate amount of time, in seconds, between health checks of an individual + // target. + // +kubebuilder:validation:Minimum=5 + // +kubebuilder:validation:Maximum=300 + // +optional + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + + // The amount of time, in seconds, during which no response from a target means + // a failed health check. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=120 + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + + // The number of consecutive health check successes required before considering + // a target healthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + ThresholdCount *int64 `json:"thresholdCount,omitempty"` + + // The number of consecutive health check failures required before considering + // a target unhealthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` } // TargetGroupAttribute defines attribute key values for V2 Load Balancer Attributes. type TargetGroupAttribute string var ( + // TargetGroupAttributeEnablePreserveClientIP defines the attribute key for enabling preserve client IP. TargetGroupAttributeEnablePreserveClientIP = "preserve_client_ip.enabled" ) @@ -113,8 +213,11 @@ var ( type LoadBalancerAttribute string var ( - LoadBalancerAttributeEnableLoadBalancingCrossZone = "load_balancing.cross_zone.enabled" - LoadBalancerAttributeIdleTimeTimeoutSeconds = "idle_timeout.timeout_seconds" + // LoadBalancerAttributeEnableLoadBalancingCrossZone defines the attribute key for enabling load balancing cross zone. + LoadBalancerAttributeEnableLoadBalancingCrossZone = "load_balancing.cross_zone.enabled" + // LoadBalancerAttributeIdleTimeTimeoutSeconds defines the attribute key for idle timeout. + LoadBalancerAttributeIdleTimeTimeoutSeconds = "idle_timeout.timeout_seconds" + // LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds defines the default idle timeout in seconds. LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds = "60" ) @@ -122,6 +225,7 @@ var ( // This is created first, and the ARN is then passed to the listener. 
type TargetGroupSpec struct { // Name of the TargetGroup. Must be unique over the same group of listeners. + // +kubebuilder:validation:MaxLength=32 Name string `json:"name"` // Port is the exposed port Port int64 `json:"port"` @@ -284,6 +388,13 @@ type IPAMPool struct { NetmaskLength int64 `json:"netmaskLength,omitempty"` } +// VpcCidrBlock defines the CIDR block and settings to associate with the managed VPC. Currently, only IPv4 is supported. +type VpcCidrBlock struct { + // IPv4CidrBlock is the IPv4 CIDR block to associate with the managed VPC. + // +kubebuilder:validation:MinLength=1 + IPv4CidrBlock string `json:"ipv4CidrBlock"` +} + // VPCSpec configures an AWS VPC. type VPCSpec struct { // ID is the vpc-id of the VPC this provider should use to create resources. @@ -294,6 +405,12 @@ type VPCSpec struct { // Mutually exclusive with IPAMPool. CidrBlock string `json:"cidrBlock,omitempty"` + // SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. + // Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use + // a separate IP range for pods (e.g. Cilium ENI mode). + // +optional + SecondaryCidrBlocks []VpcCidrBlock `json:"secondaryCidrBlocks,omitempty"` + // IPAMPool defines the IPAMv4 pool to be used for VPC. // Mutually exclusive with CidrBlock. IPAMPool *IPAMPool `json:"ipamPool,omitempty"` @@ -307,6 +424,12 @@ type VPCSpec struct { // +optional InternetGatewayID *string `json:"internetGatewayId,omitempty"` + // CarrierGatewayID is the id of the internet gateway associated with the VPC, + // for carrier network (Wavelength Zones). + // +optional + // +kubebuilder:validation:XValidation:rule="self.startsWith('cagw-')",message="Carrier Gateway ID must start with 'cagw-'" + CarrierGatewayID *string `json:"carrierGatewayId,omitempty"` + // Tags is a collection of tags describing the resource. Tags Tags `json:"tags,omitempty"` @@ -345,6 +468,22 @@ type VPCSpec struct { // +optional // +kubebuilder:validation:Enum:=ip-name;resource-name PrivateDNSHostnameTypeOnLaunch *string `json:"privateDnsHostnameTypeOnLaunch,omitempty"` + + // ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool + // brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for + // the API Server. + // +optional + ElasticIPPool *ElasticIPPool `json:"elasticIpPool,omitempty"` + + // SubnetSchema specifies how CidrBlock should be divided on subnets in the VPC depending on the number of AZs. + // PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. + // PreferPublic - have the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet + // that will be further sub-divided for the private subnets. + // Defaults to PreferPrivate + // +optional + // +kubebuilder:default=PreferPrivate + // +kubebuilder:validation:Enum=PreferPrivate;PreferPublic + SubnetSchema *SubnetSchemaType `json:"subnetSchema,omitempty"` } // String returns a string representation of the VPC. @@ -367,6 +506,22 @@ func (v *VPCSpec) IsIPv6Enabled() bool { return v.IPv6 != nil } +// GetElasticIPPool returns the custom Elastic IP Pool configuration when present. +func (v *VPCSpec) GetElasticIPPool() *ElasticIPPool { + return v.ElasticIPPool +} + +// GetPublicIpv4Pool returns the custom public IPv4 pool brought to AWS when present. 
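+// It returns nil when no ElasticIPPool is configured or when the pool ID is not set.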
+func (v *VPCSpec) GetPublicIpv4Pool() *string {
+	if v.ElasticIPPool == nil {
+		return nil
+	}
+	if v.ElasticIPPool.PublicIpv4Pool != nil {
+		return v.ElasticIPPool.PublicIpv4Pool
+	}
+	return nil
+}
+
 // SubnetSpec configures an AWS Subnet.
 type SubnetSpec struct {
 	// ID defines a unique identifier to reference this resource.
@@ -416,6 +571,42 @@ type SubnetSpec struct {
 
 	// Tags is a collection of tags describing the resource.
 	Tags Tags `json:"tags,omitempty"`
+
+	// ZoneType defines the type of the zone where the subnet is created.
+	//
+	// The valid values are availability-zone, local-zone, and wavelength-zone.
+	//
+	// A subnet with zone type availability-zone (regular) is always selected to create cluster
+	// resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc.
+	//
+	// A subnet with zone type local-zone or wavelength-zone is not eligible to automatically create
+	// regular cluster resources.
+	//
+	// The public subnet in availability-zone or local-zone is associated with a regular public
+	// route table with a default route entry to an Internet Gateway.
+	//
+	// The public subnet in wavelength-zone is associated with a carrier public
+	// route table with a default route entry to a Carrier Gateway.
+	//
+	// The private subnet in the availability-zone is associated with a private route table with
+	// the default route entry to a NAT Gateway created in that zone.
+	//
+	// The private subnet in the local-zone or wavelength-zone is associated with a private route table with
+	// the default route entry re-using the NAT Gateway in the Region (preferred from the
+	// parent zone, the zone type availability-zone in the region, or the first table available).
+	//
+	// +kubebuilder:validation:Enum=availability-zone;local-zone;wavelength-zone
+	// +optional
+	ZoneType *ZoneType `json:"zoneType,omitempty"`
+
+	// ParentZoneName is the name of the parent zone that the subnet's zone is tied to
+	// when the zone is a Local Zone or Wavelength Zone.
+	//
+	// The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName
+	// to select the correct private route table to egress traffic to the internet.
+	//
+	// +optional
+	ParentZoneName *string `json:"parentZoneName,omitempty"`
 }
 
 // GetResourceID returns the identifier for this subnet,
@@ -432,6 +623,59 @@ func (s *SubnetSpec) String() string {
 	return fmt.Sprintf("id=%s/az=%s/public=%v", s.GetResourceID(), s.AvailabilityZone, s.IsPublic)
 }
 
+// IsEdge returns true when the subnet is created in an edge zone
+// (Local Zone or Wavelength Zone).
+func (s *SubnetSpec) IsEdge() bool {
+	if s.ZoneType == nil {
+		return false
+	}
+	if s.ZoneType.Equal(ZoneTypeLocalZone) {
+		return true
+	}
+	if s.ZoneType.Equal(ZoneTypeWavelengthZone) {
+		return true
+	}
+	return false
+}
+
+// IsEdgeWavelength returns true only when the subnet is created in a Wavelength Zone.
+func (s *SubnetSpec) IsEdgeWavelength() bool {
+	if s.ZoneType == nil {
+		return false
+	}
+	if *s.ZoneType == ZoneTypeWavelengthZone {
+		return true
+	}
+	return false
+}
+
+// SetZoneInfo updates the subnets with zone information.
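+// It matches the subnet's AvailabilityZone against the provided EC2 zone metadata and copies
+// the zone type and parent zone name, returning an error when no matching zone is found.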
+func (s *SubnetSpec) SetZoneInfo(zones []*ec2.AvailabilityZone) error { + zoneInfo := func(zoneName string) *ec2.AvailabilityZone { + for _, zone := range zones { + if aws.StringValue(zone.ZoneName) == zoneName { + return zone + } + } + return nil + } + + zone := zoneInfo(s.AvailabilityZone) + if zone == nil { + if len(s.AvailabilityZone) > 0 { + return fmt.Errorf("unable to update zone information for subnet '%v' and zone '%v'", s.ID, s.AvailabilityZone) + } + return fmt.Errorf("unable to update zone information for subnet '%v'", s.ID) + } + if zone.ZoneType != nil { + s.ZoneType = ptr.To(ZoneType(*zone.ZoneType)) + } + if zone.ParentZoneName != nil { + s.ParentZoneName = zone.ParentZoneName + } + return nil +} + // Subnets is a slice of Subnet. // +listType=map // +listMapKey=id @@ -449,6 +693,22 @@ func (s Subnets) ToMap() map[string]*SubnetSpec { // IDs returns a slice of the subnet ids. func (s Subnets) IDs() []string { + res := []string{} + for _, subnet := range s { + // Prevent returning edge zones (Local Zone) to regular Subnet IDs. + // Edge zones should not deploy control plane nodes, and does not support Nat Gateway and + // Network Load Balancers. Any resource for the core infrastructure should not consume edge + // zones. + if subnet.IsEdge() { + continue + } + res = append(res, subnet.GetResourceID()) + } + return res +} + +// IDsWithEdge returns a slice of the subnet ids. +func (s Subnets) IDsWithEdge() []string { res := []string{} for _, subnet := range s { res = append(res, subnet.GetResourceID()) @@ -489,6 +749,10 @@ func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec { // FilterPrivate returns a slice containing all subnets marked as private. func (s Subnets) FilterPrivate() (res Subnets) { for _, x := range s { + // Subnets in AWS Local Zones or Wavelength should not be used by core infrastructure. + if x.IsEdge() { + continue + } if !x.IsPublic { res = append(res, x) } @@ -496,9 +760,24 @@ func (s Subnets) FilterPrivate() (res Subnets) { return } +// FilterNonCni returns the subnets that are NOT intended for usage with the CNI pod network +// (i.e. do NOT have the `sigs.k8s.io/cluster-api-provider-aws/association=secondary` tag). +func (s Subnets) FilterNonCni() (res Subnets) { + for _, x := range s { + if x.Tags[NameAWSSubnetAssociation] != SecondarySubnetTagValue { + res = append(res, x) + } + } + return +} + // FilterPublic returns a slice containing all subnets marked as public. func (s Subnets) FilterPublic() (res Subnets) { for _, x := range s { + // Subnets in AWS Local Zones or Wavelength should not be used by core infrastructure. + if x.IsEdge() { + continue + } if x.IsPublic { res = append(res, x) } @@ -521,7 +800,7 @@ func (s Subnets) GetUniqueZones() []string { keys := make(map[string]bool) zones := []string{} for _, x := range s { - if _, value := keys[x.AvailabilityZone]; !value { + if _, value := keys[x.AvailabilityZone]; len(x.AvailabilityZone) > 0 && !value { keys[x.AvailabilityZone] = true zones = append(zones, x.AvailabilityZone) } @@ -529,6 +808,29 @@ func (s Subnets) GetUniqueZones() []string { return zones } +// SetZoneInfo updates the subnets with zone information. +func (s Subnets) SetZoneInfo(zones []*ec2.AvailabilityZone) error { + for i := range s { + if err := s[i].SetZoneInfo(zones); err != nil { + return err + } + } + return nil +} + +// HasPublicSubnetWavelength returns true when there are subnets in Wavelength zone. 
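+// Note: the check relies on zone metadata having been populated and returns false as soon as
+// a subnet without a ZoneType is encountered.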
+func (s Subnets) HasPublicSubnetWavelength() bool {
+	for _, sub := range s {
+		if sub.ZoneType == nil {
+			return false
+		}
+		if sub.IsPublic && *sub.ZoneType == ZoneTypeWavelengthZone {
+			return true
+		}
+	}
+	return false
+}
+
 // CNISpec defines configuration for CNI.
 type CNISpec struct {
 	// CNIIngressRules specify rules to apply to control plane and worker node security groups.
@@ -651,6 +953,10 @@ type IngressRule struct {
 	// The field will be combined with source security group IDs if specified.
 	// +optional
 	SourceSecurityGroupRoles []SecurityGroupRole `json:"sourceSecurityGroupRoles,omitempty"`
+
+	// NatGatewaysIPsSource uses the NAT gateways IPs as the source for the ingress rule.
+	// +optional
+	NatGatewaysIPsSource bool `json:"natGatewaysIPsSource,omitempty"`
 }
 
 // String returns a string representation of the ingress rule.
@@ -743,3 +1049,70 @@ func (i *IngressRule) Equals(o *IngressRule) bool {
 
 	return true
 }
+
+// ZoneType defines the AWS availability zone type.
+type ZoneType string
+
+// String returns the string representation for the zone type.
+func (z ZoneType) String() string {
+	return string(z)
+}
+
+// Equal compares two zone types.
+func (z ZoneType) Equal(other ZoneType) bool {
+	return z == other
+}
+
+// ElasticIPPool allows configuring an Elastic IP pool for resources allocating
+// public IPv4 addresses on public subnets.
+type ElasticIPPool struct {
+	// PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP addresses for resources
+	// created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom
+	// Public IPv4 pool that you brought to AWS, instead of the Amazon-provided pool. The public IPv4 pool
+	// resource ID starts with 'ipv4pool-ec2'.
+	//
+	// +kubebuilder:validation:MaxLength=30
+	// +optional
+	PublicIpv4Pool *string `json:"publicIpv4Pool,omitempty"`
+
+	// PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted,
+	// that is, when no more IPv4 addresses are available in the pool.
+	//
+	// When set to 'amazon-pool', the controller checks if the pool has available IPv4 addresses; when the pool
+	// has reached its IPv4 limit, the address will be claimed from the Amazon pool (default).
+	//
+	// When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted.
+	//
+	// +kubebuilder:validation:Enum:=amazon-pool;none
+	// +optional
+	PublicIpv4PoolFallBackOrder *PublicIpv4PoolFallbackOrder `json:"publicIpv4PoolFallbackOrder,omitempty"`
+
+	// TODO(mtulio): add future support of user-defined Elastic IP to allow users to assign BYO Public IP from
+	// 'static'/preallocated amazon-provided IPs. The structure currently holds only 'BYO Public IP from Public IPv4 Pool' (user brought to AWS),
+	// although a dedicated structure would help to hold 'BYO Elastic IP' variants like:
+	// - AllocationIdPoolApiLoadBalancer: a user-defined (static) IP address for the Public API Load Balancer.
+	// - AllocationIdPoolNatGateways: a user-defined (static) IP address to allocate to NAT Gateways (egress traffic).
+}
+
+// PublicIpv4PoolFallbackOrder defines the list of available fallback actions when the PublicIpv4Pool is exhausted.
+// 'none' lets the controllers return failures when the PublicIpv4Pool is exhausted - no more IPv4 addresses available.
+// 'amazon-pool' lets the controllers skip the PublicIpv4Pool and use the Amazon pool, the default.
+// +kubebuilder:validation:XValidation:rule="self in ['none','amazon-pool']",message="allowed values are 'none' and 'amazon-pool'" +type PublicIpv4PoolFallbackOrder string + +const ( + // PublicIpv4PoolFallbackOrderAmazonPool refers to use Amazon-pool Public IPv4 Pool as a fallback strategy. + PublicIpv4PoolFallbackOrderAmazonPool = PublicIpv4PoolFallbackOrder("amazon-pool") + + // PublicIpv4PoolFallbackOrderNone refers to not use any fallback strategy. + PublicIpv4PoolFallbackOrderNone = PublicIpv4PoolFallbackOrder("none") +) + +func (r PublicIpv4PoolFallbackOrder) String() string { + return string(r) +} + +// Equal compares PublicIpv4PoolFallbackOrder types and return true if input param is equal. +func (r PublicIpv4PoolFallbackOrder) Equal(e PublicIpv4PoolFallbackOrder) bool { + return r == e +} diff --git a/api/v1beta2/network_types_test.go b/api/v1beta2/network_types_test.go index 3704e6adc4..25409f7c3e 100644 --- a/api/v1beta2/network_types_test.go +++ b/api/v1beta2/network_types_test.go @@ -19,7 +19,10 @@ package v1beta2 import ( "testing" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" + "k8s.io/utils/ptr" ) func TestSGDifference(t *testing.T) { @@ -105,3 +108,743 @@ func TestSGDifference(t *testing.T) { }) } } + +var ( + stubNetworkTypeSubnetsAvailabilityZone = []*SubnetSpec{ + { + ID: "subnet-id-us-east-1a-private", + AvailabilityZone: "us-east-1a", + IsPublic: false, + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + }, + { + ID: "subnet-id-us-east-1a-public", + AvailabilityZone: "us-east-1a", + IsPublic: true, + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + }, + } + stubNetworkTypeSubnetsLocalZone = []*SubnetSpec{ + { + ID: "subnet-id-us-east-1-nyc-1-private", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: false, + ZoneType: ptr.To(ZoneTypeLocalZone), + }, + { + ID: "subnet-id-us-east-1-nyc-1-public", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: true, + ZoneType: ptr.To(ZoneTypeLocalZone), + }, + } + stubNetworkTypeSubnetsWavelengthZone = []*SubnetSpec{ + { + ID: "subnet-id-us-east-1-wl1-nyc-wlz-1-private", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: false, + ZoneType: ptr.To(ZoneTypeWavelengthZone), + }, + { + ID: "subnet-id-us-east-1-wl1-nyc-wlz-1-public", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: true, + ZoneType: ptr.To(ZoneTypeWavelengthZone), + }, + } + + subnetsAllZones = Subnets{ + { + ResourceID: "subnet-az-1a", + AvailabilityZone: "us-east-1a", + }, + { + ResourceID: "subnet-az-1b", + IsPublic: true, + AvailabilityZone: "us-east-1a", + }, + { + ResourceID: "subnet-az-2a", + IsPublic: false, + AvailabilityZone: "us-east-1b", + }, + { + ResourceID: "subnet-az-2b", + IsPublic: true, + AvailabilityZone: "us-east-1b", + }, + { + ResourceID: "subnet-az-3a", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + IsPublic: false, + AvailabilityZone: "us-east-1c", + }, + { + ResourceID: "subnet-az-3b", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + IsPublic: true, + AvailabilityZone: "us-east-1c", + }, + { + ResourceID: "subnet-lz-1a", + ZoneType: ptr.To(ZoneTypeLocalZone), + IsPublic: false, + AvailabilityZone: "us-east-1-nyc-1a", + }, + { + ResourceID: "subnet-lz-2b", + ZoneType: ptr.To(ZoneTypeLocalZone), + IsPublic: true, + AvailabilityZone: "us-east-1-nyc-1a", + }, + { + ResourceID: "subnet-wl-1a", + ZoneType: ptr.To(ZoneTypeWavelengthZone), + IsPublic: false, + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + }, + { + ResourceID: "subnet-wl-1b", + ZoneType: 
ptr.To(ZoneTypeWavelengthZone), + IsPublic: true, + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + }, + } +) + +type testStubNetworkTypes struct{} + +func (ts *testStubNetworkTypes) deepCopyToSubnets(stub []*SubnetSpec) (subnets Subnets) { + for _, sn := range stub { + subnets = append(subnets, *sn.DeepCopy()) + } + return subnets +} + +func (ts *testStubNetworkTypes) deepCopySubnets(stub []*SubnetSpec) (subnets []*SubnetSpec) { + for _, s := range stub { + subnets = append(subnets, s.DeepCopy()) + } + return subnets +} + +func (ts *testStubNetworkTypes) getSubnetsAvailabilityZones() (subnets []*SubnetSpec) { + return ts.deepCopySubnets(stubNetworkTypeSubnetsAvailabilityZone) +} + +func (ts *testStubNetworkTypes) getSubnetsLocalZones() (subnets []*SubnetSpec) { + return ts.deepCopySubnets(stubNetworkTypeSubnetsLocalZone) +} + +func (ts *testStubNetworkTypes) getSubnetsWavelengthZones() (subnets []*SubnetSpec) { + return ts.deepCopySubnets(stubNetworkTypeSubnetsWavelengthZone) +} + +func (ts *testStubNetworkTypes) getSubnets() (sns Subnets) { + subnets := []*SubnetSpec{} + subnets = append(subnets, ts.getSubnetsAvailabilityZones()...) + subnets = append(subnets, ts.getSubnetsLocalZones()...) + subnets = append(subnets, ts.getSubnetsWavelengthZones()...) + sns = ts.deepCopyToSubnets(subnets) + return sns +} + +func TestSubnetSpec_IsEdge(t *testing.T) { + stub := testStubNetworkTypes{} + tests := []struct { + name string + spec *SubnetSpec + want bool + }{ + { + name: "az without type is not edge", + spec: func() *SubnetSpec { + s := stub.getSubnetsAvailabilityZones()[0] + s.ZoneType = nil + return s + }(), + want: false, + }, + { + name: "az is not edge", + spec: stub.getSubnetsAvailabilityZones()[0], + want: false, + }, + { + name: "localzone is edge", + spec: stub.getSubnetsLocalZones()[0], + want: true, + }, + { + name: "wavelength is edge", + spec: stub.getSubnetsWavelengthZones()[0], + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := tt.spec + if got := s.IsEdge(); got != tt.want { + t.Errorf("SubnetSpec.IsEdge() returned unexpected value = got: %v, want: %v", got, tt.want) + } + }) + } +} + +func TestSubnetSpec_IsEdgeWavelength(t *testing.T) { + stub := testStubNetworkTypes{} + tests := []struct { + name string + spec *SubnetSpec + want bool + }{ + { + name: "az without type is not edge wavelength", + spec: func() *SubnetSpec { + s := stub.getSubnetsAvailabilityZones()[0] + s.ZoneType = nil + return s + }(), + want: false, + }, + { + name: "az is not edge wavelength", + spec: stub.getSubnetsAvailabilityZones()[0], + want: false, + }, + { + name: "localzone is not edge wavelength", + spec: stub.getSubnetsLocalZones()[0], + want: false, + }, + { + name: "wavelength is edge wavelength", + spec: stub.getSubnetsWavelengthZones()[0], + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := tt.spec + if got := s.IsEdgeWavelength(); got != tt.want { + t.Errorf("SubnetSpec.IsEdgeWavelength() returned unexpected value = got: %v, want: %v", got, tt.want) + } + }) + } +} + +func TestSubnetSpec_SetZoneInfo(t *testing.T) { + stub := testStubNetworkTypes{} + tests := []struct { + name string + spec *SubnetSpec + zones []*ec2.AvailabilityZone + want *SubnetSpec + wantErr string + }{ + { + name: "set zone information to availability zone subnet", + spec: func() *SubnetSpec { + s := stub.getSubnetsAvailabilityZones()[0] + s.ZoneType = nil + s.ParentZoneName = nil + return s + }(), + zones: []*ec2.AvailabilityZone{ + { 
+ ZoneName: ptr.To[string]("us-east-1a"), + ZoneType: ptr.To[string]("availability-zone"), + }, + }, + want: stub.getSubnetsAvailabilityZones()[0], + }, + { + name: "set zone information to availability zone subnet with many zones", + spec: func() *SubnetSpec { + s := stub.getSubnetsAvailabilityZones()[0] + s.ZoneType = nil + s.ParentZoneName = nil + return s + }(), + zones: []*ec2.AvailabilityZone{ + { + ZoneName: ptr.To[string]("us-east-1b"), + ZoneType: ptr.To[string]("availability-zone"), + }, + { + ZoneName: ptr.To[string]("us-east-1a"), + ZoneType: ptr.To[string]("availability-zone"), + }, + }, + want: stub.getSubnetsAvailabilityZones()[0], + }, + { + name: "want error when zone metadata is not provided", + spec: func() *SubnetSpec { + s := stub.getSubnetsAvailabilityZones()[0] + s.ZoneType = nil + s.ParentZoneName = nil + return s + }(), + zones: []*ec2.AvailabilityZone{}, + wantErr: `unable to update zone information for subnet 'subnet-id-us-east-1a-private' and zone 'us-east-1a'`, + }, + { + name: "want error when subnet's available zone is not set", + spec: func() *SubnetSpec { + s := stub.getSubnetsAvailabilityZones()[0] + s.AvailabilityZone = "" + return s + }(), + zones: []*ec2.AvailabilityZone{ + { + ZoneName: ptr.To[string]("us-east-1a"), + ZoneType: ptr.To[string]("availability-zone"), + }, + }, + wantErr: `unable to update zone information for subnet 'subnet-id-us-east-1a-private'`, + }, + { + name: "set zone information to local zone subnet", + spec: func() *SubnetSpec { + s := stub.getSubnetsLocalZones()[0] + s.ZoneType = nil + s.ParentZoneName = nil + return s + }(), + zones: []*ec2.AvailabilityZone{ + { + ZoneName: ptr.To[string]("us-east-1b"), + ZoneType: ptr.To[string]("availability-zone"), + }, + { + ZoneName: ptr.To[string]("us-east-1a"), + ZoneType: ptr.To[string]("availability-zone"), + }, + { + ZoneName: ptr.To[string]("us-east-1-nyc-1a"), + ZoneType: ptr.To[string]("local-zone"), + }, + }, + want: stub.getSubnetsLocalZones()[0], + }, + { + name: "set zone information to wavelength zone subnet", + spec: func() *SubnetSpec { + s := stub.getSubnetsWavelengthZones()[0] + s.ZoneType = nil + s.ParentZoneName = nil + return s + }(), + zones: []*ec2.AvailabilityZone{ + { + ZoneName: ptr.To[string]("us-east-1b"), + ZoneType: ptr.To[string]("availability-zone"), + }, + { + ZoneName: ptr.To[string]("us-east-1a"), + ZoneType: ptr.To[string]("availability-zone"), + }, + { + ZoneName: ptr.To[string]("us-east-1-wl1-nyc-wlz-1"), + ZoneType: ptr.To[string]("wavelength-zone"), + }, + { + ZoneName: ptr.To[string]("us-east-1-nyc-1a"), + ZoneType: ptr.To[string]("local-zone"), + }, + }, + want: stub.getSubnetsWavelengthZones()[0], + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := tt.spec + err := s.SetZoneInfo(tt.zones) + if err != nil { + if len(tt.wantErr) == 0 { + t.Fatalf("SubnetSpec.SetZoneInfo() got unexpected error: %v", err) + } + if len(tt.wantErr) > 0 && err.Error() != tt.wantErr { + t.Fatalf("SubnetSpec.SetZoneInfo() got unexpected error message:\n got: %v,\nwant: %v", err, tt.wantErr) + } else { + return + } + } + if !cmp.Equal(s, tt.want) { + t.Errorf("SubnetSpec.SetZoneInfo() got unwanted value:\n %v", cmp.Diff(s, tt.want)) + } + }) + } +} + +func TestSubnets_IDs(t *testing.T) { + tests := []struct { + name string + subnets Subnets + want []string + }{ + { + name: "no valid subnet IDs", + subnets: Subnets{}, + want: []string{}, + }, + { + name: "no valid subnet IDs", + subnets: Subnets{ + { + ResourceID: "subnet-lz-1", + ZoneType: 
ptr.To(ZoneTypeLocalZone), + }, + { + ResourceID: "subnet-wl-1", + ZoneType: ptr.To(ZoneTypeWavelengthZone), + }, + }, + want: []string{}, + }, + { + name: "should have only subnet IDs from availability zone", + subnets: Subnets{ + { + ResourceID: "subnet-az-1", + }, + { + ResourceID: "subnet-az-2", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + }, + { + ResourceID: "subnet-lz-1", + ZoneType: ptr.To(ZoneTypeLocalZone), + }, + }, + want: []string{"subnet-az-1", "subnet-az-2"}, + }, + { + name: "should have only subnet IDs from availability zone", + subnets: Subnets{ + { + ResourceID: "subnet-az-1", + }, + { + ResourceID: "subnet-az-2", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + }, + { + ResourceID: "subnet-lz-1", + ZoneType: ptr.To(ZoneTypeLocalZone), + }, + { + ResourceID: "subnet-wl-1", + ZoneType: ptr.To(ZoneTypeWavelengthZone), + }, + }, + want: []string{"subnet-az-1", "subnet-az-2"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.subnets.IDs(); !cmp.Equal(got, tt.want) { + t.Errorf("Subnets.IDs() diff: %v", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestSubnets_IDsWithEdge(t *testing.T) { + tests := []struct { + name string + subnets Subnets + want []string + }{ + { + name: "invalid subnet IDs", + subnets: nil, + want: []string{}, + }, + { + name: "invalid subnet IDs", + subnets: Subnets{}, + want: []string{}, + }, + { + name: "subnet IDs for all zones", + subnets: Subnets{ + { + ResourceID: "subnet-az-1", + }, + { + ResourceID: "subnet-az-2", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + }, + { + ResourceID: "subnet-lz-1", + ZoneType: ptr.To(ZoneTypeLocalZone), + }, + }, + want: []string{"subnet-az-1", "subnet-az-2", "subnet-lz-1"}, + }, + { + name: "subnet IDs for all zones", + subnets: Subnets{ + { + ResourceID: "subnet-az-1", + }, + { + ResourceID: "subnet-az-2", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + }, + { + ResourceID: "subnet-lz-1", + ZoneType: ptr.To(ZoneTypeLocalZone), + }, + { + ResourceID: "subnet-wl-1", + ZoneType: ptr.To(ZoneTypeWavelengthZone), + }, + }, + want: []string{"subnet-az-1", "subnet-az-2", "subnet-lz-1", "subnet-wl-1"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.subnets.IDsWithEdge(); !cmp.Equal(got, tt.want) { + t.Errorf("Subnets.IDsWithEdge() got unwanted value:\n %v", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestSubnets_FilterPrivate(t *testing.T) { + tests := []struct { + name string + subnets Subnets + want Subnets + }{ + { + name: "no private subnets", + subnets: nil, + want: nil, + }, + { + name: "no private subnets", + subnets: Subnets{}, + want: nil, + }, + { + name: "no private subnets", + subnets: Subnets{ + { + ResourceID: "subnet-az-1b", + IsPublic: true, + }, + { + ResourceID: "subnet-az-2b", + IsPublic: true, + }, + { + ResourceID: "subnet-az-3b", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + IsPublic: true, + }, + { + ResourceID: "subnet-lz-1a", + ZoneType: ptr.To(ZoneTypeLocalZone), + IsPublic: false, + }, + { + ResourceID: "subnet-lz-2b", + ZoneType: ptr.To(ZoneTypeLocalZone), + IsPublic: true, + }, + }, + want: nil, + }, + { + name: "private subnets", + subnets: subnetsAllZones, + want: Subnets{ + { + ResourceID: "subnet-az-1a", + AvailabilityZone: "us-east-1a", + }, + { + ResourceID: "subnet-az-2a", + IsPublic: false, + AvailabilityZone: "us-east-1b", + }, + { + ResourceID: "subnet-az-3a", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + IsPublic: false, + AvailabilityZone: "us-east-1c", + }, + }, + }, + } + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.subnets.FilterPrivate(); !cmp.Equal(got, tt.want) { + t.Errorf("Subnets.FilterPrivate() got unwanted value:\n %v", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestSubnets_FilterPublic(t *testing.T) { + tests := []struct { + name string + subnets Subnets + want Subnets + }{ + { + name: "empty subnets", + subnets: nil, + want: nil, + }, + { + name: "empty subnets", + subnets: Subnets{}, + want: nil, + }, + { + name: "no public subnets", + subnets: Subnets{ + { + ResourceID: "subnet-az-1a", + IsPublic: false, + }, + { + ResourceID: "subnet-az-2a", + IsPublic: false, + }, + { + ResourceID: "subnet-az-3a", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + IsPublic: false, + }, + { + ResourceID: "subnet-lz-1a", + ZoneType: ptr.To(ZoneTypeLocalZone), + IsPublic: false, + }, + { + ResourceID: "subnet-lz-2b", + ZoneType: ptr.To(ZoneTypeLocalZone), + IsPublic: true, + }, + }, + want: nil, + }, + { + name: "public subnets", + subnets: subnetsAllZones, + want: Subnets{ + { + ResourceID: "subnet-az-1b", + IsPublic: true, + AvailabilityZone: "us-east-1a", + }, + { + ResourceID: "subnet-az-2b", + IsPublic: true, + AvailabilityZone: "us-east-1b", + }, + { + ResourceID: "subnet-az-3b", + ZoneType: ptr.To(ZoneTypeAvailabilityZone), + IsPublic: true, + AvailabilityZone: "us-east-1c", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.subnets.FilterPublic(); !cmp.Equal(got, tt.want) { + t.Errorf("Subnets.FilterPublic() got unwanted value:\n %v", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestSubnets_GetUniqueZones(t *testing.T) { + tests := []struct { + name string + subnets Subnets + want []string + }{ + { + name: "no subnets", + subnets: Subnets{}, + want: []string{}, + }, + { + name: "all subnets and zones", + subnets: subnetsAllZones, + want: []string{ + "us-east-1a", + "us-east-1b", + "us-east-1c", + "us-east-1-nyc-1a", + "us-east-1-wl1-nyc-wlz-1", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.subnets.GetUniqueZones(); !cmp.Equal(got, tt.want) { + t.Errorf("Subnets.GetUniqueZones() got unwanted value:\n %v", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestSubnets_HasPublicSubnetWavelength(t *testing.T) { + stub := testStubNetworkTypes{} + tests := []struct { + name string + subnets Subnets + want bool + }{ + { + name: "no subnets", + subnets: Subnets{}, + want: false, + }, + { + name: "no wavelength", + subnets: stub.deepCopyToSubnets(stub.getSubnetsAvailabilityZones()), + want: false, + }, + { + name: "no wavelength", + subnets: stub.deepCopyToSubnets(stub.getSubnetsLocalZones()), + want: false, + }, + { + name: "has only private subnets in wavelength zones", + subnets: Subnets{ + { + ID: "subnet-id-us-east-1-wl1-nyc-wlz-1-private", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: false, + ZoneType: ptr.To(ZoneTypeWavelengthZone), + }, + }, + want: false, + }, + { + name: "has public subnets in wavelength zones", + subnets: stub.getSubnets(), + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.subnets.HasPublicSubnetWavelength(); got != tt.want { + t.Errorf("Subnets.HasPublicSubnetWavelength() got unwanted value:\n %v", cmp.Diff(got, tt.want)) + } + }) + } +} diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index 545c4f320c..978d5310f2 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -17,11 +17,19 @@ limitations under the License. 
package v1beta2 import ( + "strings" + "k8s.io/apimachinery/pkg/util/sets" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +const ( + // PreventDeletionLabel can be used in situations where preventing delation is allowed. The docs + // and the CRD will call this out where its allowed. + PreventDeletionLabel = "aws.cluster.x-k8s.io/prevent-deletion" +) + // AWSResourceReference is a reference to a specific AWS resource by ID or filters. // Only one of ID or Filters may be specified. Specifying more than one will result in // a validation error. @@ -80,6 +88,7 @@ const ( ExternalResourceGCTasksAnnotation = "aws.cluster.x-k8s.io/external-resource-tasks-gc" ) +// GCTask defines a task to be executed by the garbage collector. type GCTask string var ( @@ -221,6 +230,14 @@ type Instance struct { // +optional PlacementGroupName string `json:"placementGroupName,omitempty"` + // PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + // This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + // strategy set to partition. + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=7 + // +optional + PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"` + // Tenancy indicates if instance should run on shared or single-tenant hardware. // +optional Tenancy string `json:"tenancy,omitempty"` @@ -236,6 +253,14 @@ type Instance struct { // PrivateDNSName is the options for the instance hostname. // +optional PrivateDNSName *PrivateDNSName `json:"privateDnsName,omitempty"` + + // PublicIPOnLaunch is the option to associate a public IP on instance launch + // +optional + PublicIPOnLaunch *bool `json:"publicIPOnLaunch,omitempty"` + + // CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched. + // +optional + CapacityReservationID *string `json:"capacityReservationId,omitempty"` } // InstanceMetadataState describes the state of InstanceMetadataOptions.HttpEndpoint and InstanceMetadataOptions.InstanceMetadataTags @@ -313,6 +338,7 @@ type InstanceMetadataOptions struct { InstanceMetadataTags InstanceMetadataState `json:"instanceMetadataTags,omitempty"` } +// SetDefaults sets the default values for the InstanceMetadataOptions. func (obj *InstanceMetadataOptions) SetDefaults() { if obj.HTTPEndpoint == "" { obj.HTTPEndpoint = InstanceMetadataEndpointStateEnabled @@ -425,3 +451,19 @@ type PrivateDNSName struct { // +kubebuilder:validation:Enum:=ip-name;resource-name HostnameType *string `json:"hostnameType,omitempty"` } + +// SubnetSchemaType specifies how given network should be divided on subnets +// in the VPC depending on the number of AZs. +type SubnetSchemaType string + +// Name returns subnet schema type name without prefix. +func (s *SubnetSchemaType) Name() string { + return strings.ToLower(strings.TrimPrefix(string(*s), "Prefer")) +} + +var ( + // SubnetSchemaPreferPrivate allocates more subnets in the VPC to private subnets. + SubnetSchemaPreferPrivate = SubnetSchemaType("PreferPrivate") + // SubnetSchemaPreferPublic allocates more subnets in the VPC to public subnets. 
+ SubnetSchemaPreferPublic = SubnetSchemaType("PreferPublic") +) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index fa6fe0e594..b3eaa6c08d 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -574,6 +574,11 @@ func (in *AWSLoadBalancerSpec) DeepCopyInto(out *AWSLoadBalancerSpec) { *out = new(ELBProtocol) **out = **in } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(TargetGroupHealthCheckAPISpec) + (*in).DeepCopyInto(*out) + } if in.AdditionalSecurityGroups != nil { in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups *out = make([]string, len(*in)) @@ -582,7 +587,9 @@ func (in *AWSLoadBalancerSpec) DeepCopyInto(out *AWSLoadBalancerSpec) { if in.AdditionalListeners != nil { in, out := &in.AdditionalListeners, &out.AdditionalListeners *out = make([]AdditionalListenerSpec, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.IngressRules != nil { in, out := &in.IngressRules, &out.IngressRules @@ -693,6 +700,11 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { *out = new(bool) **out = **in } + if in.ElasticIPPool != nil { + in, out := &in.ElasticIPPool, &out.ElasticIPPool + *out = new(ElasticIPPool) + (*in).DeepCopyInto(*out) + } if in.AdditionalSecurityGroups != nil { in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups *out = make([]AWSResourceReference, len(*in)) @@ -743,7 +755,7 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { if in.Ignition != nil { in, out := &in.Ignition, &out.Ignition *out = new(Ignition) - **out = **in + (*in).DeepCopyInto(*out) } if in.SpotMarketOptions != nil { in, out := &in.SpotMarketOptions, &out.SpotMarketOptions @@ -755,6 +767,11 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { *out = new(PrivateDNSName) (*in).DeepCopyInto(*out) } + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec. @@ -1070,6 +1087,11 @@ func (in *AWSRoleSpec) DeepCopy() *AWSRoleSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdditionalListenerSpec) DeepCopyInto(out *AdditionalListenerSpec) { *out = *in + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(TargetGroupHealthCheckAdditionalSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalListenerSpec. @@ -1269,6 +1291,31 @@ func (in *CloudInit) DeepCopy() *CloudInit { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticIPPool) DeepCopyInto(out *ElasticIPPool) { + *out = *in + if in.PublicIpv4Pool != nil { + in, out := &in.PublicIpv4Pool, &out.PublicIpv4Pool + *out = new(string) + **out = **in + } + if in.PublicIpv4PoolFallBackOrder != nil { + in, out := &in.PublicIpv4PoolFallBackOrder, &out.PublicIpv4PoolFallBackOrder + *out = new(PublicIpv4PoolFallbackOrder) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticIPPool. 
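
The `types.go` additions above are easiest to read side by side: `SubnetSchemaType.Name()` trims the `Prefer` prefix and lower-cases the remainder, and `Instance` gains `PlacementGroupPartition`, `PublicIPOnLaunch` and `CapacityReservationID`. A small sketch, assuming the same import path as before; the concrete values are hypothetical, not taken from this change:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	// Name() strips the "Prefer" prefix and lower-cases what is left.
	fmt.Println(infrav1.SubnetSchemaPreferPrivate.Name()) // "private"
	fmt.Println(infrav1.SubnetSchemaPreferPublic.Name())  // "public"

	// New Instance fields introduced in this hunk; values are hypothetical.
	instance := infrav1.Instance{
		PlacementGroupName:      "capa-partition-group", // hypothetical partition placement group
		PlacementGroupPartition: 3,                      // 1-7, only valid when the group uses the partition strategy
		PublicIPOnLaunch:        ptr.To(true),
		CapacityReservationID:   ptr.To("cr-0123456789abcdef0"), // hypothetical reservation ID
	}
	fmt.Println(instance.PlacementGroupPartition)

	// The new label constant; where it may be set is called out in the docs and CRDs.
	fmt.Println(infrav1.PreventDeletionLabel) // aws.cluster.x-k8s.io/prevent-deletion
}
```
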
+func (in *ElasticIPPool) DeepCopy() *ElasticIPPool { + if in == nil { + return nil + } + out := new(ElasticIPPool) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Filter) DeepCopyInto(out *Filter) { *out = *in @@ -1332,6 +1379,16 @@ func (in *IPv6) DeepCopy() *IPv6 { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Ignition) DeepCopyInto(out *Ignition) { *out = *in + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(IgnitionProxy) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(IgnitionTLS) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ignition. @@ -1344,6 +1401,56 @@ func (in *Ignition) DeepCopy() *Ignition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IgnitionProxy) DeepCopyInto(out *IgnitionProxy) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]IgnitionNoProxy, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnitionProxy. +func (in *IgnitionProxy) DeepCopy() *IgnitionProxy { + if in == nil { + return nil + } + out := new(IgnitionProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IgnitionTLS) DeepCopyInto(out *IgnitionTLS) { + *out = *in + if in.CASources != nil { + in, out := &in.CASources, &out.CASources + *out = make([]IgnitionCASource, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnitionTLS. +func (in *IgnitionTLS) DeepCopy() *IgnitionTLS { + if in == nil { + return nil + } + out := new(IgnitionTLS) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in @@ -1487,6 +1594,16 @@ func (in *Instance) DeepCopyInto(out *Instance) { *out = new(PrivateDNSName) (*in).DeepCopyInto(*out) } + if in.PublicIPOnLaunch != nil { + in, out := &in.PublicIPOnLaunch, &out.PublicIPOnLaunch + *out = new(bool) + **out = **in + } + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. @@ -1730,6 +1847,11 @@ func (in *S3Bucket) DeepCopyInto(out *S3Bucket) { *out = new(v1.Duration) **out = **in } + if in.BestEffortDeleteObjects != nil { + in, out := &in.BestEffortDeleteObjects, &out.BestEffortDeleteObjects + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Bucket. 
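
Judging from the fields copied in the generated code above, `Ignition` now carries optional `Proxy` and `TLS` sections and `AWSMachineSpec` can reference an `ElasticIPPool`. A hedged sketch of what setting them could look like; field semantics beyond the type shapes are not defined in this hunk, and all values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	spec := infrav1.AWSMachineSpec{
		Ignition: &infrav1.Ignition{
			Proxy: &infrav1.IgnitionProxy{
				HTTPProxy:  ptr.To("http://proxy.internal:3128"), // hypothetical proxy endpoint
				HTTPSProxy: ptr.To("http://proxy.internal:3128"),
				// NoProxy ([]IgnitionNoProxy) and TLS.CASources ([]IgnitionCASource)
				// are also available per the generated copies above.
			},
			TLS: &infrav1.IgnitionTLS{},
		},
		ElasticIPPool: &infrav1.ElasticIPPool{
			PublicIpv4Pool: ptr.To("ipv4pool-ec2-0123456789abcdef0"), // hypothetical BYOIP pool ID
		},
	}
	fmt.Println(*spec.Ignition.Proxy.HTTPProxy, *spec.ElasticIPPool.PublicIpv4Pool)
}
```
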
@@ -1811,6 +1933,16 @@ func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) { (*out)[key] = val } } + if in.ZoneType != nil { + in, out := &in.ZoneType, &out.ZoneType + *out = new(ZoneType) + **out = **in + } + if in.ParentZoneName != nil { + in, out := &in.ParentZoneName, &out.ParentZoneName + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec. @@ -1898,6 +2030,11 @@ func (in *TargetGroupHealthCheck) DeepCopyInto(out *TargetGroupHealthCheck) { *out = new(int64) **out = **in } + if in.UnhealthyThresholdCount != nil { + in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount + *out = new(int64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheck. @@ -1910,6 +2047,91 @@ func (in *TargetGroupHealthCheck) DeepCopy() *TargetGroupHealthCheck { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupHealthCheckAPISpec) DeepCopyInto(out *TargetGroupHealthCheckAPISpec) { + *out = *in + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.ThresholdCount != nil { + in, out := &in.ThresholdCount, &out.ThresholdCount + *out = new(int64) + **out = **in + } + if in.UnhealthyThresholdCount != nil { + in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheckAPISpec. +func (in *TargetGroupHealthCheckAPISpec) DeepCopy() *TargetGroupHealthCheckAPISpec { + if in == nil { + return nil + } + out := new(TargetGroupHealthCheckAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupHealthCheckAdditionalSpec) DeepCopyInto(out *TargetGroupHealthCheckAdditionalSpec) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.ThresholdCount != nil { + in, out := &in.ThresholdCount, &out.ThresholdCount + *out = new(int64) + **out = **in + } + if in.UnhealthyThresholdCount != nil { + in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheckAdditionalSpec. +func (in *TargetGroupHealthCheckAdditionalSpec) DeepCopy() *TargetGroupHealthCheckAdditionalSpec { + if in == nil { + return nil + } + out := new(TargetGroupHealthCheckAdditionalSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
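
The two generated copies above correspond to the new health-check knobs: `AWSLoadBalancerSpec.HealthCheck` (interval, timeout and threshold counts for the API target group) and a per-listener `HealthCheck` on `AdditionalListenerSpec` that additionally takes protocol, port and path. A minimal sketch under the same import-path assumption, with illustrative values:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	lb := infrav1.AWSLoadBalancerSpec{
		// Tuning for the API server target group health check.
		HealthCheck: &infrav1.TargetGroupHealthCheckAPISpec{
			IntervalSeconds:         ptr.To[int64](10),
			TimeoutSeconds:          ptr.To[int64](5),
			ThresholdCount:          ptr.To[int64](3),
			UnhealthyThresholdCount: ptr.To[int64](3),
		},
		// Additional listeners can carry their own health-check overrides.
		AdditionalListeners: []infrav1.AdditionalListenerSpec{{
			HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{
				Protocol: ptr.To("TCP"),   // illustrative
				Port:     ptr.To("22623"), // illustrative
			},
		}},
	}
	fmt.Println(*lb.HealthCheck.IntervalSeconds, len(lb.AdditionalListeners))
}
```
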
func (in *TargetGroupSpec) DeepCopyInto(out *TargetGroupSpec) { *out = *in @@ -1933,6 +2155,11 @@ func (in *TargetGroupSpec) DeepCopy() *TargetGroupSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPCSpec) DeepCopyInto(out *VPCSpec) { *out = *in + if in.SecondaryCidrBlocks != nil { + in, out := &in.SecondaryCidrBlocks, &out.SecondaryCidrBlocks + *out = make([]VpcCidrBlock, len(*in)) + copy(*out, *in) + } if in.IPAMPool != nil { in, out := &in.IPAMPool, &out.IPAMPool *out = new(IPAMPool) @@ -1948,6 +2175,11 @@ func (in *VPCSpec) DeepCopyInto(out *VPCSpec) { *out = new(string) **out = **in } + if in.CarrierGatewayID != nil { + in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + *out = new(string) + **out = **in + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(Tags, len(*in)) @@ -1970,6 +2202,16 @@ func (in *VPCSpec) DeepCopyInto(out *VPCSpec) { *out = new(string) **out = **in } + if in.ElasticIPPool != nil { + in, out := &in.ElasticIPPool, &out.ElasticIPPool + *out = new(ElasticIPPool) + (*in).DeepCopyInto(*out) + } + if in.SubnetSchema != nil { + in, out := &in.SubnetSchema, &out.SubnetSchema + *out = new(SubnetSchemaType) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSpec. @@ -2006,3 +2248,18 @@ func (in *Volume) DeepCopy() *Volume { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VpcCidrBlock) DeepCopyInto(out *VpcCidrBlock) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpcCidrBlock. +func (in *VpcCidrBlock) DeepCopy() *VpcCidrBlock { + if in == nil { + return nil + } + out := new(VpcCidrBlock) + in.DeepCopyInto(out) + return out +} diff --git a/bootstrap/eks/api/v1beta1/conversion_test.go b/bootstrap/eks/api/v1beta1/conversion_test.go index c6b4485354..47dcb9736d 100644 --- a/bootstrap/eks/api/v1beta1/conversion_test.go +++ b/bootstrap/eks/api/v1beta1/conversion_test.go @@ -20,7 +20,6 @@ import ( "testing" . "github.com/onsi/gomega" - runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" diff --git a/bootstrap/eks/api/v1beta2/doc.go b/bootstrap/eks/api/v1beta2/doc.go index 2069db82a5..992666159f 100644 --- a/bootstrap/eks/api/v1beta2/doc.go +++ b/bootstrap/eks/api/v1beta2/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package v1beta2 contains API Schema definitions for the Amazon EKS Bootstrap v1beta2 API group. // +gencrdrefdocs:force //nolint: revive // +groupName=bootstrap.cluster.x-k8s.io - package v1beta2 diff --git a/bootstrap/eks/api/v1beta2/eksconfig_webhook.go b/bootstrap/eks/api/v1beta2/eksconfig_webhook.go index e1459ba1dd..30609f6755 100644 --- a/bootstrap/eks/api/v1beta2/eksconfig_webhook.go +++ b/bootstrap/eks/api/v1beta2/eksconfig_webhook.go @@ -42,7 +42,7 @@ func (r *EKSConfig) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate will do any extra validation when updating a EKSConfig. 
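
`VPCSpec` likewise picks up `SecondaryCidrBlocks`, a `CarrierGatewayID`, an `ElasticIPPool` and the `SubnetSchema` selector in the generated copies above. A short sketch of the fields whose types are unambiguous here (values are illustrative; `VpcCidrBlock`'s own fields are not shown in this hunk, so it is left out):

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	vpc := infrav1.VPCSpec{
		// Bias the VPC layout towards private subnets, per the SubnetSchemaPreferPrivate doc comment.
		SubnetSchema: ptr.To(infrav1.SubnetSchemaPreferPrivate),
		// Carrier gateway for Wavelength zone traffic; the ID is hypothetical.
		CarrierGatewayID: ptr.To("cagw-0123456789abcdef0"),
	}
	fmt.Println(vpc.SubnetSchema.Name(), *vpc.CarrierGatewayID)
}
```
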
-func (r *EKSConfig) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *EKSConfig) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { return nil, nil } diff --git a/bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go b/bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go index fc2504eca4..d6611c40c3 100644 --- a/bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go +++ b/bootstrap/eks/api/v1beta2/eksconfigtemplate_webhook.go @@ -42,7 +42,7 @@ func (r *EKSConfigTemplate) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate will do any extra validation when updating a EKSConfigTemplate. -func (r *EKSConfigTemplate) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *EKSConfigTemplate) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { return nil, nil } diff --git a/bootstrap/eks/api/v1beta2/groupversion_info.go b/bootstrap/eks/api/v1beta2/groupversion_info.go index a93c42785f..7c26521b41 100644 --- a/bootstrap/eks/api/v1beta2/groupversion_info.go +++ b/bootstrap/eks/api/v1beta2/groupversion_info.go @@ -14,10 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package v1beta2 contains API Schema definitions for the Amazon EKS Bootstrap v1beta2 API group +// Package v1beta2 contains API Schema definitions for the Amazon EKS Bootstrap v1beta2 API group // +kubebuilder:object:generate=true // +groupName=bootstrap.cluster.x-k8s.io - package v1beta2 import ( diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index 8f1de94fc3..5aa9425dd5 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package controllers provides a way to reconcile EKSConfig objects. package controllers import ( diff --git a/bootstrap/eks/controllers/suite_test.go b/bootstrap/eks/controllers/suite_test.go index 74cd527bd6..2b61ab258a 100644 --- a/bootstrap/eks/controllers/suite_test.go +++ b/bootstrap/eks/controllers/suite_test.go @@ -42,8 +42,6 @@ func TestMain(m *testing.M) { } func setup() { - // utilruntime.Must(bootstrapv1.AddToScheme(scheme.Scheme)) - // utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), diff --git a/bootstrap/eks/internal/userdata/commands.go b/bootstrap/eks/internal/userdata/commands.go index af7551d8b6..1ee0c85abf 100644 --- a/bootstrap/eks/internal/userdata/commands.go +++ b/bootstrap/eks/internal/userdata/commands.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package userdata provides a way to generate ec2 instance userdata. package userdata const ( diff --git a/bootstrap/eks/internal/userdata/node.go b/bootstrap/eks/internal/userdata/node.go index 7be304cdb7..468f15478f 100644 --- a/bootstrap/eks/internal/userdata/node.go +++ b/bootstrap/eks/internal/userdata/node.go @@ -68,6 +68,7 @@ type NodeInput struct { NTP *eksbootstrapv1.NTP } +// DockerConfigJSONEscaped returns the DockerConfigJSON escaped for use in cloud-init. 
func (ni *NodeInput) DockerConfigJSONEscaped() string { if ni.DockerConfigJSON == nil || len(*ni.DockerConfigJSON) == 0 { return "''" @@ -76,6 +77,7 @@ func (ni *NodeInput) DockerConfigJSONEscaped() string { return shellescape.Quote(*ni.DockerConfigJSON) } +// BootstrapCommand returns the bootstrap command to be used on a node instance. func (ni *NodeInput) BootstrapCommand() string { if ni.BootstrapCommandOverride != nil && *ni.BootstrapCommandOverride != "" { return *ni.BootstrapCommandOverride diff --git a/cloudbuild-nightly.yaml b/cloudbuild-nightly.yaml index 3979d51979..d46ac1edfc 100644 --- a/cloudbuild-nightly.yaml +++ b/cloudbuild-nightly.yaml @@ -3,7 +3,7 @@ timeout: 3000s options: substitution_option: ALLOW_LOOSE steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20220609-2e4c91eb7e' + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20240210-29014a6e3a' entrypoint: make env: - DOCKER_CLI_EXPERIMENTAL=enabled diff --git a/cloudbuild.yaml b/cloudbuild.yaml index f8a71c1b0c..182ca60d03 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -3,7 +3,7 @@ timeout: 3000s options: substitution_option: ALLOW_LOOSE steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20220609-2e4c91eb7e' + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20240210-29014a6e3a' entrypoint: make env: - DOCKER_CLI_EXPERIMENTAL=enabled diff --git a/cmd/clusterawsadm/ami/helper.go b/cmd/clusterawsadm/ami/helper.go index e8c8a2d9ed..ebc393084c 100644 --- a/cmd/clusterawsadm/ami/helper.go +++ b/cmd/clusterawsadm/ami/helper.go @@ -241,16 +241,14 @@ func findAMI(imagesMap map[string][]*ec2.Image, baseOS, kubernetesVersion string } if val, ok := imagesMap[amiName]; ok && val != nil { return latestAMI(val) - } else { - amiName, err = ec2service.GenerateAmiName(amiNameFormat, baseOS, strings.TrimPrefix(kubernetesVersion, "v")) - if err != nil { - return nil, errors.Wrapf(err, "failed to process ami format: %q", amiNameFormat) - } - if val, ok = imagesMap[amiName]; ok && val != nil { - return latestAMI(val) - } } - + amiName, err = ec2service.GenerateAmiName(amiNameFormat, baseOS, strings.TrimPrefix(kubernetesVersion, "v")) + if err != nil { + return nil, errors.Wrapf(err, "failed to process ami format: %q", amiNameFormat) + } + if val, ok := imagesMap[amiName]; ok && val != nil { + return latestAMI(val) + } return nil, nil } diff --git a/cmd/clusterawsadm/ami/list.go b/cmd/clusterawsadm/ami/list.go index b17166f75f..2b04f81422 100644 --- a/cmd/clusterawsadm/ami/list.go +++ b/cmd/clusterawsadm/ami/list.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package ami provides a way to interact with AWS AMIs. package ami import ( diff --git a/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go b/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go index 1dc2079536..851bbead25 100644 --- a/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go +++ b/cmd/clusterawsadm/api/ami/v1beta1/scheme/scheme.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package scheme provides a way to generate a Scheme and CodecFactory f +// or the bootstrap.aws.infrastructure.cluster.x-k8s.io API group. 
package scheme import ( diff --git a/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go b/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go index 58e403f040..f6474798ed 100644 --- a/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go +++ b/cmd/clusterawsadm/api/ami/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go index fc604a190f..b320f44db3 100644 --- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go +++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/scheme/scheme.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package scheme provides a way to generate a Scheme and CodecFactory +// for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group. package scheme import ( diff --git a/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go b/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go index d84a39aee5..f70029e383 100644 --- a/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go +++ b/cmd/clusterawsadm/api/bootstrap/v1beta1/scheme/scheme.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package scheme provides a way to generate a Scheme and CodecFactory +// for the bootstrap.aws.infrastructure.cluster.x-k8s.io API group. package scheme import ( diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go index 14f8d423bb..f3cb407c75 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go @@ -90,8 +90,10 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument { "ec2:AssignPrivateIpAddresses", "ec2:UnassignPrivateIpAddresses", "ec2:AssociateRouteTable", + "ec2:AssociateVpcCidrBlock", "ec2:AttachInternetGateway", "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateCarrierGateway", "ec2:CreateInternetGateway", "ec2:CreateEgressOnlyInternetGateway", "ec2:CreateNatGateway", @@ -103,8 +105,10 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument { "ec2:CreateTags", "ec2:CreateVpc", "ec2:CreateVpcEndpoint", + "ec2:DisassociateVpcCidrBlock", "ec2:ModifyVpcAttribute", "ec2:ModifyVpcEndpoint", + "ec2:DeleteCarrierGateway", "ec2:DeleteInternetGateway", "ec2:DeleteEgressOnlyInternetGateway", "ec2:DeleteNatGateway", @@ -118,6 +122,7 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument { "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", + "ec2:DescribeCarrierGateways", "ec2:DescribeInstances", "ec2:DescribeInstanceTypes", "ec2:DescribeInternetGateways", @@ -131,6 +136,7 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument { "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", + "ec2:DescribeDhcpOptions", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcEndpoints", "ec2:DescribeVolumes", @@ -322,60 +328,59 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument { // ControllersPolicyEKS creates a policy from a template for AWS Controllers. 
func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument { - statement := []iamv1.StatementEntry{} + statements := []iamv1.StatementEntry{} allowedIAMActions := iamv1.Actions{ "iam:GetRole", "iam:ListAttachedRolePolicies", } - statement = append(statement, iamv1.StatementEntry{ - Effect: iamv1.EffectAllow, - Resource: iamv1.Resources{ - "arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*", - }, - Action: iamv1.Actions{ - "ssm:GetParameter", - }, - }) - - statement = append(statement, iamv1.StatementEntry{ - Effect: iamv1.EffectAllow, - Action: iamv1.Actions{ - "iam:CreateServiceLinkedRole", - }, - Resource: iamv1.Resources{ - "arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS", - }, - Condition: iamv1.Conditions{ - iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks.amazonaws.com"}, - }, - }) - - statement = append(statement, iamv1.StatementEntry{ - Effect: iamv1.EffectAllow, - Action: iamv1.Actions{ - "iam:CreateServiceLinkedRole", - }, - Resource: iamv1.Resources{ - "arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup", - }, - Condition: iamv1.Conditions{ - iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-nodegroup.amazonaws.com"}, + statements = append(statements, + iamv1.StatementEntry{ + Effect: iamv1.EffectAllow, + Resource: iamv1.Resources{ + "arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*", + }, + Action: iamv1.Actions{ + "ssm:GetParameter", + }, }, - }) - - statement = append(statement, iamv1.StatementEntry{ - Effect: iamv1.EffectAllow, - Action: iamv1.Actions{ - "iam:CreateServiceLinkedRole", + iamv1.StatementEntry{ + Effect: iamv1.EffectAllow, + Action: iamv1.Actions{ + "iam:CreateServiceLinkedRole", + }, + Resource: iamv1.Resources{ + "arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS", + }, + Condition: iamv1.Conditions{ + iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks.amazonaws.com"}, + }, }, - Resource: iamv1.Resources{ - "arn:" + t.Spec.Partition + ":iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate", + iamv1.StatementEntry{ + Effect: iamv1.EffectAllow, + Action: iamv1.Actions{ + "iam:CreateServiceLinkedRole", + }, + Resource: iamv1.Resources{ + "arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup", + }, + Condition: iamv1.Conditions{ + iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-nodegroup.amazonaws.com"}, + }, }, - Condition: iamv1.Conditions{ - iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-fargate.amazonaws.com"}, + iamv1.StatementEntry{ + Effect: iamv1.EffectAllow, + Action: iamv1.Actions{ + "iam:CreateServiceLinkedRole", + }, + Resource: iamv1.Resources{ + "arn:" + t.Spec.Partition + ":iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate", + }, + Condition: iamv1.Conditions{ + iamv1.StringLike: map[string]string{"iam:AWSServiceName": "eks-fargate.amazonaws.com"}, + }, }, - }) + ) if t.Spec.EKS.AllowIAMRoleCreation { allowedIAMActions = append(allowedIAMActions, iamv1.Actions{ @@ -386,7 +391,7 @@ func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument { "iam:AttachRolePolicy", }...) 
- statement = append(statement, iamv1.StatementEntry{ + statements = append(statements, iamv1.StatementEntry{ Action: iamv1.Actions{ "iam:ListOpenIDConnectProviders", "iam:GetOpenIDConnectProvider", @@ -402,7 +407,8 @@ func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument { Effect: iamv1.EffectAllow, }) } - statement = append(statement, []iamv1.StatementEntry{ + + statements = append(statements, []iamv1.StatementEntry{ { Action: allowedIAMActions, Resource: iamv1.Resources{ @@ -495,7 +501,7 @@ func (t Template) ControllersPolicyEKS() *iamv1.PolicyDocument { return &iamv1.PolicyDocument{ Version: iamv1.CurrentVersion, - Statement: statement, + Statement: statements, } } diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml index d5b5505009..5031f59964 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml index a2c06a7e28..aae74b87be 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git 
a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml index e965556dc5..0929a3a64a 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml @@ -155,8 +155,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -168,8 +170,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -183,6 +187,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -196,6 +201,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml index 46b4121509..f5d47e6d2b 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml index 6fc278b78a..d391de851b 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml @@ -155,8 +155,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - 
ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -168,8 +170,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -183,6 +187,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -196,6 +201,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml index 4cb1a565cf..8ebe0836cd 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml @@ -155,8 +155,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -168,8 +170,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -183,6 +187,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -196,6 +201,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml index 1dd528076b..aae239179d 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ 
Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml index 80f96c8d6d..02f1b6b74a 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml index 9ce26aff22..9ce2db9f3c 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml index 76af2c7aee..cf9a249319 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - 
ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml index 67e78b9504..18467a9df7 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml index ef5fd59980..8c4af02490 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml @@ -155,8 +155,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -168,8 +170,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -183,6 +187,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - 
ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -196,6 +201,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml index 39bd20ef2c..e1f1e332ed 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml index 472fdaacf3..4f282a6394 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml @@ -149,8 +149,10 @@ Resources: - ec2:AssignPrivateIpAddresses - ec2:UnassignPrivateIpAddresses - ec2:AssociateRouteTable + - ec2:AssociateVpcCidrBlock - ec2:AttachInternetGateway - ec2:AuthorizeSecurityGroupIngress + - ec2:CreateCarrierGateway - ec2:CreateInternetGateway - ec2:CreateEgressOnlyInternetGateway - ec2:CreateNatGateway @@ -162,8 +164,10 @@ Resources: - ec2:CreateTags - ec2:CreateVpc - ec2:CreateVpcEndpoint + - ec2:DisassociateVpcCidrBlock - ec2:ModifyVpcAttribute - ec2:ModifyVpcEndpoint + - ec2:DeleteCarrierGateway - ec2:DeleteInternetGateway - ec2:DeleteEgressOnlyInternetGateway - ec2:DeleteNatGateway @@ -177,6 +181,7 @@ Resources: - ec2:DescribeAccountAttributes - ec2:DescribeAddresses - ec2:DescribeAvailabilityZones + - ec2:DescribeCarrierGateways - ec2:DescribeInstances - ec2:DescribeInstanceTypes - ec2:DescribeInternetGateways @@ -190,6 +195,7 @@ Resources: - ec2:DescribeSecurityGroups - ec2:DescribeSubnets - ec2:DescribeVpcs + - ec2:DescribeDhcpOptions - ec2:DescribeVpcAttribute - ec2:DescribeVpcEndpoints - ec2:DescribeVolumes diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/iam.go b/cmd/clusterawsadm/cloudformation/bootstrap/iam.go index 1aa016606e..2a30b4ea33 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/iam.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/iam.go @@ -71,6 +71,7 @@ func (t Template) policyFunctionMap() map[PolicyName]func() *iamv1.PolicyDocumen } } +// PrintPolicyDocs prints the JSON representation of policy 
documents for all ManagedIAMPolicy. func (t Template) PrintPolicyDocs() error { for _, name := range ManagedIAMPolicyNames { policyDoc := t.GetPolicyDocFromPolicyName(name) diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/template.go b/cmd/clusterawsadm/cloudformation/bootstrap/template.go index 030bc248ee..c4eb4cbff7 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/template.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/template.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package bootstrap provides a way to generate a CloudFormation template for IAM policies, +// users and roles for use by Cluster API Provider AWS. package bootstrap import ( diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go index c80f2312ef..e47fbbd047 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go @@ -17,6 +17,7 @@ limitations under the License. package bootstrap import ( + "bytes" "fmt" "os" "path" @@ -201,7 +202,7 @@ func TestRenderCloudformation(t *testing.T) { t.Fatal(err) } - if string(tData) != string(data) { + if !bytes.Equal(tData, data) { dmp := diffmatchpatch.New() diffs := dmp.DiffMain(string(tData), string(data), false) out := dmp.DiffPrettyText(diffs) diff --git a/cmd/clusterawsadm/cloudformation/service/service.go b/cmd/clusterawsadm/cloudformation/service/service.go index 14a27fd2e9..33db42a8d0 100644 --- a/cmd/clusterawsadm/cloudformation/service/service.go +++ b/cmd/clusterawsadm/cloudformation/service/service.go @@ -82,6 +82,7 @@ func (s *Service) ReconcileBootstrapStack(stackName string, t go_cfn.Template, t return nil } +// ReconcileBootstrapNoUpdate creates or updates bootstrap CloudFormation without updating the stack. func (s *Service) ReconcileBootstrapNoUpdate(stackName string, t go_cfn.Template, tags map[string]string) error { yaml, err := t.YAML() processedYaml := string(yaml) diff --git a/cmd/clusterawsadm/cmd/ami/ami.go b/cmd/clusterawsadm/cmd/ami/ami.go index 0992c0723c..b4959b29e5 100644 --- a/cmd/clusterawsadm/cmd/ami/ami.go +++ b/cmd/clusterawsadm/cmd/ami/ami.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package ami provides a way to generate AMI commands. package ami import ( diff --git a/cmd/clusterawsadm/cmd/ami/common/common.go b/cmd/clusterawsadm/cmd/ami/common/common.go index 14ad2babaf..c3f79ed0de 100644 --- a/cmd/clusterawsadm/cmd/ami/common/common.go +++ b/cmd/clusterawsadm/cmd/ami/common/common.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package common provides common flags and functions for the AMI commands. 
package common import ( diff --git a/cmd/clusterawsadm/cmd/ami/common/copy.go b/cmd/clusterawsadm/cmd/ami/common/copy.go index 406d10f015..c2c95c6448 100644 --- a/cmd/clusterawsadm/cmd/ami/common/copy.go +++ b/cmd/clusterawsadm/cmd/ami/common/copy.go @@ -89,7 +89,6 @@ func CopyAMICmd() *cobra.Command { printer.Print(ami) - // klog.V(0).Infof("Completed copying %v\n", *image.ImageId) return nil }, } diff --git a/cmd/clusterawsadm/cmd/ami/list/list.go b/cmd/clusterawsadm/cmd/ami/list/list.go index 12ee0cfc88..5e1bef32ed 100644 --- a/cmd/clusterawsadm/cmd/ami/list/list.go +++ b/cmd/clusterawsadm/cmd/ami/list/list.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package list provides a way to list AMIs from the default AWS account where AMIs are stored. package list import ( diff --git a/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go b/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go index 00d7322f75..cfa73aa658 100644 --- a/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go +++ b/cmd/clusterawsadm/cmd/bootstrap/bootstrap.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package bootstrap provides cli commands for bootstrapping +// AWS accounts for use with the Kubernetes Cluster API Provider AWS. package bootstrap import ( diff --git a/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go b/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go index 2abda3f3b6..0c919d7e7e 100644 --- a/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go +++ b/cmd/clusterawsadm/cmd/bootstrap/credentials/credentials.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package credentials provides a way to encode credentials for use with Kubernetes Cluster API Provider AWS. package credentials import ( diff --git a/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go b/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go index f518c5cc96..775187858f 100644 --- a/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go +++ b/cmd/clusterawsadm/cmd/bootstrap/iam/iam_doc.go @@ -44,7 +44,7 @@ func printPolicyCmd() *cobra.Command { clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyControllers # Print out the IAM policy for the Kubernetes Cluster API Provider AWS Controller using a given configuration file. - clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyControllers --config bootstrap_config.yaml + clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyControllers --config bootstrap_config.yaml # Print out the IAM policy for the Kubernetes AWS Cloud Provider for the control plane. clusterawsadm bootstrap iam print-policy --document AWSIAMManagedPolicyCloudProviderControlPlane diff --git a/cmd/clusterawsadm/cmd/bootstrap/iam/root.go b/cmd/clusterawsadm/cmd/bootstrap/iam/root.go index 1f9f2b9ca5..491610cd59 100644 --- a/cmd/clusterawsadm/cmd/bootstrap/iam/root.go +++ b/cmd/clusterawsadm/cmd/bootstrap/iam/root.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package iam provides a way to generate IAM policies and roles. 
package iam import ( diff --git a/cmd/clusterawsadm/cmd/controller/controller.go b/cmd/clusterawsadm/cmd/controller/controller.go index a8897cea08..31e018d432 100644 --- a/cmd/clusterawsadm/cmd/controller/controller.go +++ b/cmd/clusterawsadm/cmd/controller/controller.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package controller provides the controller command. package controller import ( diff --git a/cmd/clusterawsadm/cmd/controller/credentials/print.go b/cmd/clusterawsadm/cmd/controller/credentials/print.go index b88621cf25..0b4e27094a 100644 --- a/cmd/clusterawsadm/cmd/controller/credentials/print.go +++ b/cmd/clusterawsadm/cmd/controller/credentials/print.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package credentials provides a CLI utilities for AWS credentials. package credentials import ( diff --git a/cmd/clusterawsadm/cmd/controller/rollout/common.go b/cmd/clusterawsadm/cmd/controller/rollout/common.go index 37cc67b6e9..47707f3970 100644 --- a/cmd/clusterawsadm/cmd/controller/rollout/common.go +++ b/cmd/clusterawsadm/cmd/controller/rollout/common.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package rollout provides the rollout command. package rollout import ( diff --git a/cmd/clusterawsadm/cmd/eks/addons/addons.go b/cmd/clusterawsadm/cmd/eks/addons/addons.go index 3b8ae23e76..709f2f2cf3 100644 --- a/cmd/clusterawsadm/cmd/eks/addons/addons.go +++ b/cmd/clusterawsadm/cmd/eks/addons/addons.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package addons provides EKS addons commands. package addons import "github.com/spf13/cobra" diff --git a/cmd/clusterawsadm/cmd/eks/addons/list_installed.go b/cmd/clusterawsadm/cmd/eks/addons/list_installed.go index 827c944e0a..cb73ee64b5 100644 --- a/cmd/clusterawsadm/cmd/eks/addons/list_installed.go +++ b/cmd/clusterawsadm/cmd/eks/addons/list_installed.go @@ -113,10 +113,10 @@ func listInstalledAddons(region, clusterName, printerType *string) error { newIssue := issue{ Code: *addonIssue.Code, Message: *addonIssue.Message, - ResourceIds: []string{}, + ResourceIDs: []string{}, } for _, resID := range addonIssue.ResourceIds { - newIssue.ResourceIds = append(newIssue.ResourceIds, *resID) + newIssue.ResourceIDs = append(newIssue.ResourceIDs, *resID) } installedAddon.HealthIssues = append(installedAddon.HealthIssues, newIssue) } diff --git a/cmd/clusterawsadm/cmd/eks/addons/types.go b/cmd/clusterawsadm/cmd/eks/addons/types.go index a59368f8f6..9c9ae62616 100644 --- a/cmd/clusterawsadm/cmd/eks/addons/types.go +++ b/cmd/clusterawsadm/cmd/eks/addons/types.go @@ -106,7 +106,7 @@ type installedAddon struct { type issue struct { Code string Message string - ResourceIds []string + ResourceIDs []string } type installedAddonsList struct { diff --git a/cmd/clusterawsadm/cmd/eks/eks.go b/cmd/clusterawsadm/cmd/eks/eks.go index 42d271f481..8856216aa8 100644 --- a/cmd/clusterawsadm/cmd/eks/eks.go +++ b/cmd/clusterawsadm/cmd/eks/eks.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package eks provides a CLI to manage EKS clusters. 
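The `ResourceIds` → `ResourceIDs` rename in the EKS addons types above follows the common Go convention that initialisms such as ID keep a consistent case, which is what revive/golint-style checks flag. A tiny illustrative sketch (the struct only mirrors the shape shown in the diff; it is not the actual type):

```go
package main

import "fmt"

// issue mirrors the addon health-issue shape from the hunk above;
// "IDs" stays fully capitalized per Go initialism conventions.
type issue struct {
	Code        string
	Message     string
	ResourceIDs []string // previously ResourceIds
}

func main() {
	i := issue{
		Code:        "ExampleCode",
		Message:     "example health issue",
		ResourceIDs: []string{"i-0123456789abcdef0"},
	}
	fmt.Println(i.ResourceIDs)
}
```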
package eks import ( diff --git a/cmd/clusterawsadm/cmd/flags/common.go b/cmd/clusterawsadm/cmd/flags/common.go index 096d289927..d6d7e4e808 100644 --- a/cmd/clusterawsadm/cmd/flags/common.go +++ b/cmd/clusterawsadm/cmd/flags/common.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package flags provides a way to add flags to the cli. package flags import ( diff --git a/cmd/clusterawsadm/cmd/gc/gc.go b/cmd/clusterawsadm/cmd/gc/gc.go index 0bd0344514..c9d91bf703 100644 --- a/cmd/clusterawsadm/cmd/gc/gc.go +++ b/cmd/clusterawsadm/cmd/gc/gc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package gc provides commands related to garbage collecting external resources of clusters. package gc import ( @@ -27,10 +28,7 @@ func RootCmd() *cobra.Command { Short: "Commands related to garbage collecting external resources of clusters", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - if err := cmd.Help(); err != nil { - return err - } - return nil + return cmd.Help() }, } diff --git a/cmd/clusterawsadm/cmd/resource/list/list.go b/cmd/clusterawsadm/cmd/resource/list/list.go index 01b84e2ae4..1e65ef61ad 100644 --- a/cmd/clusterawsadm/cmd/resource/list/list.go +++ b/cmd/clusterawsadm/cmd/resource/list/list.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package list provides the list command for the resource package. package list import ( @@ -38,7 +39,7 @@ func ListAWSResourceCmd() *cobra.Command { Short: "List all AWS resources created by CAPA", Long: cmd.LongDesc(` List AWS resources directly created by CAPA based on region and cluster-name. There are some indirect resources like Cloudwatch alarms, rules, etc - which are not directly created by CAPA, so those resources are not listed here. + which are not directly created by CAPA, so those resources are not listed here. If region and cluster-name are not set, then it will throw an error. `), Example: cmd.Examples(` diff --git a/cmd/clusterawsadm/cmd/resource/resource.go b/cmd/clusterawsadm/cmd/resource/resource.go index 36e5aa3e25..c2cbde7a6a 100644 --- a/cmd/clusterawsadm/cmd/resource/resource.go +++ b/cmd/clusterawsadm/cmd/resource/resource.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package resource provides commands related to AWS resources. package resource import ( @@ -34,10 +35,7 @@ func RootCmd() *cobra.Command { # List of AWS resources created by CAPA `), RunE: func(cmd *cobra.Command, args []string) error { - if err := cmd.Help(); err != nil { - return err - } - return nil + return cmd.Help() }, } diff --git a/cmd/clusterawsadm/cmd/root.go b/cmd/clusterawsadm/cmd/root.go index dc25175824..0c0b2b5614 100644 --- a/cmd/clusterawsadm/cmd/root.go +++ b/cmd/clusterawsadm/cmd/root.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package cmd implements the clusterawsadm command line utility. 
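The gc and resource root commands above replace an `if err := cmd.Help(); err != nil { return err }; return nil` block with a direct `return cmd.Help()`. A minimal cobra sketch of the simplified form (command name and wiring are illustrative, assuming github.com/spf13/cobra as already used by the CLI):

```go
package main

import "github.com/spf13/cobra"

func newRootCmd() *cobra.Command {
	return &cobra.Command{
		Use:  "example",
		Args: cobra.NoArgs,
		// The command only prints its help text, so returning cmd.Help()
		// directly is equivalent to checking the error and returning nil
		// on success.
		RunE: func(cmd *cobra.Command, _ []string) error {
			return cmd.Help()
		},
	}
}

func main() {
	_ = newRootCmd().Execute()
}
```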
package cmd import ( @@ -63,7 +64,7 @@ func RootCmd() *cobra.Command { export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile) clusterctl init --infrastructure aws `), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { return cmd.Help() }, } diff --git a/cmd/clusterawsadm/cmd/util/util.go b/cmd/clusterawsadm/cmd/util/util.go index 8e714ed80c..7b974add4a 100644 --- a/cmd/clusterawsadm/cmd/util/util.go +++ b/cmd/clusterawsadm/cmd/util/util.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package util provides utility functions. package util import ( diff --git a/cmd/clusterawsadm/cmd/version/version.go b/cmd/clusterawsadm/cmd/version/version.go index db85908013..d5e4cbc37b 100644 --- a/cmd/clusterawsadm/cmd/version/version.go +++ b/cmd/clusterawsadm/cmd/version/version.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package version provides the version information of clusterawsadm. package version import ( @@ -33,6 +34,9 @@ type Version struct { ClientVersion *version.Info `json:"awsProviderVersion"` } +// CLIName defaults to clusterawsadm. +var CLIName = "clusterawsadm" + // Cmd provides the version information clusterawsadm. func Cmd(out io.Writer) *cobra.Command { cmd := &cobra.Command{ @@ -63,7 +67,7 @@ func RunVersion(out io.Writer, cmd *cobra.Command) error { switch of { case "": - fmt.Fprintf(out, "clusterawsadm version: %#v\n", v.ClientVersion) + fmt.Fprintf(out, "%s version: %#v\n", CLIName, v.ClientVersion) case "short": fmt.Fprintf(out, "%s\n", v.ClientVersion.GitVersion) case "yaml": diff --git a/cmd/clusterawsadm/configreader/configreader.go b/cmd/clusterawsadm/configreader/configreader.go index 3047152cb6..e5b1d800cd 100644 --- a/cmd/clusterawsadm/configreader/configreader.go +++ b/cmd/clusterawsadm/configreader/configreader.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package configreader provides a way to load a bootstrapv1.AWSIAMConfiguration from a file. package configreader import ( diff --git a/cmd/clusterawsadm/controller/credentials/update_credentials.go b/cmd/clusterawsadm/controller/credentials/update_credentials.go index e4a9d1afc4..eba621cb3e 100644 --- a/cmd/clusterawsadm/controller/credentials/update_credentials.go +++ b/cmd/clusterawsadm/controller/credentials/update_credentials.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package credentials provides AWS credentials management. package credentials import ( @@ -49,7 +50,7 @@ func UpdateCredentials(input UpdateCredentialsInput) error { creds = "Cg==" } - patch := fmt.Sprintf("{\"data\":{\"credentials\": \"%s\"}}", creds) + patch := fmt.Sprintf("{\"data\":{\"credentials\": %q}}", creds) _, err = client.CoreV1().Secrets(input.Namespace).Patch( context.TODO(), controller.BootstrapCredsSecret, diff --git a/cmd/clusterawsadm/controller/helper.go b/cmd/clusterawsadm/controller/helper.go index d7ff024ff2..809678bf2b 100644 --- a/cmd/clusterawsadm/controller/helper.go +++ b/cmd/clusterawsadm/controller/helper.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package controller contains the controller logic for the capa manager. 
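On the update_credentials.go hunk above: the `%q` verb emits a quoted, escaped string, which is a little safer than embedding the value with a hand-written `\"%s\"` when building a JSON patch. A standalone sketch (the value is a placeholder; for fully untrusted input, marshalling the patch with encoding/json is the more defensive choice):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	creds := "Cg==" // placeholder base64 value, as in the surrounding diff

	// %q produces a double-quoted, escaped string, so quotes or backslashes
	// in the value cannot break the surrounding JSON document.
	patch := fmt.Sprintf("{\"data\":{\"credentials\": %q}}", creds)
	fmt.Println(patch)

	// Equivalent patch built via encoding/json (error ignored for brevity).
	b, _ := json.Marshal(map[string]map[string]string{"data": {"credentials": creds}})
	fmt.Println(string(b))
}
```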
package controller import ( diff --git a/cmd/clusterawsadm/controller/rollout/rollout.go b/cmd/clusterawsadm/controller/rollout/rollout.go index 12f9f722cd..eb55e32947 100644 --- a/cmd/clusterawsadm/controller/rollout/rollout.go +++ b/cmd/clusterawsadm/controller/rollout/rollout.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package rollout provides a way to rollout the CAPA controller manager deployment. package rollout import ( diff --git a/cmd/clusterawsadm/converters/iam.go b/cmd/clusterawsadm/converters/iam.go index cecf4f5530..a571962fee 100644 --- a/cmd/clusterawsadm/converters/iam.go +++ b/cmd/clusterawsadm/converters/iam.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package converters contains the conversion functions for AWS. package converters import ( diff --git a/cmd/clusterawsadm/credentials/credentials.go b/cmd/clusterawsadm/credentials/credentials.go index 4c640dfbfe..2aa320839a 100644 --- a/cmd/clusterawsadm/credentials/credentials.go +++ b/cmd/clusterawsadm/credentials/credentials.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package credentials contains utilities for working with AWS credentials. package credentials import ( diff --git a/cmd/clusterawsadm/gc/gc.go b/cmd/clusterawsadm/gc/gc.go index 046c841be6..27a9887d41 100644 --- a/cmd/clusterawsadm/gc/gc.go +++ b/cmd/clusterawsadm/gc/gc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package gc provides a way to handle AWS garbage collection on deletion. package gc import ( @@ -23,8 +24,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - _ "k8s.io/client-go/plugin/pkg/client/auth/exec" - _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" + _ "k8s.io/client-go/plugin/pkg/client/auth/exec" // import all auth plugins + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" // import all oidc plugins "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/cmd/clusterawsadm/main.go b/cmd/clusterawsadm/main.go index bd97bc0adb..0a30981ed0 100644 --- a/cmd/clusterawsadm/main.go +++ b/cmd/clusterawsadm/main.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package main is the entrypoint for the clusterawsadm command. package main import "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/cmd" diff --git a/cmd/clusterawsadm/printers/printers.go b/cmd/clusterawsadm/printers/printers.go index 4d3b6aa713..0c106aca12 100644 --- a/cmd/clusterawsadm/printers/printers.go +++ b/cmd/clusterawsadm/printers/printers.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package printers provides a wrapper for the k8s.io/cli-runtime/pkg/printers package. package printers import ( diff --git a/cmd/clusterawsadm/resource/type.go b/cmd/clusterawsadm/resource/type.go index e5b344aff3..0dda210426 100644 --- a/cmd/clusterawsadm/resource/type.go +++ b/cmd/clusterawsadm/resource/type.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package resource provides definitions for AWS resource types. 
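The gc.go hunk above only adds comments to the blank imports, but the pattern is worth spelling out: those packages are imported solely for their side effects, so client-go can authenticate with kubeconfigs that rely on exec or OIDC auth. A small sketch of the same pattern (the kubeconfig handling here is generic, not the repository's code):

```go
package main

import (
	"fmt"

	// Blank imports for side effects only, mirroring the diff above: they
	// make the exec and OIDC auth plugin support available to client-go.
	_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Resolve the default kubeconfig location (KUBECONFIG or ~/.kube/config).
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	cfg, err := clientcmd.BuildConfigFromFlags("", rules.GetDefaultFilename())
	if err != nil {
		fmt.Println("could not load kubeconfig:", err)
		return
	}
	fmt.Println("API server:", cfg.Host)
}
```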
package resource import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml index 9b5a7c4a17..1d298881d8 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: eksconfigs.bootstrap.cluster.x-k8s.io spec: group: bootstrap.cluster.x-k8s.io @@ -34,14 +34,19 @@ spec: Configuration API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -58,13 +63,13 @@ spec: when bootstrapping EKS. type: string dnsClusterIP: - description: DNSClusterIP overrides the IP address to use for DNS - queries within the cluster. + description: ' DNSClusterIP overrides the IP address to use for DNS + queries within the cluster.' type: string dockerConfigJson: - description: DockerConfigJson is used for the contents of the /etc/docker/daemon.json - file. Useful if you want a custom config differing from the default - one in the AMI. This is expected to be a json string. + description: |- + DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI. + This is expected to be a json string. type: string kubeletExtraArgs: additionalProperties: @@ -77,8 +82,8 @@ spec: to use. properties: accountNumber: - description: AccountNumber is the AWS account number to pull the - pause container from. + description: ' AccountNumber is the AWS account number to pull + the pause container from.' type: string version: description: Version is the tag of the pause container to use. @@ -88,8 +93,9 @@ spec: - version type: object serviceIPV6Cidr: - description: ServiceIPV6Cidr is the ipv6 cidr range of the cluster. - If this is specified then the ip family will be set to ipv6. + description: |- + ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then + the ip family will be set to ipv6. 
type: string useMaxPods: description: UseMaxPods sets --max-pods for the kubelet when true. @@ -106,37 +112,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -185,14 +191,19 @@ spec: Configuration API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -239,9 +250,9 @@ spec: used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to overwrite - any existing filesystem. If true, any pre-existing file - system will be destroyed. Use with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition to use. @@ -264,21 +275,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. If it is - true, a single partition will be created for the entire - device. When layout is false, it means don't partition - or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip checks - and create the partition if a partition or filesystem - is found on the device. Use with caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default and - setups a MS-DOS partition table ''gpt'': setups a GPT - partition table' + description: |- + TableType specifies the tupe of partition table. The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table type: string required: - device @@ -287,13 +298,13 @@ spec: type: array type: object dnsClusterIP: - description: DNSClusterIP overrides the IP address to use for DNS - queries within the cluster. + description: ' DNSClusterIP overrides the IP address to use for DNS + queries within the cluster.' type: string dockerConfigJson: - description: DockerConfigJson is used for the contents of the /etc/docker/daemon.json - file. Useful if you want a custom config differing from the default - one in the AMI. This is expected to be a json string. + description: |- + DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI. + This is expected to be a json string. type: string files: description: Files specifies extra files to be passed to user_data @@ -386,8 +397,8 @@ spec: to use. properties: accountNumber: - description: AccountNumber is the AWS account number to pull the - pause container from. + description: ' AccountNumber is the AWS account number to pull + the pause container from.' type: string version: description: Version is the tag of the pause container to use. 
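Most of the churn in this and the following CRD hunks comes from the controller-gen bump to v0.14.0 shown above: descriptions are now emitted as `|-` block scalars that keep the line breaks of the Go doc comments they are generated from, rather than being re-wrapped into single flow strings. A hedged sketch of the kind of source comment involved (package and type names are illustrative, not the CAPA API types):

```go
// Package v1example sketches how controller-gen derives CRD descriptions.
package v1example

// EKSExampleSpec is an illustrative spec type; with controller-gen v0.14+
// the doc comment on the field below is carried into the generated CRD as a
// multi-line description rather than a re-wrapped single line.
type EKSExampleSpec struct {
	// ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then
	// the ip family will be set to ipv6.
	// +optional
	ServiceIPV6Cidr *string `json:"serviceIPV6Cidr,omitempty"`
}
```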
@@ -409,8 +420,9 @@ spec: type: string type: array serviceIPV6Cidr: - description: ServiceIPV6Cidr is the ipv6 cidr range of the cluster. - If this is specified then the ip family will be set to ipv6. + description: |- + ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then + the ip family will be set to ipv6. type: string useMaxPods: description: UseMaxPods sets --max-pods for the kubelet when true. @@ -500,37 +512,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml index 49e37a9948..0a63027e0a 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: eksconfigtemplates.bootstrap.cluster.x-k8s.io spec: group: bootstrap.cluster.x-k8s.io @@ -25,14 +25,19 @@ spec: API. 
properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -56,14 +61,13 @@ spec: to use when bootstrapping EKS. type: string dnsClusterIP: - description: DNSClusterIP overrides the IP address to use - for DNS queries within the cluster. + description: ' DNSClusterIP overrides the IP address to use + for DNS queries within the cluster.' type: string dockerConfigJson: - description: DockerConfigJson is used for the contents of - the /etc/docker/daemon.json file. Useful if you want a custom - config differing from the default one in the AMI. This is - expected to be a json string. + description: |- + DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI. + This is expected to be a json string. type: string kubeletExtraArgs: additionalProperties: @@ -76,8 +80,8 @@ spec: container to use. properties: accountNumber: - description: AccountNumber is the AWS account number to - pull the pause container from. + description: ' AccountNumber is the AWS account number + to pull the pause container from.' type: string version: description: Version is the tag of the pause container @@ -88,9 +92,9 @@ spec: - version type: object serviceIPV6Cidr: - description: ServiceIPV6Cidr is the ipv6 cidr range of the - cluster. If this is specified then the ip family will be - set to ipv6. + description: |- + ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then + the ip family will be set to ipv6. type: string useMaxPods: description: UseMaxPods sets --max-pods for the kubelet when @@ -111,14 +115,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -174,10 +183,9 @@ spec: to be used. If set to None, no label is used. type: string overwrite: - description: Overwrite defines whether or not to - overwrite any existing filesystem. If true, any - pre-existing file system will be destroyed. Use - with Caution. + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. type: boolean partition: description: 'Partition specifies the partition @@ -202,22 +210,21 @@ spec: description: Device is the name of the device. type: string layout: - description: Layout specifies the device layout. - If it is true, a single partition will be created - for the entire device. When layout is false, it - means don't partition or ignore existing partitioning. + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. type: boolean overwrite: - description: Overwrite describes whether to skip - checks and create the partition if a partition - or filesystem is found on the device. Use with - caution. Default is 'false'. + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. type: boolean tableType: - description: 'TableType specifies the tupe of partition - table. The following are supported: ''mbr'': default - and setups a MS-DOS partition table ''gpt'': setups - a GPT partition table' + description: |- + TableType specifies the tupe of partition table. The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table type: string required: - device @@ -226,14 +233,13 @@ spec: type: array type: object dnsClusterIP: - description: DNSClusterIP overrides the IP address to use - for DNS queries within the cluster. + description: ' DNSClusterIP overrides the IP address to use + for DNS queries within the cluster.' type: string dockerConfigJson: - description: DockerConfigJson is used for the contents of - the /etc/docker/daemon.json file. Useful if you want a custom - config differing from the default one in the AMI. This is - expected to be a json string. + description: |- + DockerConfigJson is used for the contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI. + This is expected to be a json string. type: string files: description: Files specifies extra files to be passed to user_data @@ -329,8 +335,8 @@ spec: container to use. 
properties: accountNumber: - description: AccountNumber is the AWS account number to - pull the pause container from. + description: ' AccountNumber is the AWS account number + to pull the pause container from.' type: string version: description: Version is the tag of the pause container @@ -353,9 +359,9 @@ spec: type: string type: array serviceIPV6Cidr: - description: ServiceIPV6Cidr is the ipv6 cidr range of the - cluster. If this is specified then the ip family will be - set to ipv6. + description: |- + ServiceIPV6Cidr is the ipv6 cidr range of the cluster. If this is specified then + the ip family will be set to ipv6. type: string useMaxPods: description: UseMaxPods sets --max-pods for the kubelet when diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index 3227df4d81..345b3f4379 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io spec: group: controlplane.cluster.x-k8s.io @@ -47,14 +47,19 @@ spec: Control Plane API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -65,9 +70,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object addons: description: Addons defines the EKS addons to enable with the EKS @@ -80,8 +85,9 @@ spec: type: string conflictResolution: default: none - description: ConflictResolution is used to declare what should - happen if there are parameter conflicts. 
Defaults to none + description: |- + ConflictResolution is used to declare what should happen if there + are parameter conflicts. Defaults to none enum: - overwrite - none @@ -104,38 +110,39 @@ spec: type: array associateOIDCProvider: default: false - description: AssociateOIDCProvider can be enabled to automatically - create an identity provider for the controller for use with IAM - roles for service accounts + description: |- + AssociateOIDCProvider can be enabled to automatically create an identity + provider for the controller for use with IAM roles for service accounts type: boolean bastion: description: Bastion contains options to configure the bastion host. properties: allowedCIDRBlocks: - description: AllowedCIDRBlocks is a list of CIDR blocks allowed - to access the bastion host. They are set as ingress rules for - the Bastion host's Security Group (defaults to 0.0.0.0/0). + description: |- + AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. + They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). items: type: string type: array ami: - description: AMI will use the specified AMI to boot the bastion. - If not specified, the AMI will default to one picked out in - public space. + description: |- + AMI will use the specified AMI to boot the bastion. If not specified, + the AMI will default to one picked out in public space. type: string disableIngressRules: - description: DisableIngressRules will ensure there are no Ingress - rules in the bastion host's security group. Requires AllowedCIDRBlocks - to be empty. + description: |- + DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. + Requires AllowedCIDRBlocks to be empty. type: boolean enabled: - description: Enabled allows this provider to create a bastion - host instance with a public ip to access the VPC private network. + description: |- + Enabled allows this provider to create a bastion host instance + with a public ip to access the VPC private network. type: boolean instanceType: - description: InstanceType will use the specified instance type - for the bastion. If not specified, Cluster API Provider AWS - will use t3.micro for all regions except us-east-1, where t2.micro + description: |- + InstanceType will use the specified instance type for the bastion. If not specified, + Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro will be the default. type: string type: object @@ -156,18 +163,18 @@ spec: type: object disableVPCCNI: default: false - description: DisableVPCCNI indicates that the Amazon VPC CNI should - be disabled. With EKS clusters the Amazon VPC CNI is automatically - installed into the cluster. For clusters where you want to use an - alternate CNI this option provides a way to specify that the Amazon - VPC CNI should be deleted. You cannot set this to true if you are - using the Amazon VPC CNI addon. + description: |- + DisableVPCCNI indicates that the Amazon VPC CNI should be disabled. With EKS clusters the + Amazon VPC CNI is automatically installed into the cluster. For clusters where you want + to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI + should be deleted. You cannot set this to true if you are using the + Amazon VPC CNI addon. type: boolean eksClusterName: - description: EKSClusterName allows you to specify the name of the - EKS cluster in AWS. 
If you don't specify a name then a default name - will be created based on the namespace and name of the managed control - plane. + description: |- + EKSClusterName allows you to specify the name of the EKS cluster in + AWS. If you don't specify a name then a default name will be created + based on the namespace and name of the managed control plane. type: string encryptionConfig: description: EncryptionConfig specifies the encryption configuration @@ -203,10 +210,10 @@ spec: type: array type: object iamAuthenticatorConfig: - description: IAMAuthenticatorConfig allows the specification of any - additional user or role mappings for use when generating the aws-iam-authenticator - configuration. If this is nil the default configuration is still - generated for the cluster. + description: |- + IAMAuthenticatorConfig allows the specification of any additional user or role mappings + for use when generating the aws-iam-authenticator configuration. If this is nil the + default configuration is still generated for the cluster. properties: mapRoles: description: RoleMappings is a list of role mappings @@ -258,9 +265,9 @@ spec: type: array type: object identityRef: - description: IdentityRef is a reference to an identity to be used - when reconciling the managed control plane. If no identity is specified, - the default identity for this controller will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. @@ -278,30 +285,32 @@ spec: - name type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating system - used to look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines unless - a machine specifies a different ImageLookupBaseOS. + description: |- + ImageLookupBaseOS is the name of the base operating system used to look + up machine images when a machine does not specify an AMI. When set, this + will be used for all cluster machines unless a machine specifies a + different ImageLookupBaseOS. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. Supports substitutions for {{.BaseOS}} - and {{.K8sVersion}} with the base OS and kubernetes version, respectively. - The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the - default), and the kubernetes version as defined by the packages - produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up machine images when + a machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. + Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + OS and kubernetes version, respectively. 
The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: - description: ImageLookupOrg is the AWS Organization ID to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. + description: |- + ImageLookupOrg is the AWS Organization ID to look up machine images when a + machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. type: string kubeProxy: description: KubeProxy defines managed attributes of the kube-proxy @@ -309,18 +318,18 @@ spec: properties: disable: default: false - description: Disable set to true indicates that kube-proxy should - be disabled. With EKS clusters kube-proxy is automatically installed - into the cluster. For clusters where you want to use kube-proxy - functionality that is provided with an alternate CNI, this option - provides a way to specify that the kube-proxy daemonset should - be deleted. You cannot set this to true if you are using the - Amazon kube-proxy addon. + description: |- + Disable set to true indicates that kube-proxy should be disabled. With EKS clusters + kube-proxy is automatically installed into the cluster. For clusters where you want + to use kube-proxy functionality that is provided with an alternate CNI, this option + provides a way to specify that the kube-proxy daemonset should be deleted. You cannot + set this to true if you are using the Amazon kube-proxy addon. type: boolean type: object logging: - description: Logging specifies which EKS Cluster logs should be enabled. - Entries for each of the enabled logs will be sent to CloudWatch + description: |- + Logging specifies which EKS Cluster logs should be enabled. Entries for + each of the enabled logs will be sent to CloudWatch properties: apiServer: default: false @@ -384,6 +393,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways IPs + as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in IP),"tcp", @@ -404,9 +417,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access from. - Cannot be specified with CidrBlocks. The field will be - combined with source security group IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -434,10 +447,9 @@ spec: description: CNI configuration properties: cniIngressRules: - description: CNIIngressRules specify rules to apply to control - plane and worker node security groups. The source for the - rule will be set to control plane and worker security group - IDs. 
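The ImageLookupFormat description reflowed above documents a Go text/template with {{.BaseOS}} and {{.K8sVersion}} placeholders. A small standalone sketch of how the documented default format expands (inputs are the example values from the description, not output of the controller):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Default AMI lookup format quoted in the field description above.
	const format = "capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*"

	tmpl := template.Must(template.New("ami").Parse(format))

	var out bytes.Buffer
	if err := tmpl.Execute(&out, map[string]string{
		"BaseOS":     "ubuntu",
		"K8sVersion": "1.18.0", // version without the leading "v"
	}); err != nil {
		panic(err)
	}

	fmt.Println(out.String()) // capa-ami-ubuntu-?1.18.0-*
}
```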
+ description: |- + CNIIngressRules specify rules to apply to control plane and worker node security groups. + The source for the rule will be set to control plane and worker security group IDs. items: description: CNIIngressRule defines an AWS ingress rule for CNI requirements. @@ -465,9 +477,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set of security - groups to use for cluster instances This is optional - if not - provided new security groups will be created for the cluster + description: |- + SecurityGroupOverrides is an optional set of security groups to use for cluster instances + This is optional - if not provided new security groups will be created for the cluster type: object subnets: description: Subnets configuration. @@ -483,28 +495,27 @@ spec: the provider creates a managed VPC. type: string id: - description: "ID defines a unique identifier to reference - this resource. If you're bringing your subnet, set the - AWS subnet-id here, it must start with `subnet-`. \n When - the VPC is managed by CAPA, and you'd like the provider - to create a subnet for you, the id can be set to any placeholder - value that does not start with `subnet-`; upon creation, - the subnet AWS identifier will be populated in the `ResourceID` - field and the `id` field is going to be used as the subnet - name. If you specify a tag called `Name`, it takes precedence." + description: |- + ID defines a unique identifier to reference this resource. + If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`. + + + When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you, + the id can be set to any placeholder value that does not start with `subnet-`; + upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and + the `id` field is going to be used as the subnet name. If you specify a tag + called `Name`, it takes precedence. type: string ipv6CidrBlock: - description: IPv6CidrBlock is the IPv6 CIDR block to be - used when the provider creates a managed VPC. A subnet - can have an IPv4 and an IPv6 address. IPv6 is only supported - in managed clusters, this field cannot be set on AWSCluster - object. + description: |- + IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + A subnet can have an IPv4 and an IPv6 address. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: string isIpv6: - description: IsIPv6 defines the subnet as an IPv6 subnet. - A subnet is IPv6 when it is associated with a VPC that - has IPv6 enabled. IPv6 is only supported in managed clusters, - this field cannot be set on AWSCluster object. + description: |- + IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: boolean isPublic: description: IsPublic defines the subnet as a public subnet. @@ -512,17 +523,23 @@ spec: table that has a route to an internet gateway. type: boolean natGatewayId: - description: NatGatewayID is the NAT gateway id associated - with the subnet. Ignored unless the subnet is managed - by the provider, in which case this is set on the public - subnet where the NAT gateway resides. It is then used - to determine routes for private subnets in the same AZ - as the public subnet. 
+ description: |- + NatGatewayID is the NAT gateway id associated with the subnet. + Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. + type: string + parentZoneName: + description: |- + ParentZoneName is the zone name where the current subnet's zone is tied when + the zone is a Local Zone. + + + The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName + to select the correct private route table to egress traffic to the internet. type: string resourceID: - description: ResourceID is the subnet identifier from AWS, - READ ONLY. This field is populated when the provider manages - the subnet. + description: |- + ResourceID is the subnet identifier from AWS, READ ONLY. + This field is populated when the provider manages the subnet. type: string routeTableId: description: RouteTableID is the routing table id associated @@ -534,6 +551,42 @@ spec: description: Tags is a collection of tags describing the resource. type: object + zoneType: + description: |- + ZoneType defines the type of the zone where the subnet is created. + + + The valid values are availability-zone, local-zone, and wavelength-zone. + + + Subnet with zone type availability-zone (regular) is always selected to create cluster + resources, like Load Balancers, NAT Gateways, Contol Plane nodes, etc. + + + Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create + regular cluster resources. + + + The public subnet in availability-zone or local-zone is associated with regular public + route table with default route entry to a Internet Gateway. + + + The public subnet in wavelength-zone is associated with a carrier public + route table with default route entry to a Carrier Gateway. + + + The private subnet in the availability-zone is associated with a private route table with + the default route entry to a NAT Gateway created in that zone. + + + The private subnet in the local-zone or wavelength-zone is associated with a private route table with + the default route entry re-using the NAT Gateway in the Region (preferred from the + parent zone, the zone type availability-zone in the region, or first table available). + enum: + - availability-zone + - local-zone + - wavelength-zone + type: string required: - id type: object @@ -546,40 +599,84 @@ spec: properties: availabilityZoneSelection: default: Ordered - description: 'AvailabilityZoneSelection specifies how AZs - should be selected if there are more AZs in a region than - specified by AvailabilityZoneUsageLimit. There are 2 selection - schemes: Ordered - selects based on alphabetical order Random - - selects AZs randomly in a region Defaults to Ordered' + description: |- + AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: + Ordered - selects based on alphabetical order + Random - selects AZs randomly in a region + Defaults to Ordered enum: - Ordered - Random type: string availabilityZoneUsageLimit: default: 3 - description: AvailabilityZoneUsageLimit specifies the maximum - number of availability zones (AZ) that should be used in - a region when automatically creating subnets. If a region - has more than this number of AZs then this number of AZs - will be picked randomly when creating default subnets. 
Defaults - to 3 + description: |- + AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + should be used in a region when automatically creating subnets. If a region has more + than this number of AZs then this number of AZs will be picked randomly when creating + default subnets. Defaults to 3 minimum: 1 type: integer + carrierGatewayId: + description: |- + CarrierGatewayID is the id of the internet gateway associated with the VPC, + for carrier network (Wavelength Zones). + type: string + x-kubernetes-validations: + - message: Carrier Gateway ID must start with 'cagw-' + rule: self.startsWith('cagw-') cidrBlock: - description: CidrBlock is the CIDR block to be used when the - provider creates a managed VPC. Defaults to 10.0.0.0/16. + description: |- + CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + Defaults to 10.0.0.0/16. Mutually exclusive with IPAMPool. type: string + elasticIpPool: + description: |- + ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool + brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for + the API Server. + properties: + publicIpv4Pool: + description: |- + PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + resource ID starts with 'ipv4pool-ec2'. + maxLength: 30 + type: string + publicIpv4PoolFallbackOrder: + description: |- + PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, + no more IPv4 address available in the pool. + + + When set to 'amazon-pool', the controller check if the pool has available IPv4 address, when pool has reached the + IPv4 limit, the address will be claimed from Amazon-pool (default). + + + When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + enum: + - amazon-pool + - none + type: string + x-kubernetes-validations: + - message: allowed values are 'none' and 'amazon-pool' + rule: self in ['none','amazon-pool'] + type: object emptyRoutesDefaultVPCSecurityGroup: - description: "EmptyRoutesDefaultVPCSecurityGroup specifies - whether the default VPC security group ingress and egress - rules should be removed. \n By default, when creating a - VPC, AWS creates a security group called `default` with - ingress and egress rules that allow traffic from anywhere. - The group could be used as a potential surface attack and - it's generally suggested that the group rules are removed - or modified appropriately. \n NOTE: This only applies when - the VPC is managed by the Cluster API AWS controller." + description: |- + EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress + and egress rules should be removed. + + + By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress + rules that allow traffic from anywhere. The group could be used as a potential surface attack and + it's generally suggested that the group rules are removed or modified appropriately. + + + NOTE: This only applies when the VPC is managed by the Cluster API AWS controller. 
type: boolean id: description: ID is the vpc-id of the VPC this provider should @@ -590,8 +687,9 @@ spec: associated with the VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv4 pool to be used for - VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv4 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this provider @@ -602,20 +700,22 @@ spec: should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR you want - to allocate to VPC from an Amazon VPC IP Address Manager - (IPAM) pool. Defaults to /16 for IPv4 if not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object ipv6: - description: IPv6 contains ipv6 specific settings for the - network. Supported only in managed clusters. This field - cannot be set on AWSCluster object. + description: |- + IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. + This field cannot be set on AWSCluster object. properties: cidrBlock: - description: CidrBlock is the CIDR block provided by Amazon - when VPC has enabled IPv6. Mutually exclusive with IPAMPool. + description: |- + CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6. + Mutually exclusive with IPAMPool. type: string egressOnlyInternetGatewayId: description: EgressOnlyInternetGatewayID is the id of @@ -623,8 +723,9 @@ spec: IPv6 enabled VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv6 pool to be used - for VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv6 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this provider @@ -635,31 +736,60 @@ spec: provider should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR you - want to allocate to VPC from an Amazon VPC IP Address - Manager (IPAM) pool. Defaults to /16 for IPv4 if - not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object poolId: - description: PoolID is the IP pool which must be defined - in case of BYO IP is defined. Must be specified if CidrBlock - is set. Mutually exclusive with IPAMPool. + description: |- + PoolID is the IP pool which must be defined in case of BYO IP is defined. + Must be specified if CidrBlock is set. + Mutually exclusive with IPAMPool. type: string type: object privateDnsHostnameTypeOnLaunch: - description: PrivateDNSHostnameTypeOnLaunch is the type of - hostname to assign to instances in the subnet at launch. - For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an - instance DNS name can be based on the instance IPv4 address - (ip-name) or the instance ID (resource-name). For IPv6 only - subnets, an instance DNS name must be based on the instance - ID (resource-name). + description: |- + PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch. + For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) + or the instance ID (resource-name). 
For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name). enum: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: |- + SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use + a separate IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block and settings + to associate with the managed VPC. Currently, only IPv4 + is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR block to + associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array + subnetSchema: + default: PreferPrivate + description: |- + SubnetSchema specifies how CidrBlock should be divided on subnets in the VPC depending on the number of AZs. + PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. + PreferPublic - have the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet + that will be further sub-divided for the private subnets. + Defaults to PreferPrivate + enum: + - PreferPrivate + - PreferPublic + type: string tags: additionalProperties: type: string @@ -668,47 +798,50 @@ spec: type: object type: object oidcIdentityProviderConfig: - description: IdentityProviderconfig is used to specify the oidc provider - config to be attached with this eks cluster + description: |- + IdentityProviderconfig is used to specify the oidc provider config + to be attached with this eks cluster properties: clientId: - description: This is also known as audience. The ID for the client - application that makes authentication requests to the OpenID - identity provider. + description: |- + This is also known as audience. The ID for the client application that makes + authentication requests to the OpenID identity provider. type: string groupsClaim: description: The JWT claim that the provider uses to return your groups. type: string groupsPrefix: - description: 'The prefix that is prepended to group claims to - prevent clashes with existing names (such as system: groups). - For example, the valueoidc: will create group names like oidc:engineering - and oidc:infra.' + description: |- + The prefix that is prepended to group claims to prevent clashes with existing + names (such as system: groups). For example, the valueoidc: will create group + names like oidc:engineering and oidc:infra. type: string identityProviderConfigName: - description: "The name of the OIDC provider configuration. \n - IdentityProviderConfigName is a required field" + description: |- + The name of the OIDC provider configuration. + + + IdentityProviderConfigName is a required field type: string issuerUrl: - description: The URL of the OpenID identity provider that allows - the API server to discover public signing keys for verifying - tokens. The URL must begin with https:// and should correspond - to the iss claim in the provider's OIDC ID tokens. Per the OIDC - standard, path components are allowed but query parameters are + description: |- + The URL of the OpenID identity provider that allows the API server to discover + public signing keys for verifying tokens. The URL must begin with https:// + and should correspond to the iss claim in the provider's OIDC ID tokens. 
+              Per the OIDC standard, path components are allowed but query parameters are
               not. Typically the URL consists of only a hostname, like https://server.example.org
-              or https://example.com. This URL should point to the level below
-              .well-known/openid-configuration and must be publicly accessible
-              over the internet.
+              or https://example.com. This URL should point to the level below .well-known/openid-configuration
+              and must be publicly accessible over the internet.
             type: string
           requiredClaims:
             additionalProperties:
               type: string
-            description: The key value pairs that describe required claims
-              in the identity token. If set, each claim is verified to be
-              present in the token with a matching value. For the maximum
-              number of claims that you can require, see Amazon EKS service
-              quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
+            description: |-
+              The key value pairs that describe required claims in the identity token.
+              If set, each claim is verified to be present in the token with a matching
+              value. For the maximum number of claims that you can require, see Amazon
+              EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)
               in the Amazon EKS User Guide.
             type: object
           tags:
             additionalProperties:
               type: string
            description: tags to apply to oidc identity provider association
             type: object
           usernameClaim:
-            description: The JSON Web Token (JWT) claim to use as the username.
-              The default is sub, which is expected to be a unique identifier
-              of the end user. You can choose other claims, such as email
-              or name, depending on the OpenID identity provider. Claims other
-              than email are prefixed with the issuer URL to prevent naming
+            description: |-
+              The JSON Web Token (JWT) claim to use as the username. The default is sub,
+              which is expected to be a unique identifier of the end user. You can choose
+              other claims, such as email or name, depending on the OpenID identity provider.
+              Claims other than email are prefixed with the issuer URL to prevent naming
               clashes with other plug-ins.
             type: string
           usernamePrefix:
-            description: The prefix that is prepended to username claims to
-              prevent clashes with existing names. If you do not provide this
-              field, and username is a value other than email, the prefix
-              defaults to issuerurl#. You can use the value - to disable all
-              prefixing.
+            description: |-
+              The prefix that is prepended to username claims to prevent clashes with existing
+              names. If you do not provide this field, and username is a value other than
+              email, the prefix defaults to issuerurl#. You can use the value - to disable
+              all prefixing.
             type: string
         type: object
       region:
         description: The AWS Region the cluster lives in.
         type: string
       roleAdditionalPolicies:
-        description: RoleAdditionalPolicies allows you to attach additional
-          polices to the control plane role. You must enable the EKSAllowAddRoles
+        description: |-
+          RoleAdditionalPolicies allows you to attach additional policies to
+          the control plane role. You must enable the EKSAllowAddRoles
          feature flag to incorporate these into the created role.
        items:
          type: string
        type: array
      roleName:
-        description: RoleName specifies the name of IAM role that gives EKS
-          permission to make API calls. If the role is pre-existing we will
-          treat it as unmanaged and not delete it on deletion. If the EKSEnableIAM
-          feature flag is true and no name is supplied then a role is created.
+        description: |-
+          RoleName specifies the name of IAM role that gives EKS
+          permission to make API calls. 
If the role is pre-existing + we will treat it as unmanaged and not delete it on + deletion. If the EKSEnableIAM feature flag is true + and no name is supplied then a role is created. minLength: 2 type: string secondaryCidrBlock: - description: SecondaryCidrBlock is the additional CIDR range to use - for pod IPs. Must be within the 100.64.0.0/10 or 198.19.0.0/16 range. + description: |- + SecondaryCidrBlock is the additional CIDR range to use for pod IPs. + Must be within the 100.64.0.0/10 or 198.19.0.0/16 range. type: string sshKeyName: description: SSHKeyName is the name of the ssh key to attach to the @@ -760,18 +897,20 @@ spec: type: string tokenMethod: default: iam-authenticator - description: TokenMethod is used to specify the method for obtaining - a client token for communicating with EKS iam-authenticator - obtains - a client token using iam-authentictor aws-cli - obtains a client - token using the AWS CLI Defaults to iam-authenticator + description: |- + TokenMethod is used to specify the method for obtaining a client token for communicating with EKS + iam-authenticator - obtains a client token using iam-authentictor + aws-cli - obtains a client token using the AWS CLI + Defaults to iam-authenticator enum: - iam-authenticator - aws-cli type: string version: - description: Version defines the desired Kubernetes version. If no - version number is supplied then the latest version of Kubernetes - that EKS supports will be used. + description: |- + Version defines the desired Kubernetes version. If no version number + is supplied then the latest version of Kubernetes that EKS supports + will be used. minLength: 2 pattern: ^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$ type: string @@ -791,15 +930,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -812,9 +952,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: Specify whether the ConfigMap or its @@ -825,11 +966,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -844,10 +983,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -877,9 +1015,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -982,6 +1121,10 @@ spec: availabilityZone: description: Availability zone of instance type: string + capacityReservationId: + description: CapacityReservationID specifies the target Capacity + Reservation into which the instance should be launched. + type: string ebsOptimized: description: Indicates whether the instance is optimized for Amazon EBS I/O. @@ -1005,48 +1148,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint - on your instances. \n If you specify a value of disabled, - you cannot access your instance metadata. \n Default: enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit for - instance metadata requests. The larger the number, the further - instance metadata requests can travel. \n Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. + + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance metadata - requests. \n If the state is optional, you can choose to - retrieve instance metadata with or without a session token - on your request. If you retrieve the IAM role credentials - without a token, the version 1.0 role credentials are returned. 
- If you retrieve the IAM role credentials using a valid session - token, the version 2.0 role credentials are returned. \n - If the state is required, you must send a session token - with any instance metadata retrieval requests. In this state, - retrieving the IAM role credentials always returns the version - 2.0 credentials; the version 1.0 credentials are not available. - \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance tags - from the instance metadata. Set to disabled to turn off - access to instance tags from the instance metadata. For - more information, see Work with instance tags using the - instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). - \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. + For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -1074,11 +1233,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by - the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -1086,9 +1244,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size or - 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -1109,6 +1267,15 @@ spec: description: PlacementGroupName specifies the name of the placement group in which to launch the instance. type: string + placementGroupPartition: + description: |- + PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + strategy set to partition. 
+ format: int64 + maximum: 7 + minimum: 1 + type: integer privateDnsName: description: PrivateDNSName is the options for the instance hostname. properties: @@ -1132,6 +1299,10 @@ spec: privateIp: description: The private IPv4 address assigned to the instance. type: string + publicIPOnLaunch: + description: PublicIPOnLaunch is the option to associate a public + IP on instance launch + type: boolean publicIp: description: The public IPv4 address assigned to the instance, if applicable. @@ -1147,11 +1318,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -1159,9 +1329,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -1211,9 +1381,9 @@ spec: description: The instance type. type: string userData: - description: UserData is the raw data script passed to the instance - which is run upon bootstrap. This field must not be base64 encoded - and should only be used when running a new instance. + description: |- + UserData is the raw data script passed to the instance which is run upon bootstrap. + This field must not be base64 encoded and should only be used when running a new instance. type: string volumeIDs: description: IDs of the instance's volumes @@ -1231,37 +1401,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. 
+ description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -1271,15 +1441,15 @@ spec: type: array externalManagedControlPlane: default: true - description: ExternalManagedControlPlane indicates to cluster-api - that the control plane is managed by an external service such as - AKS, EKS, GKE, etc. + description: |- + ExternalManagedControlPlane indicates to cluster-api that the control plane + is managed by an external service such as AKS, EKS, GKE, etc. type: boolean failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: @@ -1296,12 +1466,14 @@ spec: zones that can be used type: object failureMessage: - description: ErrorMessage indicates that there is a terminal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + ErrorMessage indicates that there is a terminal problem reconciling the + state, and will be set to a descriptive error message. type: string identityProviderStatus: - description: IdentityProviderStatus holds the status for associated - identity provider + description: |- + IdentityProviderStatus holds the status for + associated identity provider properties: arn: description: ARN holds the ARN of associated identity provider @@ -1312,8 +1484,9 @@ spec: type: string type: object initialized: - description: Initialized denotes whether or not the control plane - has the uploaded kubernetes config-map. + description: |- + Initialized denotes whether or not the control plane has the + uploaded kubernetes config-map. type: boolean networkStatus: description: Networks holds details about the AWS networking resources @@ -1323,8 +1496,9 @@ spec: description: APIServerELB is the Kubernetes api server load balancer. properties: arn: - description: ARN of the load balancer. Unlike the ClassicLB, - ARN is used mostly to define and get it. + description: |- + ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + to define and get it. type: string attributes: description: ClassicElbAttributes defines extra attributes @@ -1335,9 +1509,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. 
+ description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. format: int64 type: integer type: object @@ -1371,13 +1545,14 @@ spec: for a load balancer. type: string targetGroup: - description: TargetGroupSpec specifies target group - settings for a given listener. This is created first, - and the ARN is then passed to the listener. + description: |- + TargetGroupSpec specifies target group settings for a given listener. + This is created first, and the ARN is then passed to the listener. properties: name: description: Name of the TargetGroup. Must be unique over the same group of listeners. + maxLength: 32 type: string port: description: Port is the exposed port @@ -1413,6 +1588,9 @@ spec: timeoutSeconds: format: int64 type: integer + unhealthyThresholdCount: + format: int64 + type: integer type: object vpcId: type: string @@ -1436,19 +1614,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -1500,9 +1678,9 @@ spec: - nlb type: string name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -1538,8 +1716,9 @@ spec: api server load balancer. properties: arn: - description: ARN of the load balancer. Unlike the ClassicLB, - ARN is used mostly to define and get it. + description: |- + ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + to define and get it. type: string attributes: description: ClassicElbAttributes defines extra attributes @@ -1550,9 +1729,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. + description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. format: int64 type: integer type: object @@ -1586,13 +1765,14 @@ spec: for a load balancer. type: string targetGroup: - description: TargetGroupSpec specifies target group - settings for a given listener. This is created first, - and the ARN is then passed to the listener. + description: |- + TargetGroupSpec specifies target group settings for a given listener. 
+ This is created first, and the ARN is then passed to the listener. properties: name: description: Name of the TargetGroup. Must be unique over the same group of listeners. + maxLength: 32 type: string port: description: Port is the exposed port @@ -1628,6 +1808,9 @@ spec: timeoutSeconds: format: int64 type: integer + unhealthyThresholdCount: + format: int64 + type: integer type: object vpcId: type: string @@ -1651,19 +1834,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -1715,9 +1898,9 @@ spec: - nlb type: string name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -1776,6 +1959,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways + IPs as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in @@ -1797,10 +1984,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access - from. Cannot be specified with CidrBlocks. The field - will be combined with source security group IDs - if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -1855,8 +2041,9 @@ spec: type: object ready: default: false - description: Ready denotes that the AWSManagedControlPlane API Server - is ready to receive requests and that the VPC infra is ready. + description: |- + Ready denotes that the AWSManagedControlPlane API Server is ready to + receive requests and that the VPC infra is ready. type: boolean required: - ready @@ -1895,14 +2082,19 @@ spec: Control Plane API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -1913,9 +2105,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object addons: description: Addons defines the EKS addons to enable with the EKS @@ -1928,8 +2120,9 @@ spec: type: string conflictResolution: default: overwrite - description: ConflictResolution is used to declare what should - happen if there are parameter conflicts. Defaults to none + description: |- + ConflictResolution is used to declare what should happen if there + are parameter conflicts. Defaults to none enum: - overwrite - none @@ -1952,38 +2145,39 @@ spec: type: array associateOIDCProvider: default: false - description: AssociateOIDCProvider can be enabled to automatically - create an identity provider for the controller for use with IAM - roles for service accounts + description: |- + AssociateOIDCProvider can be enabled to automatically create an identity + provider for the controller for use with IAM roles for service accounts type: boolean bastion: description: Bastion contains options to configure the bastion host. properties: allowedCIDRBlocks: - description: AllowedCIDRBlocks is a list of CIDR blocks allowed - to access the bastion host. They are set as ingress rules for - the Bastion host's Security Group (defaults to 0.0.0.0/0). + description: |- + AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. + They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). items: type: string type: array ami: - description: AMI will use the specified AMI to boot the bastion. - If not specified, the AMI will default to one picked out in - public space. + description: |- + AMI will use the specified AMI to boot the bastion. If not specified, + the AMI will default to one picked out in public space. type: string disableIngressRules: - description: DisableIngressRules will ensure there are no Ingress - rules in the bastion host's security group. Requires AllowedCIDRBlocks - to be empty. + description: |- + DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. + Requires AllowedCIDRBlocks to be empty. type: boolean enabled: - description: Enabled allows this provider to create a bastion - host instance with a public ip to access the VPC private network. 
+ description: |- + Enabled allows this provider to create a bastion host instance + with a public ip to access the VPC private network. type: boolean instanceType: - description: InstanceType will use the specified instance type - for the bastion. If not specified, Cluster API Provider AWS - will use t3.micro for all regions except us-east-1, where t2.micro + description: |- + InstanceType will use the specified instance type for the bastion. If not specified, + Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro will be the default. type: string type: object @@ -2003,10 +2197,10 @@ spec: - port type: object eksClusterName: - description: EKSClusterName allows you to specify the name of the - EKS cluster in AWS. If you don't specify a name then a default name - will be created based on the namespace and name of the managed control - plane. + description: |- + EKSClusterName allows you to specify the name of the EKS cluster in + AWS. If you don't specify a name then a default name will be created + based on the namespace and name of the managed control plane. type: string encryptionConfig: description: EncryptionConfig specifies the encryption configuration @@ -2042,10 +2236,10 @@ spec: type: array type: object iamAuthenticatorConfig: - description: IAMAuthenticatorConfig allows the specification of any - additional user or role mappings for use when generating the aws-iam-authenticator - configuration. If this is nil the default configuration is still - generated for the cluster. + description: |- + IAMAuthenticatorConfig allows the specification of any additional user or role mappings + for use when generating the aws-iam-authenticator configuration. If this is nil the + default configuration is still generated for the cluster. properties: mapRoles: description: RoleMappings is a list of role mappings @@ -2097,9 +2291,9 @@ spec: type: array type: object identityRef: - description: IdentityRef is a reference to an identity to be used - when reconciling the managed control plane. If no identity is specified, - the default identity for this controller will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. @@ -2117,30 +2311,32 @@ spec: - name type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating system - used to look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines unless - a machine specifies a different ImageLookupBaseOS. + description: |- + ImageLookupBaseOS is the name of the base operating system used to look + up machine images when a machine does not specify an AMI. When set, this + will be used for all cluster machines unless a machine specifies a + different ImageLookupBaseOS. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. Supports substitutions for {{.BaseOS}} - and {{.K8sVersion}} with the base OS and kubernetes version, respectively. 
- The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the - default), and the kubernetes version as defined by the packages - produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up machine images when + a machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. + Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + OS and kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: - description: ImageLookupOrg is the AWS Organization ID to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. + description: |- + ImageLookupOrg is the AWS Organization ID to look up machine images when a + machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. type: string kubeProxy: description: KubeProxy defines managed attributes of the kube-proxy @@ -2148,18 +2344,18 @@ spec: properties: disable: default: false - description: Disable set to true indicates that kube-proxy should - be disabled. With EKS clusters kube-proxy is automatically installed - into the cluster. For clusters where you want to use kube-proxy - functionality that is provided with an alternate CNI, this option - provides a way to specify that the kube-proxy daemonset should - be deleted. You cannot set this to true if you are using the - Amazon kube-proxy addon. + description: |- + Disable set to true indicates that kube-proxy should be disabled. With EKS clusters + kube-proxy is automatically installed into the cluster. For clusters where you want + to use kube-proxy functionality that is provided with an alternate CNI, this option + provides a way to specify that the kube-proxy daemonset should be deleted. You cannot + set this to true if you are using the Amazon kube-proxy addon. type: boolean type: object logging: - description: Logging specifies which EKS Cluster logs should be enabled. - Entries for each of the enabled logs will be sent to CloudWatch + description: |- + Logging specifies which EKS Cluster logs should be enabled. Entries for + each of the enabled logs will be sent to CloudWatch properties: apiServer: default: false @@ -2223,6 +2419,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways IPs + as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. 
Accepted values are "-1" (all), "4" (IP in IP),"tcp", @@ -2243,9 +2443,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access from. - Cannot be specified with CidrBlocks. The field will be - combined with source security group IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -2273,10 +2473,9 @@ spec: description: CNI configuration properties: cniIngressRules: - description: CNIIngressRules specify rules to apply to control - plane and worker node security groups. The source for the - rule will be set to control plane and worker security group - IDs. + description: |- + CNIIngressRules specify rules to apply to control plane and worker node security groups. + The source for the rule will be set to control plane and worker security group IDs. items: description: CNIIngressRule defines an AWS ingress rule for CNI requirements. @@ -2304,9 +2503,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set of security - groups to use for cluster instances This is optional - if not - provided new security groups will be created for the cluster + description: |- + SecurityGroupOverrides is an optional set of security groups to use for cluster instances + This is optional - if not provided new security groups will be created for the cluster type: object subnets: description: Subnets configuration. @@ -2322,28 +2521,27 @@ spec: the provider creates a managed VPC. type: string id: - description: "ID defines a unique identifier to reference - this resource. If you're bringing your subnet, set the - AWS subnet-id here, it must start with `subnet-`. \n When - the VPC is managed by CAPA, and you'd like the provider - to create a subnet for you, the id can be set to any placeholder - value that does not start with `subnet-`; upon creation, - the subnet AWS identifier will be populated in the `ResourceID` - field and the `id` field is going to be used as the subnet - name. If you specify a tag called `Name`, it takes precedence." + description: |- + ID defines a unique identifier to reference this resource. + If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`. + + + When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you, + the id can be set to any placeholder value that does not start with `subnet-`; + upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and + the `id` field is going to be used as the subnet name. If you specify a tag + called `Name`, it takes precedence. type: string ipv6CidrBlock: - description: IPv6CidrBlock is the IPv6 CIDR block to be - used when the provider creates a managed VPC. A subnet - can have an IPv4 and an IPv6 address. IPv6 is only supported - in managed clusters, this field cannot be set on AWSCluster - object. + description: |- + IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + A subnet can have an IPv4 and an IPv6 address. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: string isIpv6: - description: IsIPv6 defines the subnet as an IPv6 subnet. - A subnet is IPv6 when it is associated with a VPC that - has IPv6 enabled. 
IPv6 is only supported in managed clusters, - this field cannot be set on AWSCluster object. + description: |- + IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: boolean isPublic: description: IsPublic defines the subnet as a public subnet. @@ -2351,17 +2549,23 @@ spec: table that has a route to an internet gateway. type: boolean natGatewayId: - description: NatGatewayID is the NAT gateway id associated - with the subnet. Ignored unless the subnet is managed - by the provider, in which case this is set on the public - subnet where the NAT gateway resides. It is then used - to determine routes for private subnets in the same AZ - as the public subnet. + description: |- + NatGatewayID is the NAT gateway id associated with the subnet. + Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. + type: string + parentZoneName: + description: |- + ParentZoneName is the zone name where the current subnet's zone is tied when + the zone is a Local Zone. + + + The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName + to select the correct private route table to egress traffic to the internet. type: string resourceID: - description: ResourceID is the subnet identifier from AWS, - READ ONLY. This field is populated when the provider manages - the subnet. + description: |- + ResourceID is the subnet identifier from AWS, READ ONLY. + This field is populated when the provider manages the subnet. type: string routeTableId: description: RouteTableID is the routing table id associated @@ -2373,6 +2577,42 @@ spec: description: Tags is a collection of tags describing the resource. type: object + zoneType: + description: |- + ZoneType defines the type of the zone where the subnet is created. + + + The valid values are availability-zone, local-zone, and wavelength-zone. + + + Subnet with zone type availability-zone (regular) is always selected to create cluster + resources, like Load Balancers, NAT Gateways, Contol Plane nodes, etc. + + + Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create + regular cluster resources. + + + The public subnet in availability-zone or local-zone is associated with regular public + route table with default route entry to a Internet Gateway. + + + The public subnet in wavelength-zone is associated with a carrier public + route table with default route entry to a Carrier Gateway. + + + The private subnet in the availability-zone is associated with a private route table with + the default route entry to a NAT Gateway created in that zone. + + + The private subnet in the local-zone or wavelength-zone is associated with a private route table with + the default route entry re-using the NAT Gateway in the Region (preferred from the + parent zone, the zone type availability-zone in the region, or first table available). + enum: + - availability-zone + - local-zone + - wavelength-zone + type: string required: - id type: object @@ -2385,40 +2625,84 @@ spec: properties: availabilityZoneSelection: default: Ordered - description: 'AvailabilityZoneSelection specifies how AZs - should be selected if there are more AZs in a region than - specified by AvailabilityZoneUsageLimit. 
There are 2 selection - schemes: Ordered - selects based on alphabetical order Random - - selects AZs randomly in a region Defaults to Ordered' + description: |- + AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: + Ordered - selects based on alphabetical order + Random - selects AZs randomly in a region + Defaults to Ordered enum: - Ordered - Random type: string availabilityZoneUsageLimit: default: 3 - description: AvailabilityZoneUsageLimit specifies the maximum - number of availability zones (AZ) that should be used in - a region when automatically creating subnets. If a region - has more than this number of AZs then this number of AZs - will be picked randomly when creating default subnets. Defaults - to 3 + description: |- + AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + should be used in a region when automatically creating subnets. If a region has more + than this number of AZs then this number of AZs will be picked randomly when creating + default subnets. Defaults to 3 minimum: 1 type: integer + carrierGatewayId: + description: |- + CarrierGatewayID is the id of the internet gateway associated with the VPC, + for carrier network (Wavelength Zones). + type: string + x-kubernetes-validations: + - message: Carrier Gateway ID must start with 'cagw-' + rule: self.startsWith('cagw-') cidrBlock: - description: CidrBlock is the CIDR block to be used when the - provider creates a managed VPC. Defaults to 10.0.0.0/16. + description: |- + CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + Defaults to 10.0.0.0/16. Mutually exclusive with IPAMPool. type: string + elasticIpPool: + description: |- + ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool + brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for + the API Server. + properties: + publicIpv4Pool: + description: |- + PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + resource ID starts with 'ipv4pool-ec2'. + maxLength: 30 + type: string + publicIpv4PoolFallbackOrder: + description: |- + PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, + no more IPv4 address available in the pool. + + + When set to 'amazon-pool', the controller check if the pool has available IPv4 address, when pool has reached the + IPv4 limit, the address will be claimed from Amazon-pool (default). + + + When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + enum: + - amazon-pool + - none + type: string + x-kubernetes-validations: + - message: allowed values are 'none' and 'amazon-pool' + rule: self in ['none','amazon-pool'] + type: object emptyRoutesDefaultVPCSecurityGroup: - description: "EmptyRoutesDefaultVPCSecurityGroup specifies - whether the default VPC security group ingress and egress - rules should be removed. \n By default, when creating a - VPC, AWS creates a security group called `default` with - ingress and egress rules that allow traffic from anywhere. 
- The group could be used as a potential surface attack and - it's generally suggested that the group rules are removed - or modified appropriately. \n NOTE: This only applies when - the VPC is managed by the Cluster API AWS controller." + description: |- + EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress + and egress rules should be removed. + + + By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress + rules that allow traffic from anywhere. The group could be used as a potential surface attack and + it's generally suggested that the group rules are removed or modified appropriately. + + + NOTE: This only applies when the VPC is managed by the Cluster API AWS controller. type: boolean id: description: ID is the vpc-id of the VPC this provider should @@ -2429,8 +2713,9 @@ spec: associated with the VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv4 pool to be used for - VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv4 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this provider @@ -2441,20 +2726,22 @@ spec: should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR you want - to allocate to VPC from an Amazon VPC IP Address Manager - (IPAM) pool. Defaults to /16 for IPv4 if not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object ipv6: - description: IPv6 contains ipv6 specific settings for the - network. Supported only in managed clusters. This field - cannot be set on AWSCluster object. + description: |- + IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. + This field cannot be set on AWSCluster object. properties: cidrBlock: - description: CidrBlock is the CIDR block provided by Amazon - when VPC has enabled IPv6. Mutually exclusive with IPAMPool. + description: |- + CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6. + Mutually exclusive with IPAMPool. type: string egressOnlyInternetGatewayId: description: EgressOnlyInternetGatewayID is the id of @@ -2462,8 +2749,9 @@ spec: IPv6 enabled VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv6 pool to be used - for VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv6 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this provider @@ -2474,31 +2762,60 @@ spec: provider should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR you - want to allocate to VPC from an Amazon VPC IP Address - Manager (IPAM) pool. Defaults to /16 for IPv4 if - not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object poolId: - description: PoolID is the IP pool which must be defined - in case of BYO IP is defined. Must be specified if CidrBlock - is set. Mutually exclusive with IPAMPool. + description: |- + PoolID is the IP pool which must be defined in case of BYO IP is defined. 
+ Must be specified if CidrBlock is set. + Mutually exclusive with IPAMPool. type: string type: object privateDnsHostnameTypeOnLaunch: - description: PrivateDNSHostnameTypeOnLaunch is the type of - hostname to assign to instances in the subnet at launch. - For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an - instance DNS name can be based on the instance IPv4 address - (ip-name) or the instance ID (resource-name). For IPv6 only - subnets, an instance DNS name must be based on the instance - ID (resource-name). + description: |- + PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch. + For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) + or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name). enum: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: |- + SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use + a separate IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block and settings + to associate with the managed VPC. Currently, only IPv4 + is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR block to + associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array + subnetSchema: + default: PreferPrivate + description: |- + SubnetSchema specifies how CidrBlock should be divided on subnets in the VPC depending on the number of AZs. + PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. + PreferPublic - have the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet + that will be further sub-divided for the private subnets. + Defaults to PreferPrivate + enum: + - PreferPrivate + - PreferPublic + type: string tags: additionalProperties: type: string @@ -2507,47 +2824,50 @@ spec: type: object type: object oidcIdentityProviderConfig: - description: IdentityProviderconfig is used to specify the oidc provider - config to be attached with this eks cluster + description: |- + IdentityProviderconfig is used to specify the oidc provider config + to be attached with this eks cluster properties: clientId: - description: This is also known as audience. The ID for the client - application that makes authentication requests to the OpenID - identity provider. + description: |- + This is also known as audience. The ID for the client application that makes + authentication requests to the OpenID identity provider. type: string groupsClaim: description: The JWT claim that the provider uses to return your groups. type: string groupsPrefix: - description: 'The prefix that is prepended to group claims to - prevent clashes with existing names (such as system: groups). - For example, the valueoidc: will create group names like oidc:engineering - and oidc:infra.' + description: |- + The prefix that is prepended to group claims to prevent clashes with existing + names (such as system: groups). For example, the valueoidc: will create group + names like oidc:engineering and oidc:infra. type: string identityProviderConfigName: - description: "The name of the OIDC provider configuration. 
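The `subnetSchema` and `secondaryCidrBlocks` additions in the same vpc block are easiest to read as a manifest fragment. Illustrative only; the CIDR values and the `spec.network.vpc` placement are assumptions.

```yaml
spec:
  network:
    vpc:
      cidrBlock: "10.0.0.0/16"
      subnetSchema: PreferPublic            # one public subnet per AZ; the remainder is sub-divided for private subnets
      secondaryCidrBlocks:                  # extra ranges, e.g. a dedicated pod range for Cilium ENI mode
        - ipv4CidrBlock: "100.64.0.0/16"
```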
\n - IdentityProviderConfigName is a required field" + description: |- + The name of the OIDC provider configuration. + + + IdentityProviderConfigName is a required field type: string issuerUrl: - description: The URL of the OpenID identity provider that allows - the API server to discover public signing keys for verifying - tokens. The URL must begin with https:// and should correspond - to the iss claim in the provider's OIDC ID tokens. Per the OIDC - standard, path components are allowed but query parameters are + description: |- + The URL of the OpenID identity provider that allows the API server to discover + public signing keys for verifying tokens. The URL must begin with https:// + and should correspond to the iss claim in the provider's OIDC ID tokens. + Per the OIDC standard, path components are allowed but query parameters are not. Typically the URL consists of only a hostname, like https://server.example.org - or https://example.com. This URL should point to the level below - .well-known/openid-configuration and must be publicly accessible - over the internet. + or https://example.com. This URL should point to the level below .well-known/openid-configuration + and must be publicly accessible over the internet. type: string requiredClaims: additionalProperties: type: string - description: The key value pairs that describe required claims - in the identity token. If set, each claim is verified to be - present in the token with a matching value. For the maximum - number of claims that you can require, see Amazon EKS service - quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html) + description: |- + The key value pairs that describe required claims in the identity token. + If set, each claim is verified to be present in the token with a matching + value. For the maximum number of claims that you can require, see Amazon + EKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html) in the Amazon EKS User Guide. type: object tags: @@ -2556,19 +2876,19 @@ spec: description: tags to apply to oidc identity provider association type: object usernameClaim: - description: The JSON Web Token (JWT) claim to use as the username. - The default is sub, which is expected to be a unique identifier - of the end user. You can choose other claims, such as email - or name, depending on the OpenID identity provider. Claims other - than email are prefixed with the issuer URL to prevent naming + description: |- + The JSON Web Token (JWT) claim to use as the username. The default is sub, + which is expected to be a unique identifier of the end user. You can choose + other claims, such as email or name, depending on the OpenID identity provider. + Claims other than email are prefixed with the issuer URL to prevent naming clashes with other plug-ins. type: string usernamePrefix: - description: The prefix that is prepended to username claims to - prevent clashes with existing names. If you do not provide this - field, and username is a value other than email, the prefix - defaults to issuerurl#. You can use the value - to disable all - prefixing. + description: |- + The prefix that is prepended to username claims to prevent clashes with existing + names. If you do not provide this field, and username is a value other than + email, the prefix defaults to issuerurl#. You can use the value - to disable + all prefixing. type: string type: object partition: @@ -2578,23 +2898,32 @@ spec: region: description: The AWS Region the cluster lives in. 
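For the OIDC identity provider association, a hedged example of how the fields above combine; the issuer URL, client ID and claim values are hypothetical.

```yaml
spec:
  oidcIdentityProviderConfig:
    identityProviderConfigName: example-oidc   # required
    issuerUrl: https://oidc.example.org        # must use https:// and sit one level above .well-known/openid-configuration
    clientId: example-audience
    usernameClaim: email
    groupsClaim: groups
    groupsPrefix: "oidc:"
    requiredClaims:
      hd: example.org
    tags:
      team: platform
```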
type: string + restrictPrivateSubnets: + default: false + description: RestrictPrivateSubnets indicates that the EKS control + plane should only use private subnets. + type: boolean roleAdditionalPolicies: - description: RoleAdditionalPolicies allows you to attach additional - polices to the control plane role. You must enable the EKSAllowAddRoles + description: |- + RoleAdditionalPolicies allows you to attach additional polices to + the control plane role. You must enable the EKSAllowAddRoles feature flag to incorporate these into the created role. items: type: string type: array roleName: - description: RoleName specifies the name of IAM role that gives EKS - permission to make API calls. If the role is pre-existing we will - treat it as unmanaged and not delete it on deletion. If the EKSEnableIAM - feature flag is true and no name is supplied then a role is created. + description: |- + RoleName specifies the name of IAM role that gives EKS + permission to make API calls. If the role is pre-existing + we will treat it as unmanaged and not delete it on + deletion. If the EKSEnableIAM feature flag is true + and no name is supplied then a role is created. minLength: 2 type: string secondaryCidrBlock: - description: SecondaryCidrBlock is the additional CIDR range to use - for pod IPs. Must be within the 100.64.0.0/10 or 198.19.0.0/16 range. + description: |- + SecondaryCidrBlock is the additional CIDR range to use for pod IPs. + Must be within the 100.64.0.0/10 or 198.19.0.0/16 range. type: string sshKeyName: description: SSHKeyName is the name of the ssh key to attach to the @@ -2603,18 +2932,20 @@ spec: type: string tokenMethod: default: iam-authenticator - description: TokenMethod is used to specify the method for obtaining - a client token for communicating with EKS iam-authenticator - obtains - a client token using iam-authentictor aws-cli - obtains a client - token using the AWS CLI Defaults to iam-authenticator + description: |- + TokenMethod is used to specify the method for obtaining a client token for communicating with EKS + iam-authenticator - obtains a client token using iam-authentictor + aws-cli - obtains a client token using the AWS CLI + Defaults to iam-authenticator enum: - iam-authenticator - aws-cli type: string version: - description: Version defines the desired Kubernetes version. If no - version number is supplied then the latest version of Kubernetes - that EKS supports will be used. + description: |- + Version defines the desired Kubernetes version. If no version number + is supplied then the latest version of Kubernetes that EKS supports + will be used. minLength: 2 pattern: ^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.?(\.0|[1-9][0-9]*)?$ type: string @@ -2624,12 +2955,12 @@ spec: properties: disable: default: false - description: Disable indicates that the Amazon VPC CNI should - be disabled. With EKS clusters the Amazon VPC CNI is automatically - installed into the cluster. For clusters where you want to use - an alternate CNI this option provides a way to specify that - the Amazon VPC CNI should be deleted. You cannot set this to - true if you are using the Amazon VPC CNI addon. + description: |- + Disable indicates that the Amazon VPC CNI should be disabled. With EKS clusters the + Amazon VPC CNI is automatically installed into the cluster. For clusters where you want + to use an alternate CNI this option provides a way to specify that the Amazon VPC CNI + should be deleted. You cannot set this to true if you are using the + Amazon VPC CNI addon. 
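Several of the control-plane level fields in this hunk are commonly set together. A sketch with placeholder values; only fields shown in the schema above are used.

```yaml
spec:
  region: us-west-2
  version: v1.28                          # latest EKS-supported version is used when omitted
  restrictPrivateSubnets: true            # control plane uses private subnets only
  roleName: example-eks-controlplane      # pre-existing roles are treated as unmanaged
  tokenMethod: iam-authenticator          # or aws-cli
  secondaryCidrBlock: "100.64.0.0/16"     # must fall within 100.64.0.0/10 or 198.19.0.0/16
```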
type: boolean env: description: Env defines a list of environment variables to apply @@ -2643,15 +2974,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -2664,9 +2996,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -2677,11 +3010,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -2696,10 +3027,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -2729,9 +3059,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
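Because the `vpcCni` env entries reuse the core EnvVar type, tuning the Amazon VPC CNI looks like an ordinary container env list. A sketch, assuming `spec.vpcCni` placement; the variable names are examples for the aws-node daemonset and should be checked against the CNI version in use.

```yaml
spec:
  vpcCni:
    disable: false                      # keep the Amazon VPC CNI managed by EKS
    env:
      - name: ENABLE_PREFIX_DELEGATION  # example variable, verify against your CNI release
        value: "true"
      - name: WARM_PREFIX_TARGET
        value: "1"
```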
type: string optional: description: Specify whether the Secret or its key @@ -2834,6 +3165,10 @@ spec: availabilityZone: description: Availability zone of instance type: string + capacityReservationId: + description: CapacityReservationID specifies the target Capacity + Reservation into which the instance should be launched. + type: string ebsOptimized: description: Indicates whether the instance is optimized for Amazon EBS I/O. @@ -2857,48 +3192,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint - on your instances. \n If you specify a value of disabled, - you cannot access your instance metadata. \n Default: enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit for - instance metadata requests. The larger the number, the further - instance metadata requests can travel. \n Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. + + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance metadata - requests. \n If the state is optional, you can choose to - retrieve instance metadata with or without a session token - on your request. If you retrieve the IAM role credentials - without a token, the version 1.0 role credentials are returned. - If you retrieve the IAM role credentials using a valid session - token, the version 2.0 role credentials are returned. \n - If the state is required, you must send a session token - with any instance metadata retrieval requests. In this state, - retrieving the IAM role credentials always returns the version - 2.0 credentials; the version 1.0 credentials are not available. - \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance tags - from the instance metadata. Set to disabled to turn off - access to instance tags from the instance metadata. For - more information, see Work with instance tags using the - instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). - \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. 
+ For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -2926,11 +3277,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by - the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -2938,9 +3288,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size or - 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -2961,6 +3311,15 @@ spec: description: PlacementGroupName specifies the name of the placement group in which to launch the instance. type: string + placementGroupPartition: + description: |- + PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + strategy set to partition. + format: int64 + maximum: 7 + minimum: 1 + type: integer privateDnsName: description: PrivateDNSName is the options for the instance hostname. properties: @@ -2984,6 +3343,10 @@ spec: privateIp: description: The private IPv4 address assigned to the instance. type: string + publicIPOnLaunch: + description: PublicIPOnLaunch is the option to associate a public + IP on instance launch + type: boolean publicIp: description: The public IPv4 address assigned to the instance, if applicable. @@ -2999,11 +3362,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -3011,9 +3373,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -3063,9 +3425,9 @@ spec: description: The instance type. type: string userData: - description: UserData is the raw data script passed to the instance - which is run upon bootstrap. This field must not be base64 encoded - and should only be used when running a new instance. 
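The instance metadata options above map onto the usual IMDSv2 hardening pattern. A hedged fragment showing only the metadata block, usable wherever CAPA exposes `instanceMetadataOptions` on an instance spec.

```yaml
instanceMetadataOptions:
  httpEndpoint: enabled
  httpTokens: required              # IMDSv2 only; version 1.0 role credentials are no longer returned
  httpPutResponseHopLimit: 2        # allow one extra hop so containers can still reach IMDS
  instanceMetadataTags: disabled
```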
+ description: |- + UserData is the raw data script passed to the instance which is run upon bootstrap. + This field must not be base64 encoded and should only be used when running a new instance. type: string volumeIDs: description: IDs of the instance's volumes @@ -3083,37 +3445,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -3123,15 +3485,15 @@ spec: type: array externalManagedControlPlane: default: true - description: ExternalManagedControlPlane indicates to cluster-api - that the control plane is managed by an external service such as - AKS, EKS, GKE, etc. + description: |- + ExternalManagedControlPlane indicates to cluster-api that the control plane + is managed by an external service such as AKS, EKS, GKE, etc. type: boolean failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. 
properties: attributes: additionalProperties: @@ -3148,12 +3510,14 @@ spec: zones that can be used type: object failureMessage: - description: ErrorMessage indicates that there is a terminal problem - reconciling the state, and will be set to a descriptive error message. + description: |- + ErrorMessage indicates that there is a terminal problem reconciling the + state, and will be set to a descriptive error message. type: string identityProviderStatus: - description: IdentityProviderStatus holds the status for associated - identity provider + description: |- + IdentityProviderStatus holds the status for + associated identity provider properties: arn: description: ARN holds the ARN of associated identity provider @@ -3164,8 +3528,9 @@ spec: type: string type: object initialized: - description: Initialized denotes whether or not the control plane - has the uploaded kubernetes config-map. + description: |- + Initialized denotes whether or not the control plane has the + uploaded kubernetes config-map. type: boolean networkStatus: description: Networks holds details about the AWS networking resources @@ -3175,8 +3540,9 @@ spec: description: APIServerELB is the Kubernetes api server load balancer. properties: arn: - description: ARN of the load balancer. Unlike the ClassicLB, - ARN is used mostly to define and get it. + description: |- + ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + to define and get it. type: string attributes: description: ClassicElbAttributes defines extra attributes @@ -3187,9 +3553,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. + description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. format: int64 type: integer type: object @@ -3223,13 +3589,14 @@ spec: for a load balancer. type: string targetGroup: - description: TargetGroupSpec specifies target group - settings for a given listener. This is created first, - and the ARN is then passed to the listener. + description: |- + TargetGroupSpec specifies target group settings for a given listener. + This is created first, and the ARN is then passed to the listener. properties: name: description: Name of the TargetGroup. Must be unique over the same group of listeners. + maxLength: 32 type: string port: description: Port is the exposed port @@ -3265,6 +3632,9 @@ spec: timeoutSeconds: format: int64 type: integer + unhealthyThresholdCount: + format: int64 + type: integer type: object vpcId: type: string @@ -3288,19 +3658,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. 
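The condition fields described above follow the standard Cluster API condition shape. A typical entry reported under `status.conditions` might look like this; the reason and message are illustrative.

```yaml
status:
  conditions:
    - type: Ready
      status: "False"
      severity: Warning                     # only set when status is False
      reason: VpcReconciliationFailed       # hypothetical CamelCase reason
      message: "failed to reconcile VPC: ..."
      lastTransitionTime: "2024-05-01T12:00:00Z"
```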
+ description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -3352,9 +3722,9 @@ spec: - nlb type: string name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -3390,8 +3760,9 @@ spec: api server load balancer. properties: arn: - description: ARN of the load balancer. Unlike the ClassicLB, - ARN is used mostly to define and get it. + description: |- + ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + to define and get it. type: string attributes: description: ClassicElbAttributes defines extra attributes @@ -3402,9 +3773,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. + description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. format: int64 type: integer type: object @@ -3438,13 +3809,14 @@ spec: for a load balancer. type: string targetGroup: - description: TargetGroupSpec specifies target group - settings for a given listener. This is created first, - and the ARN is then passed to the listener. + description: |- + TargetGroupSpec specifies target group settings for a given listener. + This is created first, and the ARN is then passed to the listener. properties: name: description: Name of the TargetGroup. Must be unique over the same group of listeners. + maxLength: 32 type: string port: description: Port is the exposed port @@ -3480,6 +3852,9 @@ spec: timeoutSeconds: format: int64 type: integer + unhealthyThresholdCount: + format: int64 + type: integer type: object vpcId: type: string @@ -3503,19 +3878,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -3567,9 +3942,9 @@ spec: - nlb type: string name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. 
It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -3628,6 +4003,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways + IPs as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in @@ -3649,10 +4028,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access - from. Cannot be specified with CidrBlocks. The field - will be combined with source security group IDs - if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -3707,8 +4085,9 @@ spec: type: object ready: default: false - description: Ready denotes that the AWSManagedControlPlane API Server - is ready to receive requests and that the VPC infra is ready. + description: |- + Ready denotes that the AWSManagedControlPlane API Server is ready to + receive requests and that the VPC infra is ready. type: boolean required: - ready diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml index 245d9d72a6..3e9eb51cd9 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: rosacontrolplanes.controlplane.cluster.x-k8s.io spec: group: controlplane.cluster.x-k8s.io @@ -30,27 +30,58 @@ spec: name: v1beta2 schema: openAPIV3Schema: + description: ROSAControlPlane is the Schema for the ROSAControlPlanes API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: + description: RosaControlPlaneSpec defines the desired state of ROSAControlPlane. properties: + additionalTags: + additionalProperties: + type: string + description: AdditionalTags are user-defined tags to be added on the + AWS resources associated with the control plane. + type: object + auditLogRoleARN: + description: |- + AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch. + If not set, audit log forwarding is disabled. + type: string availabilityZones: - description: AWS AvailabilityZones of the worker nodes should match - the AvailabilityZones of the Subnets. + description: |- + AvailabilityZones describe AWS AvailabilityZones of the worker nodes. + should match the AvailabilityZones of the provided Subnets. + a machinepool will be created for each availabilityZone. items: type: string type: array + billingAccount: + description: |- + BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters. + The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster + is running. + type: string + x-kubernetes-validations: + - message: billingAccount is immutable + rule: self == oldSelf + - message: billingAccount must be a valid AWS account ID + rule: self.matches('^[0-9]{12}$') controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. @@ -67,21 +98,335 @@ spec: - port type: object credentialsSecretRef: - description: 'CredentialsSecretRef references a secret with necessary - credentials to connect to the OCM API. The secret should contain - the following data keys: - ocmToken: eyJhbGciOiJIUzI1NiIsI.... - - ocmApiUrl: Optional, defaults to ''https://api.openshift.com''' + description: |- + CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API. + The secret should contain the following data keys: + - ocmToken: eyJhbGciOiJIUzI1NiIsI.... + - ocmApiUrl: Optional, defaults to 'https://api.openshift.com' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic + defaultMachinePoolSpec: + description: |- + DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation. + One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for openshift cluster operators + to work properly. + As these machinepool not created using ROSAMachinePool CR, they will not be visible/managed by ROSA CAPI provider. + `rosa list machinepools -c ` can be used to view those machinepools. + + + This field will be removed in the future once the current limitation is resolved. + properties: + autoscaling: + description: |- + Autoscaling specifies auto scaling behaviour for the default MachinePool. Autoscaling min/max value + must be equal or multiple of the availability zones count. 
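For the ROSAControlPlane, the new network block and default machine pool settings compose roughly as below. A sketch only: the account ID, CIDRs and instance type are placeholders and the remaining required ROSA fields are omitted.

```yaml
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
kind: ROSAControlPlane
metadata:
  name: example-rosa
spec:
  region: us-east-1
  domainPrefix: example-hcp          # immutable, max 15 characters
  billingAccount: "123456789012"     # immutable, must be a 12-digit AWS account ID
  network:
    networkType: OVNKubernetes
    machineCIDR: "10.0.0.0/16"
    podCIDR: "10.128.0.0/14"
    serviceCIDR: "172.30.0.0/16"
    hostPrefix: 23
  defaultMachinePoolSpec:
    instanceType: m5.xlarge
    autoscaling:
      minReplicas: 3                 # must equal or be a multiple of the AZ count
      maxReplicas: 6
```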
+ properties: + maxReplicas: + minimum: 1 + type: integer + minReplicas: + minimum: 1 + type: integer + type: object + instanceType: + description: The instance type to use, for example `r5.xlarge`. + Instance type ref; https://aws.amazon.com/ec2/instance-types/ + type: string + type: object + domainPrefix: + description: |- + DomainPrefix is an optional prefix added to the cluster's domain name. It will be used + when generating a sub-domain for the cluster on openshiftapps domain. It must be valid DNS-1035 label + consisting of lower case alphanumeric characters or '-', start with an alphabetic character + end with an alphanumeric character and have a max length of 15 characters. + maxLength: 15 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + x-kubernetes-validations: + - message: domainPrefix is immutable + rule: self == oldSelf + enableExternalAuthProviders: + default: false + description: EnableExternalAuthProviders enables external authentication + configuration for the cluster. + type: boolean + x-kubernetes-validations: + - message: enableExternalAuthProviders is immutable + rule: self == oldSelf + endpointAccess: + default: Public + description: |- + EndpointAccess specifies the publishing scope of cluster endpoints. The + default is Public. + enum: + - Public + - Private + type: string + etcdEncryptionKMSARN: + description: |- + EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be + created out-of-band by the user and tagged with `red-hat:true`. + type: string + externalAuthProviders: + description: |- + ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster. + Can only be set if "enableExternalAuthProviders" is set to "True". + + + At most one provider can be configured. + items: + description: ExternalAuthProvider is an external OIDC identity provider + that can issue tokens for this cluster + properties: + claimMappings: + description: |- + ClaimMappings describes rules on how to transform information from an + ID token into a cluster identity + properties: + groups: + description: |- + Groups is a name of the claim that should be used to construct + groups for the cluster identity. + The referenced claim must use array of strings values. + properties: + claim: + description: Claim is a JWT token claim to be used in + the mapping + type: string + prefix: + description: |- + Prefix is a string to prefix the value from the token in the result of the + claim mapping. + + + By default, no prefixing occurs. + + + Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains + an array of strings "a", "b" and "c", the mapping will result in an + array of string "myoidc:a", "myoidc:b" and "myoidc:c". + type: string + required: + - claim + type: object + username: + description: |- + Username is a name of the claim that should be used to construct + usernames for the cluster identity. + + + Default value: "sub" + properties: + claim: + description: Claim is a JWT token claim to be used in + the mapping + type: string + prefix: + description: Prefix is prepended to claim to prevent + clashes with existing names. + minLength: 1 + type: string + prefixPolicy: + description: |- + PrefixPolicy specifies how a prefix should apply. + + + By default, claims other than `email` will be prefixed with the issuer URL to + prevent naming clashes with other plugins. + + + Set to "NoPrefix" to disable prefixing. + + + Example: + (1) `prefix` is set to "myoidc:" and `claim` is set to "username". 
+ If the JWT claim `username` contains value `userA`, the resulting + mapped value will be "myoidc:userA". + (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the + JWT `email` claim contains value "userA@myoidc.tld", the resulting + mapped value will be "myoidc:userA@myoidc.tld". + (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + and `claim` is set to: + (a) "username": the mapped value will be "https://myoidc.tld#userA" + (b) "email": the mapped value will be "userA@myoidc.tld" + enum: + - "" + - NoPrefix + - Prefix + type: string + required: + - claim + type: object + x-kubernetes-validations: + - message: prefix must be set if prefixPolicy is 'Prefix', + but must remain unset otherwise + rule: 'self.prefixPolicy == ''Prefix'' ? has(self.prefix) + : !has(self.prefix)' + type: object + claimValidationRules: + description: ClaimValidationRules are rules that are applied + to validate token claims to authenticate users. + items: + description: TokenClaimValidationRule validates token claims + to authenticate users. + properties: + requiredClaim: + description: RequiredClaim allows configuring a required + claim name and its expected value + properties: + claim: + description: |- + Claim is a name of a required claim. Only claims with string values are + supported. + minLength: 1 + type: string + requiredValue: + description: RequiredValue is the required value for + the claim. + minLength: 1 + type: string + required: + - claim + - requiredValue + type: object + type: + default: RequiredClaim + description: Type sets the type of the validation rule + enum: + - RequiredClaim + type: string + required: + - requiredClaim + - type + type: object + type: array + x-kubernetes-list-type: atomic + issuer: + description: Issuer describes attributes of the OIDC token issuer + properties: + audiences: + description: |- + Audiences is an array of audiences that the token was issued for. + Valid tokens must include at least one of these values in their + "aud" claim. + Must be set to exactly one value. + items: + description: TokenAudience is the audience that the token + was issued for. + minLength: 1 + type: string + maxItems: 10 + minItems: 1 + type: array + x-kubernetes-list-type: set + issuerCertificateAuthority: + description: |- + CertificateAuthority is a reference to a config map in the + configuration namespace. The .data of the configMap must contain + the "ca-bundle.crt" key. + If unset, system trust is used instead. + properties: + name: + description: Name is the metadata.name of the referenced + object. + type: string + required: + - name + type: object + issuerURL: + description: |- + URL is the serving URL of the token issuer. + Must use the https:// scheme. + pattern: ^https:\/\/[^\s] + type: string + required: + - audiences + - issuerURL + type: object + name: + description: Name of the OIDC provider + minLength: 1 + type: string + oidcClients: + description: |- + OIDCClients contains configuration for the platform's clients that + need to request tokens from the issuer + items: + description: |- + OIDCClientConfig contains configuration for the platform's client that + need to request tokens from the issuer. 
+ properties: + clientID: + description: ClientID is the identifier of the OIDC client + from the OIDC provider + minLength: 1 + type: string + clientSecret: + description: |- + ClientSecret refers to a secret that + contains the client secret in the `clientSecret` key of the `.data` field + properties: + name: + description: Name is the metadata.name of the referenced + object. + type: string + required: + - name + type: object + componentName: + description: |- + ComponentName is the name of the component that is supposed to consume this + client configuration + maxLength: 256 + minLength: 1 + type: string + componentNamespace: + description: |- + ComponentNamespace is the namespace of the component that is supposed to consume this + client configuration + maxLength: 63 + minLength: 1 + type: string + extraScopes: + description: ExtraScopes is an optional set of scopes + to request tokens with. + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - clientID + - clientSecret + - componentName + - componentNamespace + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + required: + - issuer + - name + type: object + maxItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map identityRef: - description: IdentityRef is a reference to an identity to be used - when reconciling the managed control plane. If no identity is specified, - the default identity for this controller will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. @@ -99,16 +444,53 @@ spec: - name type: object installerRoleARN: - description: 'TODO: these are to satisfy ocm sdk. Explore how to drop - them.' - type: string - machineCIDR: - description: Block of IP addresses used by OpenShift while installing - the cluster, for example "10.0.0.0/16". + description: InstallerRoleARN is an AWS IAM role that OpenShift Cluster + Manager will assume to create the cluster.. type: string + network: + description: Network config for the ROSA HCP cluster. + properties: + hostPrefix: + default: 23 + description: Network host prefix which is defaulted to `23` if + not specified. + type: integer + machineCIDR: + description: IP addresses block used by OpenShift while installing + the cluster, for example "10.0.0.0/16". + format: cidr + type: string + networkType: + default: OVNKubernetes + description: The CNI network type default is OVNKubernetes. + enum: + - OVNKubernetes + - Other + type: string + podCIDR: + description: IP address block from which to assign pod IP addresses, + for example `10.128.0.0/14`. + format: cidr + type: string + serviceCIDR: + description: IP address block from which to assign service IP + addresses, for example `172.30.0.0/16`. + format: cidr + type: string + type: object oidcID: - description: The ID of the OpenID Connect Provider. + description: The ID of the internal OpenID Connect Provider. + type: string + x-kubernetes-validations: + - message: oidcID is immutable + rule: self == oldSelf + provisionShardID: + description: ProvisionShardID defines the shard where rosa control + plane components will be hosted. 
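An external OIDC provider entry, per the schema above, could be declared as follows. The issuer URL, audiences and client names are hypothetical; `enableExternalAuthProviders` must be true and at most one provider is allowed.

```yaml
spec:
  enableExternalAuthProviders: true
  externalAuthProviders:
    - name: corp-oidc
      issuer:
        issuerURL: https://login.example.com
        audiences:
          - openshift-console          # matched against the token's "aud" claim
      claimMappings:
        username:
          claim: email
          prefixPolicy: NoPrefix       # prefix must stay unset with NoPrefix
        groups:
          claim: groups
          prefix: "oidc:"
      oidcClients:
        - componentName: console
          componentNamespace: openshift-console
          clientID: openshift-console
          clientSecret:
            name: console-oidc-secret  # secret holding the `clientSecret` key
```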
type: string + x-kubernetes-validations: + - message: provisionShardID is immutable + rule: self == oldSelf region: description: The AWS Region the cluster lives in. type: string @@ -118,142 +500,182 @@ spec: properties: controlPlaneOperatorARN: description: "ControlPlaneOperatorARN is an ARN value referencing - a role appropriate for the Control Plane Operator. \n The following - is an example of a valid policy document: \n { \"Version\": - \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": - [ \"ec2:CreateVpcEndpoint\", \"ec2:DescribeVpcEndpoints\", \"ec2:ModifyVpcEndpoint\", - \"ec2:DeleteVpcEndpoints\", \"ec2:CreateTags\", \"route53:ListHostedZones\", - \"ec2:CreateSecurityGroup\", \"ec2:AuthorizeSecurityGroupIngress\", - \"ec2:AuthorizeSecurityGroupEgress\", \"ec2:DeleteSecurityGroup\", - \"ec2:RevokeSecurityGroupIngress\", \"ec2:RevokeSecurityGroupEgress\", - \"ec2:DescribeSecurityGroups\", \"ec2:DescribeVpcs\", ], \"Resource\": - \"*\" }, { \"Effect\": \"Allow\", \"Action\": [ \"route53:ChangeResourceRecordSets\", - \"route53:ListResourceRecordSets\" ], \"Resource\": \"arn:aws:route53:::%s\" - } ] }" + a role appropriate for the Control Plane Operator.\n\n\nThe + following is an example of a valid policy document:\n\n\n{\n\t\"Version\": + \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:CreateVpcEndpoint\",\n\t\t\t\t\"ec2:DescribeVpcEndpoints\",\n\t\t\t\t\"ec2:ModifyVpcEndpoint\",\n\t\t\t\t\"ec2:DeleteVpcEndpoints\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"route53:ListHostedZones\",\n\t\t\t\t\"ec2:CreateSecurityGroup\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupIngress\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DeleteSecurityGroup\",\n\t\t\t\t\"ec2:RevokeSecurityGroupIngress\",\n\t\t\t\t\"ec2:RevokeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DescribeSecurityGroups\",\n\t\t\t\t\"ec2:DescribeVpcs\",\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": + [\n\t\t\t\t\"route53:ChangeResourceRecordSets\",\n\t\t\t\t\"route53:ListResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\": + \"arn:aws:route53:::%s\"\n\t\t}\n\t]\n}" type: string imageRegistryARN: description: "ImageRegistryARN is an ARN value referencing a role - appropriate for the Image Registry Operator. 
\n The following - is an example of a valid policy document: \n { \"Version\": - \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": - [ \"s3:CreateBucket\", \"s3:DeleteBucket\", \"s3:PutBucketTagging\", - \"s3:GetBucketTagging\", \"s3:PutBucketPublicAccessBlock\", - \"s3:GetBucketPublicAccessBlock\", \"s3:PutEncryptionConfiguration\", - \"s3:GetEncryptionConfiguration\", \"s3:PutLifecycleConfiguration\", - \"s3:GetLifecycleConfiguration\", \"s3:GetBucketLocation\", - \"s3:ListBucket\", \"s3:GetObject\", \"s3:PutObject\", \"s3:DeleteObject\", - \"s3:ListBucketMultipartUploads\", \"s3:AbortMultipartUpload\", - \"s3:ListMultipartUploadParts\" ], \"Resource\": \"*\" } ] }" + appropriate for the Image Registry Operator.\n\n\nThe following + is an example of a valid policy document:\n\n\n{\n\t\"Version\": + \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"s3:CreateBucket\",\n\t\t\t\t\"s3:DeleteBucket\",\n\t\t\t\t\"s3:PutBucketTagging\",\n\t\t\t\t\"s3:GetBucketTagging\",\n\t\t\t\t\"s3:PutBucketPublicAccessBlock\",\n\t\t\t\t\"s3:GetBucketPublicAccessBlock\",\n\t\t\t\t\"s3:PutEncryptionConfiguration\",\n\t\t\t\t\"s3:GetEncryptionConfiguration\",\n\t\t\t\t\"s3:PutLifecycleConfiguration\",\n\t\t\t\t\"s3:GetLifecycleConfiguration\",\n\t\t\t\t\"s3:GetBucketLocation\",\n\t\t\t\t\"s3:ListBucket\",\n\t\t\t\t\"s3:GetObject\",\n\t\t\t\t\"s3:PutObject\",\n\t\t\t\t\"s3:DeleteObject\",\n\t\t\t\t\"s3:ListBucketMultipartUploads\",\n\t\t\t\t\"s3:AbortMultipartUpload\",\n\t\t\t\t\"s3:ListMultipartUploadParts\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t}\n\t]\n}" type: string ingressARN: description: "The referenced role must have a trust relationship - that allows it to be assumed via web identity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. - Example: { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": - \"Allow\", \"Principal\": { \"Federated\": \"{{ .ProviderARN - }}\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\", \"Condition\": - { \"StringEquals\": { \"{{ .ProviderName }}:sub\": {{ .ServiceAccounts - }} } } } ] } \n IngressARN is an ARN value referencing a role - appropriate for the Ingress Operator. 
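The operator role ARNs documented in these hunks are referenced from the control plane spec. A hedged sketch with placeholder ARNs; the parent field name `rolesRef` is assumed here since it is not visible in this hunk, and the account ID and role names are hypothetical.

```yaml
spec:
  installerRoleARN: arn:aws:iam::123456789012:role/Example-HCP-ROSA-Installer-Role
  rolesRef:                                  # assumed grouping field for the operator role ARNs
    ingressARN: arn:aws:iam::123456789012:role/example-openshift-ingress
    imageRegistryARN: arn:aws:iam::123456789012:role/example-openshift-image-registry
    controlPlaneOperatorARN: arn:aws:iam::123456789012:role/example-control-plane-operator
    kubeCloudControllerARN: arn:aws:iam::123456789012:role/example-cloud-controller
    networkARN: arn:aws:iam::123456789012:role/example-cloud-network-config
    nodePoolManagementARN: arn:aws:iam::123456789012:role/example-node-pool
    kmsProviderARN: arn:aws:iam::123456789012:role/example-kms-provider
```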
\n The following is an - example of a valid policy document: \n { \"Version\": \"2012-10-17\", - \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"elasticloadbalancing:DescribeLoadBalancers\", - \"tag:GetResources\", \"route53:ListHostedZones\" ], \"Resource\": - \"*\" }, { \"Effect\": \"Allow\", \"Action\": [ \"route53:ChangeResourceRecordSets\" - ], \"Resource\": [ \"arn:aws:route53:::PUBLIC_ZONE_ID\", \"arn:aws:route53:::PRIVATE_ZONE_ID\" - ] } ] }" + that allows it to be assumed via web identity.\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.\nExample:\n{\n\t\t\"Version\": + \"2012-10-17\",\n\t\t\"Statement\": [\n\t\t\t{\n\t\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\t\"Principal\": {\n\t\t\t\t\t\"Federated\": + \"{{ .ProviderARN }}\"\n\t\t\t\t},\n\t\t\t\t\t\"Action\": \"sts:AssumeRoleWithWebIdentity\",\n\t\t\t\t\"Condition\": + {\n\t\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\t\"{{ .ProviderName + }}:sub\": {{ .ServiceAccounts }}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t}\n\n\nIngressARN + is an ARN value referencing a role appropriate for the Ingress + Operator.\n\n\nThe following is an example of a valid policy + document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": + [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"elasticloadbalancing:DescribeLoadBalancers\",\n\t\t\t\t\"tag:GetResources\",\n\t\t\t\t\"route53:ListHostedZones\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": + [\n\t\t\t\t\"route53:ChangeResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\": + [\n\t\t\t\t\"arn:aws:route53:::PUBLIC_ZONE_ID\",\n\t\t\t\t\"arn:aws:route53:::PRIVATE_ZONE_ID\"\n\t\t\t]\n\t\t}\n\t]\n}" type: string kmsProviderARN: type: string kubeCloudControllerARN: - description: "KubeCloudControllerARN is an ARN value referencing - a role appropriate for the KCM/KCC. 
Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies - \n The following is an example of a valid policy document: \n - { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Action\": - [ \"autoscaling:DescribeAutoScalingGroups\", \"autoscaling:DescribeLaunchConfigurations\", - \"autoscaling:DescribeTags\", \"ec2:DescribeAvailabilityZones\", - \"ec2:DescribeInstances\", \"ec2:DescribeImages\", \"ec2:DescribeRegions\", - \"ec2:DescribeRouteTables\", \"ec2:DescribeSecurityGroups\", - \"ec2:DescribeSubnets\", \"ec2:DescribeVolumes\", \"ec2:CreateSecurityGroup\", - \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:ModifyInstanceAttribute\", - \"ec2:ModifyVolume\", \"ec2:AttachVolume\", \"ec2:AuthorizeSecurityGroupIngress\", - \"ec2:CreateRoute\", \"ec2:DeleteRoute\", \"ec2:DeleteSecurityGroup\", - \"ec2:DeleteVolume\", \"ec2:DetachVolume\", \"ec2:RevokeSecurityGroupIngress\", - \"ec2:DescribeVpcs\", \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:AttachLoadBalancerToSubnets\", - \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\", - \"elasticloadbalancing:CreateLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancerPolicy\", - \"elasticloadbalancing:CreateLoadBalancerListeners\", \"elasticloadbalancing:ConfigureHealthCheck\", - \"elasticloadbalancing:DeleteLoadBalancer\", \"elasticloadbalancing:DeleteLoadBalancerListeners\", - \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeLoadBalancerAttributes\", - \"elasticloadbalancing:DetachLoadBalancerFromSubnets\", \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\", - \"elasticloadbalancing:ModifyLoadBalancerAttributes\", \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\", - \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\", - \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:CreateListener\", - \"elasticloadbalancing:CreateTargetGroup\", \"elasticloadbalancing:DeleteListener\", - \"elasticloadbalancing:DeleteTargetGroup\", \"elasticloadbalancing:DeregisterTargets\", - \"elasticloadbalancing:DescribeListeners\", \"elasticloadbalancing:DescribeLoadBalancerPolicies\", - \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DescribeTargetHealth\", - \"elasticloadbalancing:ModifyListener\", \"elasticloadbalancing:ModifyTargetGroup\", - \"elasticloadbalancing:RegisterTargets\", \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\", - \"iam:CreateServiceLinkedRole\", \"kms:DescribeKey\" ], \"Resource\": - [ \"*\" ], \"Effect\": \"Allow\" } ] }" + description: |- + KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. 
+ Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies + + + The following is an example of a valid policy document: + + + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInstances", + "ec2:DescribeImages", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] + } type: string networkARN: description: "NetworkARN is an ARN value referencing a role appropriate - for the Network Operator. 
\n The following is an example of - a valid policy document: \n { \"Version\": \"2012-10-17\", \"Statement\": - [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:DescribeInstances\", - \"ec2:DescribeInstanceStatus\", \"ec2:DescribeInstanceTypes\", - \"ec2:UnassignPrivateIpAddresses\", \"ec2:AssignPrivateIpAddresses\", - \"ec2:UnassignIpv6Addresses\", \"ec2:AssignIpv6Addresses\", - \"ec2:DescribeSubnets\", \"ec2:DescribeNetworkInterfaces\" ], - \"Resource\": \"*\" } ] }" + for the Network Operator.\n\n\nThe following is an example of + a valid policy document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": + [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:DescribeInstances\",\n + \ \"ec2:DescribeInstanceStatus\",\n \"ec2:DescribeInstanceTypes\",\n + \ \"ec2:UnassignPrivateIpAddresses\",\n \"ec2:AssignPrivateIpAddresses\",\n + \ \"ec2:UnassignIpv6Addresses\",\n \"ec2:AssignIpv6Addresses\",\n + \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeNetworkInterfaces\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t}\n\t]\n}" type: string nodePoolManagementARN: description: "NodePoolManagementARN is an ARN value referencing - a role appropriate for the CAPI Controller. \n The following - is an example of a valid policy document: \n { \"Version\": - \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:AssociateRouteTable\", - \"ec2:AttachInternetGateway\", \"ec2:AuthorizeSecurityGroupIngress\", - \"ec2:CreateInternetGateway\", \"ec2:CreateNatGateway\", \"ec2:CreateRoute\", - \"ec2:CreateRouteTable\", \"ec2:CreateSecurityGroup\", \"ec2:CreateSubnet\", - \"ec2:CreateTags\", \"ec2:DeleteInternetGateway\", \"ec2:DeleteNatGateway\", - \"ec2:DeleteRouteTable\", \"ec2:DeleteSecurityGroup\", \"ec2:DeleteSubnet\", - \"ec2:DeleteTags\", \"ec2:DescribeAccountAttributes\", \"ec2:DescribeAddresses\", - \"ec2:DescribeAvailabilityZones\", \"ec2:DescribeImages\", \"ec2:DescribeInstances\", - \"ec2:DescribeInternetGateways\", \"ec2:DescribeNatGateways\", - \"ec2:DescribeNetworkInterfaces\", \"ec2:DescribeNetworkInterfaceAttribute\", - \"ec2:DescribeRouteTables\", \"ec2:DescribeSecurityGroups\", - \"ec2:DescribeSubnets\", \"ec2:DescribeVpcs\", \"ec2:DescribeVpcAttribute\", - \"ec2:DescribeVolumes\", \"ec2:DetachInternetGateway\", \"ec2:DisassociateRouteTable\", - \"ec2:DisassociateAddress\", \"ec2:ModifyInstanceAttribute\", - \"ec2:ModifyNetworkInterfaceAttribute\", \"ec2:ModifySubnetAttribute\", - \"ec2:RevokeSecurityGroupIngress\", \"ec2:RunInstances\", \"ec2:TerminateInstances\", - \"tag:GetResources\", \"ec2:CreateLaunchTemplate\", \"ec2:CreateLaunchTemplateVersion\", - \"ec2:DescribeLaunchTemplates\", \"ec2:DescribeLaunchTemplateVersions\", - \"ec2:DeleteLaunchTemplate\", \"ec2:DeleteLaunchTemplateVersions\" - ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\" }, { \"Condition\": - { \"StringLike\": { \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\" - } }, \"Action\": [ \"iam:CreateServiceLinkedRole\" ], \"Resource\": - [ \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\" - ], \"Effect\": \"Allow\" }, { \"Action\": [ \"iam:PassRole\" - ], \"Resource\": [ \"arn:*:iam::*:role/*-worker-role\" ], \"Effect\": - \"Allow\" }, { \"Effect\": \"Allow\", \"Action\": [ \"kms:Decrypt\", - \"kms:ReEncrypt\", \"kms:GenerateDataKeyWithoutPlainText\", - \"kms:DescribeKey\" ], \"Resource\": \"*\" }, { \"Effect\": - \"Allow\", \"Action\": [ \"kms:CreateGrant\" ], \"Resource\": - \"*\", \"Condition\": { \"Bool\": { 
\"kms:GrantIsForAWSResource\": - true } } } ] }" + a role appropriate for the CAPI Controller.\n\n\nThe following + is an example of a valid policy document:\n\n\n{\n \"Version\": + \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n + \ \"ec2:AssociateRouteTable\",\n \"ec2:AttachInternetGateway\",\n + \ \"ec2:AuthorizeSecurityGroupIngress\",\n \"ec2:CreateInternetGateway\",\n + \ \"ec2:CreateNatGateway\",\n \"ec2:CreateRoute\",\n + \ \"ec2:CreateRouteTable\",\n \"ec2:CreateSecurityGroup\",\n + \ \"ec2:CreateSubnet\",\n \"ec2:CreateTags\",\n \"ec2:DeleteInternetGateway\",\n + \ \"ec2:DeleteNatGateway\",\n \"ec2:DeleteRouteTable\",\n + \ \"ec2:DeleteSecurityGroup\",\n \"ec2:DeleteSubnet\",\n + \ \"ec2:DeleteTags\",\n \"ec2:DescribeAccountAttributes\",\n + \ \"ec2:DescribeAddresses\",\n \"ec2:DescribeAvailabilityZones\",\n + \ \"ec2:DescribeImages\",\n \"ec2:DescribeInstances\",\n + \ \"ec2:DescribeInternetGateways\",\n \"ec2:DescribeNatGateways\",\n + \ \"ec2:DescribeNetworkInterfaces\",\n \"ec2:DescribeNetworkInterfaceAttribute\",\n + \ \"ec2:DescribeRouteTables\",\n \"ec2:DescribeSecurityGroups\",\n + \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeVpcs\",\n + \ \"ec2:DescribeVpcAttribute\",\n \"ec2:DescribeVolumes\",\n + \ \"ec2:DetachInternetGateway\",\n \"ec2:DisassociateRouteTable\",\n + \ \"ec2:DisassociateAddress\",\n \"ec2:ModifyInstanceAttribute\",\n + \ \"ec2:ModifyNetworkInterfaceAttribute\",\n \"ec2:ModifySubnetAttribute\",\n + \ \"ec2:RevokeSecurityGroupIngress\",\n \"ec2:RunInstances\",\n + \ \"ec2:TerminateInstances\",\n \"tag:GetResources\",\n + \ \"ec2:CreateLaunchTemplate\",\n \"ec2:CreateLaunchTemplateVersion\",\n + \ \"ec2:DescribeLaunchTemplates\",\n \"ec2:DescribeLaunchTemplateVersions\",\n + \ \"ec2:DeleteLaunchTemplate\",\n \"ec2:DeleteLaunchTemplateVersions\"\n + \ ],\n \"Resource\": [\n \"*\"\n ],\n \"Effect\": + \"Allow\"\n },\n {\n \"Condition\": {\n \"StringLike\": + {\n \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\"\n + \ }\n },\n \"Action\": [\n \"iam:CreateServiceLinkedRole\"\n + \ ],\n \"Resource\": [\n \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"\n + \ ],\n \"Effect\": \"Allow\"\n },\n {\n \"Action\": + [\n \"iam:PassRole\"\n ],\n \"Resource\": [\n + \ \"arn:*:iam::*:role/*-worker-role\"\n ],\n \"Effect\": + \"Allow\"\n },\n\t {\n\t \t\"Effect\": \"Allow\",\n\t \t\"Action\": + [\n\t \t\t\"kms:Decrypt\",\n\t \t\t\"kms:ReEncrypt\",\n\t + \ \t\t\"kms:GenerateDataKeyWithoutPlainText\",\n\t \t\t\"kms:DescribeKey\"\n\t + \ \t],\n\t \t\"Resource\": \"*\"\n\t },\n\t {\n\t \t\"Effect\": + \"Allow\",\n\t \t\"Action\": [\n\t \t\t\"kms:CreateGrant\"\n\t + \ \t],\n\t \t\"Resource\": \"*\",\n\t \t\"Condition\": {\n\t + \ \t\t\"Bool\": {\n\t \t\t\t\"kms:GrantIsForAWSResource\": + true\n\t \t\t}\n\t \t}\n\t }\n ]\n}" type: string storageARN: description: "StorageARN is an ARN value referencing a role appropriate - for the Storage Operator. 
\n The following is an example of - a valid policy document: \n { \"Version\": \"2012-10-17\", \"Statement\": - [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:AttachVolume\", - \"ec2:CreateSnapshot\", \"ec2:CreateTags\", \"ec2:CreateVolume\", - \"ec2:DeleteSnapshot\", \"ec2:DeleteTags\", \"ec2:DeleteVolume\", - \"ec2:DescribeInstances\", \"ec2:DescribeSnapshots\", \"ec2:DescribeTags\", - \"ec2:DescribeVolumes\", \"ec2:DescribeVolumesModifications\", - \"ec2:DetachVolume\", \"ec2:ModifyVolume\" ], \"Resource\": - \"*\" } ] }" + for the Storage Operator.\n\n\nThe following is an example of + a valid policy document:\n\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": + [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:AttachVolume\",\n\t\t\t\t\"ec2:CreateSnapshot\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"ec2:CreateVolume\",\n\t\t\t\t\"ec2:DeleteSnapshot\",\n\t\t\t\t\"ec2:DeleteTags\",\n\t\t\t\t\"ec2:DeleteVolume\",\n\t\t\t\t\"ec2:DescribeInstances\",\n\t\t\t\t\"ec2:DescribeSnapshots\",\n\t\t\t\t\"ec2:DescribeTags\",\n\t\t\t\t\"ec2:DescribeVolumes\",\n\t\t\t\t\"ec2:DescribeVolumesModifications\",\n\t\t\t\t\"ec2:DetachVolume\",\n\t\t\t\t\"ec2:ModifyVolume\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t}\n\t]\n}" type: string required: - controlPlaneOperatorARN @@ -266,34 +688,38 @@ spec: - storageARN type: object rosaClusterName: - description: Cluster name must be valid DNS-1035 label, so it must - consist of lower case alphanumeric characters or '-', start with - an alphabetic character, end with an alphanumeric character and - have a max length of 15 characters. - maxLength: 15 + description: |- + Cluster name must be valid DNS-1035 label, so it must consist of lower case alphanumeric + characters or '-', start with an alphabetic character, end with an alphanumeric character + and have a max length of 54 characters. + maxLength: 54 pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ type: string x-kubernetes-validations: - message: rosaClusterName is immutable rule: self == oldSelf subnets: - description: The Subnet IDs to use when installing the cluster. SubnetIDs - should come in pairs; two per availability zone, one private and - one public. + description: |- + The Subnet IDs to use when installing the cluster. + SubnetIDs should come in pairs; two per availability zone, one private and one public. items: type: string type: array supportRoleARN: + description: |- + SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable + access to the cluster account in order to provide support. type: string version: - description: Openshift version, for example "4.14.5". + description: OpenShift semantic version, for example "4.14.5". type: string workerRoleARN: + description: WorkerRoleARN is an AWS IAM role that will be attached + to worker instances. type: string required: - availabilityZones - installerRoleARN - - machineCIDR - oidcID - region - rolesRef @@ -304,46 +730,47 @@ spec: - workerRoleARN type: object status: + description: RosaControlPlaneStatus defines the observed state of ROSAControlPlane. properties: conditions: - description: Conditions specifies the cpnditions for the managed control + description: Conditions specifies the conditions for the managed control plane items: description: Condition defines an observation of a Cluster API resource operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. 
- If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -356,29 +783,33 @@ spec: type: string externalManagedControlPlane: default: true - description: ExternalManagedControlPlane indicates to cluster-api - that the control plane is managed by an external service such as - AKS, EKS, GKE, etc. + description: |- + ExternalManagedControlPlane indicates to cluster-api that the control plane + is managed by an external service such as AKS, EKS, GKE, etc. type: boolean failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the state and will be set to a descriptive - error message. \n This field should not be set for transitive errors - that a controller faces that are expected to be fixed automatically - over time (like service outages), but instead indicate that something - is fundamentally wrong with the spec or the configuration of the - controller, and that manual intervention is required." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the state and will be set to a descriptive error message. 
+ + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the spec or the configuration of + the controller, and that manual intervention is required. type: string id: description: ID is the cluster ID given by ROSA. type: string initialized: - description: Initialized denotes whether or not the control plane - has the uploaded kubernetes config-map. + description: |- + Initialized denotes whether or not the control plane has the + uploaded kubernetes config-map. type: boolean oidcEndpointURL: description: OIDCEndpointURL is the endpoint url for the managed OIDC - porvider. + provider. type: string ready: default: false diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml index d400720ff8..858d93489a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsclustercontrolleridentities.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -21,19 +21,24 @@ spec: - name: v1beta1 schema: openAPIV3Schema: - description: AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities - API It is used to grant access to use Cluster API Provider AWS Controller - credentials. + description: |- + AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API + It is used to grant access to use Cluster API Provider AWS Controller credentials. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -41,14 +46,12 @@ spec: description: Spec for this AWSClusterControllerIdentity. properties: allowedNamespaces: - description: AllowedNamespaces is used to identify which namespaces - are allowed to use the identity from. 
Namespaces can be selected - either using an array of namespaces or with label selector. An empty - allowedNamespaces object indicates that AWSClusters can use this - identity from any namespace. If this object is nil, no namespaces - will be allowed (default behaviour, if this field is not provided) - A namespace should be either in the NamespaceList or match with - Selector to use the identity. + description: |- + AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + Namespaces can be selected either using an array of namespaces or with label selector. + An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + A namespace should be either in the NamespaceList or match with Selector to use the identity. nullable: true properties: list: @@ -59,32 +62,33 @@ spec: nullable: true type: array selector: - description: An empty selector indicates that AWSClusters cannot - use this AWSClusterIdentity from any namespace. + description: |- + An empty selector indicates that AWSClusters cannot use this + AWSClusterIdentity from any namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -96,11 +100,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object x-kubernetes-map-type: atomic @@ -112,19 +115,24 @@ spec: - name: v1beta2 schema: openAPIV3Schema: - description: AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities - API It is used to grant access to use Cluster API Provider AWS Controller - credentials. + description: |- + AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API + It is used to grant access to use Cluster API Provider AWS Controller credentials. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -132,14 +140,12 @@ spec: description: Spec for this AWSClusterControllerIdentity. properties: allowedNamespaces: - description: AllowedNamespaces is used to identify which namespaces - are allowed to use the identity from. Namespaces can be selected - either using an array of namespaces or with label selector. An empty - allowedNamespaces object indicates that AWSClusters can use this - identity from any namespace. If this object is nil, no namespaces - will be allowed (default behaviour, if this field is not provided) - A namespace should be either in the NamespaceList or match with - Selector to use the identity. + description: |- + AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + Namespaces can be selected either using an array of namespaces or with label selector. + An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + A namespace should be either in the NamespaceList or match with Selector to use the identity. nullable: true properties: list: @@ -150,32 +156,33 @@ spec: nullable: true type: array selector: - description: An empty selector indicates that AWSClusters cannot - use this AWSClusterIdentity from any namespace. + description: |- + An empty selector indicates that AWSClusters cannot use this + AWSClusterIdentity from any namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -187,11 +194,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml index 35e0bdd5fa..cfe210a32e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsclusterroleidentities.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -21,18 +21,24 @@ spec: - name: v1beta1 schema: openAPIV3Schema: - description: AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities - API It is used to assume a role using the provided sourceRef. + description: |- + AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API + It is used to assume a role using the provided sourceRef. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -40,14 +46,12 @@ spec: description: Spec for this AWSClusterRoleIdentity. properties: allowedNamespaces: - description: AllowedNamespaces is used to identify which namespaces - are allowed to use the identity from. Namespaces can be selected - either using an array of namespaces or with label selector. An empty - allowedNamespaces object indicates that AWSClusters can use this - identity from any namespace. If this object is nil, no namespaces - will be allowed (default behaviour, if this field is not provided) - A namespace should be either in the NamespaceList or match with - Selector to use the identity. + description: |- + AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + Namespaces can be selected either using an array of namespaces or with label selector. + An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + A namespace should be either in the NamespaceList or match with Selector to use the identity. nullable: true properties: list: @@ -58,32 +62,33 @@ spec: nullable: true type: array selector: - description: An empty selector indicates that AWSClusters cannot - use this AWSClusterIdentity from any namespace. + description: |- + An empty selector indicates that AWSClusters cannot use this + AWSClusterIdentity from any namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. items: type: string type: array @@ -95,11 +100,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -112,26 +116,26 @@ spec: minimum: 900 type: integer externalID: - description: A unique identifier that might be required when you assume - a role in another account. If the administrator of the account to - which the role belongs provided you with an external ID, then provide - that value in the ExternalId parameter. This value can be any string, - such as a passphrase or account number. A cross-account role is - usually set up to trust everyone in an account. Therefore, the administrator - of the trusting account might send an external ID to the administrator - of the trusted account. That way, only someone with the ID can assume - the role, rather than everyone in the account. For more information - about the external ID, see How to Use an External ID When Granting - Access to Your AWS Resources to a Third Party in the IAM User Guide. + description: |- + A unique identifier that might be required when you assume a role in another account. + If the administrator of the account to which the role belongs provided you with an + external ID, then provide that value in the ExternalId parameter. This value can be + any string, such as a passphrase or account number. A cross-account role is usually + set up to trust everyone in an account. Therefore, the administrator of the trusting + account might send an external ID to the administrator of the trusted account. That + way, only someone with the ID can assume the role, rather than everyone in the + account. For more information about the external ID, see How to Use an External ID + When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide. type: string inlinePolicy: description: An IAM policy as a JSON-encoded string that you want to use as an inline session policy. type: string policyARNs: - description: The Amazon Resource Names (ARNs) of the IAM managed policies - that you want to use as managed session policies. The policies must - exist in the same account as the role. + description: |- + The Amazon Resource Names (ARNs) of the IAM managed policies that you want + to use as managed session policies. + The policies must exist in the same account as the role. items: type: string type: array @@ -142,9 +146,9 @@ spec: description: An identifier for the assumed role session type: string sourceIdentityRef: - description: SourceIdentityRef is a reference to another identity - which will be chained to do role assumption. All identity types - are accepted. + description: |- + SourceIdentityRef is a reference to another identity which will be chained to do + role assumption. All identity types are accepted. properties: kind: description: Kind of the identity. 
@@ -170,18 +174,24 @@ spec: - name: v1beta2 schema: openAPIV3Schema: - description: AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities - API It is used to assume a role using the provided sourceRef. + description: |- + AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API + It is used to assume a role using the provided sourceRef. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -189,14 +199,12 @@ spec: description: Spec for this AWSClusterRoleIdentity. properties: allowedNamespaces: - description: AllowedNamespaces is used to identify which namespaces - are allowed to use the identity from. Namespaces can be selected - either using an array of namespaces or with label selector. An empty - allowedNamespaces object indicates that AWSClusters can use this - identity from any namespace. If this object is nil, no namespaces - will be allowed (default behaviour, if this field is not provided) - A namespace should be either in the NamespaceList or match with - Selector to use the identity. + description: |- + AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + Namespaces can be selected either using an array of namespaces or with label selector. + An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + A namespace should be either in the NamespaceList or match with Selector to use the identity. nullable: true properties: list: @@ -207,32 +215,33 @@ spec: nullable: true type: array selector: - description: An empty selector indicates that AWSClusters cannot - use this AWSClusterIdentity from any namespace. + description: |- + An empty selector indicates that AWSClusters cannot use this + AWSClusterIdentity from any namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -244,11 +253,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -261,26 +269,26 @@ spec: minimum: 900 type: integer externalID: - description: A unique identifier that might be required when you assume - a role in another account. If the administrator of the account to - which the role belongs provided you with an external ID, then provide - that value in the ExternalId parameter. This value can be any string, - such as a passphrase or account number. A cross-account role is - usually set up to trust everyone in an account. Therefore, the administrator - of the trusting account might send an external ID to the administrator - of the trusted account. That way, only someone with the ID can assume - the role, rather than everyone in the account. For more information - about the external ID, see How to Use an External ID When Granting - Access to Your AWS Resources to a Third Party in the IAM User Guide. + description: |- + A unique identifier that might be required when you assume a role in another account. + If the administrator of the account to which the role belongs provided you with an + external ID, then provide that value in the ExternalId parameter. This value can be + any string, such as a passphrase or account number. A cross-account role is usually + set up to trust everyone in an account. Therefore, the administrator of the trusting + account might send an external ID to the administrator of the trusted account. That + way, only someone with the ID can assume the role, rather than everyone in the + account. For more information about the external ID, see How to Use an External ID + When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide. type: string inlinePolicy: description: An IAM policy as a JSON-encoded string that you want to use as an inline session policy. 
type: string policyARNs: - description: The Amazon Resource Names (ARNs) of the IAM managed policies - that you want to use as managed session policies. The policies must - exist in the same account as the role. + description: |- + The Amazon Resource Names (ARNs) of the IAM managed policies that you want + to use as managed session policies. + The policies must exist in the same account as the role. items: type: string type: array @@ -291,9 +299,9 @@ spec: description: An identifier for the assumed role session type: string sourceIdentityRef: - description: SourceIdentityRef is a reference to another identity - which will be chained to do role assumption. All identity types - are accepted. + description: |- + SourceIdentityRef is a reference to another identity which will be chained to do + role assumption. All identity types are accepted. properties: kind: description: Kind of the identity. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index a74b9c7e82..f3eb3a2fc7 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -47,14 +47,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -65,38 +70,39 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object bastion: description: Bastion contains options to configure the bastion host. properties: allowedCIDRBlocks: - description: AllowedCIDRBlocks is a list of CIDR blocks allowed - to access the bastion host. 
They are set as ingress rules for - the Bastion host's Security Group (defaults to 0.0.0.0/0). + description: |- + AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. + They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). items: type: string type: array ami: - description: AMI will use the specified AMI to boot the bastion. - If not specified, the AMI will default to one picked out in - public space. + description: |- + AMI will use the specified AMI to boot the bastion. If not specified, + the AMI will default to one picked out in public space. type: string disableIngressRules: - description: DisableIngressRules will ensure there are no Ingress - rules in the bastion host's security group. Requires AllowedCIDRBlocks - to be empty. + description: |- + DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. + Requires AllowedCIDRBlocks to be empty. type: boolean enabled: - description: Enabled allows this provider to create a bastion - host instance with a public ip to access the VPC private network. + description: |- + Enabled allows this provider to create a bastion host instance + with a public ip to access the VPC private network. type: boolean instanceType: - description: InstanceType will use the specified instance type - for the bastion. If not specified, Cluster API Provider AWS - will use t3.micro for all regions except us-east-1, where t2.micro + description: |- + InstanceType will use the specified instance type for the bastion. If not specified, + Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro will be the default. type: string type: object @@ -120,33 +126,36 @@ spec: customizing control plane behavior. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups sets the security groups - used by the load balancer. Expected to be security group IDs - This is optional - if not provided new security groups will - be created for the load balancer + description: |- + AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs + This is optional - if not provided new security groups will be created for the load balancer items: type: string type: array crossZoneLoadBalancing: - description: "CrossZoneLoadBalancing enables the classic ELB cross - availability zone balancing. \n With cross-zone load balancing, - each load balancer node for your Classic Load Balancer distributes - requests evenly across the registered instances in all enabled - Availability Zones. If cross-zone load balancing is disabled, - each load balancer node distributes requests evenly across the - registered instances in its Availability Zone only. \n Defaults - to false." + description: |- + CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + + + With cross-zone load balancing, each load balancer node for your Classic Load Balancer + distributes requests evenly across the registered instances in all enabled Availability Zones. + If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + the registered instances in its Availability Zone only. + + + Defaults to false. 
type: boolean healthCheckProtocol: - description: HealthCheckProtocol sets the protocol type for classic - ELB health check target default value is ClassicELBProtocolSSL + description: |- + HealthCheckProtocol sets the protocol type for classic ELB health check target + default value is ClassicELBProtocolSSL type: string name: - description: Name sets the name of the classic ELB load balancer. - As per AWS, the name must be unique within your set of load - balancers for the region, must have a maximum of 32 characters, - must contain only alphanumeric characters or hyphens, and cannot - begin or end with a hyphen. Once set, the value cannot be changed. + description: |- + Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + within your set of load balancers for the region, must have a maximum of 32 characters, must + contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + set, the value cannot be changed. maxLength: 32 pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$ type: string @@ -167,9 +176,9 @@ spec: type: array type: object identityRef: - description: IdentityRef is a reference to an identity to be used - when reconciling the managed control plane. If no identity is specified, - the default identity for this controller will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. @@ -187,30 +196,32 @@ spec: - name type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating system - used to look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines unless - a machine specifies a different ImageLookupBaseOS. + description: |- + ImageLookupBaseOS is the name of the base operating system used to look + up machine images when a machine does not specify an AMI. When set, this + will be used for all cluster machines unless a machine specifies a + different ImageLookupBaseOS. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. Supports substitutions for {{.BaseOS}} - and {{.K8sVersion}} with the base OS and kubernetes version, respectively. - The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the - default), and the kubernetes version as defined by the packages - produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up machine images when + a machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. + Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + OS and kubernetes version, respectively. 
The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: - description: ImageLookupOrg is the AWS Organization ID to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. + description: |- + ImageLookupOrg is the AWS Organization ID to look up machine images when a + machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. type: string network: description: NetworkSpec encapsulates all things related to AWS network. @@ -219,10 +230,9 @@ spec: description: CNI configuration properties: cniIngressRules: - description: CNIIngressRules specify rules to apply to control - plane and worker node security groups. The source for the - rule will be set to control plane and worker security group - IDs. + description: |- + CNIIngressRules specify rules to apply to control plane and worker node security groups. + The source for the rule will be set to control plane and worker security group IDs. items: description: CNIIngressRule defines an AWS ingress rule for CNI requirements. @@ -250,9 +260,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set of security - groups to use for cluster instances This is optional - if not - provided new security groups will be created for the cluster + description: |- + SecurityGroupOverrides is an optional set of security groups to use for cluster instances + This is optional - if not provided new security groups will be created for the cluster type: object subnets: description: Subnets configuration. @@ -272,17 +282,15 @@ spec: this resource. type: string ipv6CidrBlock: - description: IPv6CidrBlock is the IPv6 CIDR block to be - used when the provider creates a managed VPC. A subnet - can have an IPv4 and an IPv6 address. IPv6 is only supported - in managed clusters, this field cannot be set on AWSCluster - object. + description: |- + IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + A subnet can have an IPv4 and an IPv6 address. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: string isIpv6: - description: IsIPv6 defines the subnet as an IPv6 subnet. - A subnet is IPv6 when it is associated with a VPC that - has IPv6 enabled. IPv6 is only supported in managed clusters, - this field cannot be set on AWSCluster object. + description: |- + IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: boolean isPublic: description: IsPublic defines the subnet as a public subnet. @@ -290,12 +298,9 @@ spec: table that has a route to an internet gateway. type: boolean natGatewayId: - description: NatGatewayID is the NAT gateway id associated - with the subnet. 
Ignored unless the subnet is managed - by the provider, in which case this is set on the public - subnet where the NAT gateway resides. It is then used - to determine routes for private subnets in the same AZ - as the public subnet. + description: |- + NatGatewayID is the NAT gateway id associated with the subnet. + Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. type: string routeTableId: description: RouteTableID is the routing table id associated @@ -314,28 +319,29 @@ spec: properties: availabilityZoneSelection: default: Ordered - description: 'AvailabilityZoneSelection specifies how AZs - should be selected if there are more AZs in a region than - specified by AvailabilityZoneUsageLimit. There are 2 selection - schemes: Ordered - selects based on alphabetical order Random - - selects AZs randomly in a region Defaults to Ordered' + description: |- + AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: + Ordered - selects based on alphabetical order + Random - selects AZs randomly in a region + Defaults to Ordered enum: - Ordered - Random type: string availabilityZoneUsageLimit: default: 3 - description: AvailabilityZoneUsageLimit specifies the maximum - number of availability zones (AZ) that should be used in - a region when automatically creating subnets. If a region - has more than this number of AZs then this number of AZs - will be picked randomly when creating default subnets. Defaults - to 3 + description: |- + AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + should be used in a region when automatically creating subnets. If a region has more + than this number of AZs then this number of AZs will be picked randomly when creating + default subnets. Defaults to 3 minimum: 1 type: integer cidrBlock: - description: CidrBlock is the CIDR block to be used when the - provider creates a managed VPC. Defaults to 10.0.0.0/16. + description: |- + CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + Defaults to 10.0.0.0/16. type: string id: description: ID is the vpc-id of the VPC this provider should @@ -346,9 +352,9 @@ spec: associated with the VPC. type: string ipv6: - description: IPv6 contains ipv6 specific settings for the - network. Supported only in managed clusters. This field - cannot be set on AWSCluster object. + description: |- + IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. + This field cannot be set on AWSCluster object. properties: cidrBlock: description: CidrBlock is the CIDR block provided by Amazon @@ -375,15 +381,16 @@ spec: description: The AWS Region the cluster lives in. type: string s3Bucket: - description: S3Bucket contains options to configure a supporting S3 - bucket for this cluster - currently used for nodes requiring Ignition + description: |- + S3Bucket contains options to configure a supporting S3 bucket for this + cluster - currently used for nodes requiring Ignition (https://coreos.github.io/ignition/) for bootstrapping (requires BootstrapFormatIgnition feature flag to be enabled). 
properties: controlPlaneIAMInstanceProfile: - description: ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, - which will be allowed to read control-plane node bootstrap data - from S3 Bucket. + description: |- + ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed + to read control-plane node bootstrap data from S3 Bucket. type: string name: description: Name defines name of S3 Bucket to be created. @@ -392,9 +399,9 @@ spec: pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ type: string nodesIAMInstanceProfiles: - description: NodesIAMInstanceProfiles is a list of IAM instance - profiles, which will be allowed to read worker nodes bootstrap - data from S3 Bucket. + description: |- + NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read + worker nodes bootstrap data from S3 Bucket. items: type: string type: array @@ -475,11 +482,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by - the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -487,9 +493,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size or - 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -524,11 +530,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -536,9 +541,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -588,9 +593,9 @@ spec: description: The instance type. type: string userData: - description: UserData is the raw data script passed to the instance - which is run upon bootstrap. This field must not be base64 encoded - and should only be used when running a new instance. + description: |- + UserData is the raw data script passed to the instance which is run upon bootstrap. + This field must not be base64 encoded and should only be used when running a new instance. 
type: string volumeIDs: description: IDs of the instance's volumes @@ -608,37 +613,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -648,9 +653,9 @@ spec: type: array failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: @@ -681,9 +686,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. + description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. 
format: int64 type: integer type: object @@ -704,19 +709,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -759,9 +764,9 @@ spec: type: object type: array name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -894,14 +899,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -912,38 +922,39 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object bastion: description: Bastion contains options to configure the bastion host. properties: allowedCIDRBlocks: - description: AllowedCIDRBlocks is a list of CIDR blocks allowed - to access the bastion host. 
They are set as ingress rules for - the Bastion host's Security Group (defaults to 0.0.0.0/0). + description: |- + AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. + They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). items: type: string type: array ami: - description: AMI will use the specified AMI to boot the bastion. - If not specified, the AMI will default to one picked out in - public space. + description: |- + AMI will use the specified AMI to boot the bastion. If not specified, + the AMI will default to one picked out in public space. type: string disableIngressRules: - description: DisableIngressRules will ensure there are no Ingress - rules in the bastion host's security group. Requires AllowedCIDRBlocks - to be empty. + description: |- + DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. + Requires AllowedCIDRBlocks to be empty. type: boolean enabled: - description: Enabled allows this provider to create a bastion - host instance with a public ip to access the VPC private network. + description: |- + Enabled allows this provider to create a bastion host instance + with a public ip to access the VPC private network. type: boolean instanceType: - description: InstanceType will use the specified instance type - for the bastion. If not specified, Cluster API Provider AWS - will use t3.micro for all regions except us-east-1, where t2.micro + description: |- + InstanceType will use the specified instance type for the bastion. If not specified, + Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro will be the default. type: string type: object @@ -967,13 +978,70 @@ spec: customizing control plane behavior. properties: additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. This is only applicable - to Network Load Balancer (NLB) types for the time being. + description: |- + AdditionalListeners sets the additional listeners for the control plane load balancer. + This is only applicable to Network Load Balancer (NLB) types for the time being. items: - description: AdditionalListenerSpec defines the desired state - of an additional listener on an AWS load balancer. + description: |- + AdditionalListenerSpec defines the desired state of an + additional listener on an AWS load balancer. properties: + healthCheck: + description: HealthCheck sets the optional custom health + check configuration to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + path: + description: |- + The destination for health checks on the targets when using the protocol HTTP or HTTPS, + otherwise the path will be ignored. + type: string + port: + description: |- + The port the load balancer uses when performing health checks for additional target groups. When + not specified this value will be set for the same of listener port. + type: string + protocol: + description: |- + The protocol to use to health check connect with the target. When not specified the Protocol + will be the same of the listener. + enum: + - TCP + - HTTP + - HTTPS + type: string + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. 
+ format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object port: description: Port sets the port for the additional listener. format: int64 @@ -982,8 +1050,9 @@ spec: type: integer protocol: default: TCP - description: Protocol sets the protocol for the additional - listener. Currently only TCP is supported. + description: |- + Protocol sets the protocol for the additional listener. + Currently only TCP is supported. enum: - TCP type: string @@ -995,31 +1064,71 @@ spec: - port x-kubernetes-list-type: map additionalSecurityGroups: - description: AdditionalSecurityGroups sets the security groups - used by the load balancer. Expected to be security group IDs - This is optional - if not provided new security groups will - be created for the load balancer + description: |- + AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs + This is optional - if not provided new security groups will be created for the load balancer items: type: string type: array crossZoneLoadBalancing: - description: "CrossZoneLoadBalancing enables the classic ELB cross - availability zone balancing. \n With cross-zone load balancing, - each load balancer node for your Classic Load Balancer distributes - requests evenly across the registered instances in all enabled - Availability Zones. If cross-zone load balancing is disabled, - each load balancer node distributes requests evenly across the - registered instances in its Availability Zone only. \n Defaults - to false." + description: |- + CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + + + With cross-zone load balancing, each load balancer node for your Classic Load Balancer + distributes requests evenly across the registered instances in all enabled Availability Zones. + If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + the registered instances in its Availability Zone only. + + + Defaults to false. type: boolean disableHostsRewrite: - description: DisableHostsRewrite disabled the hair pinning issue - solution that adds the NLB's address as 127.0.0.1 to the hosts + description: |- + DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts file of each instance. This is by default, false. type: boolean + healthCheck: + description: HealthCheck sets custom health check configuration + to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. 
+ format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object healthCheckProtocol: - description: HealthCheckProtocol sets the protocol type for ELB - health check target default value is ELBProtocolSSL + description: |- + HealthCheckProtocol sets the protocol type for ELB health check target + default value is ELBProtocolSSL enum: - TCP - SSL @@ -1055,6 +1164,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways IPs + as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in IP),"tcp", @@ -1075,9 +1188,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access from. - Cannot be specified with CidrBlocks. The field will be - combined with source security group IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -1113,18 +1226,18 @@ spec: - disabled type: string name: - description: Name sets the name of the classic ELB load balancer. - As per AWS, the name must be unique within your set of load - balancers for the region, must have a maximum of 32 characters, - must contain only alphanumeric characters or hyphens, and cannot - begin or end with a hyphen. Once set, the value cannot be changed. + description: |- + Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + within your set of load balancers for the region, must have a maximum of 32 characters, must + contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + set, the value cannot be changed. maxLength: 32 pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$ type: string preserveClientIP: - description: PreserveClientIP lets the user control if preservation - of client ips must be retained or not. If this is enabled 6443 - will be opened to 0.0.0.0/0. + description: |- + PreserveClientIP lets the user control if preservation of client ips must be retained or not. + If this is enabled 6443 will be opened to 0.0.0.0/0. type: boolean scheme: default: internet-facing @@ -1143,9 +1256,9 @@ spec: type: array type: object identityRef: - description: IdentityRef is a reference to an identity to be used - when reconciling the managed control plane. If no identity is specified, - the default identity for this controller will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. @@ -1163,30 +1276,32 @@ spec: - name type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating system - used to look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines unless - a machine specifies a different ImageLookupBaseOS. 
+ description: |- + ImageLookupBaseOS is the name of the base operating system used to look + up machine images when a machine does not specify an AMI. When set, this + will be used for all cluster machines unless a machine specifies a + different ImageLookupBaseOS. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. Supports substitutions for {{.BaseOS}} - and {{.K8sVersion}} with the base OS and kubernetes version, respectively. - The BaseOS will be the value in ImageLookupBaseOS or ubuntu (the - default), and the kubernetes version as defined by the packages - produced by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up machine images when + a machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. + Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + OS and kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: - description: ImageLookupOrg is the AWS Organization ID to look up - machine images when a machine does not specify an AMI. When set, - this will be used for all cluster machines unless a machine specifies - a different ImageLookupOrg. + description: |- + ImageLookupOrg is the AWS Organization ID to look up machine images when a + machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. type: string network: description: NetworkSpec encapsulates all things related to AWS network. @@ -1218,6 +1333,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways IPs + as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in IP),"tcp", @@ -1238,9 +1357,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access from. - Cannot be specified with CidrBlocks. The field will be - combined with source security group IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. 
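
The hunks above introduce a configurable health check on the control-plane load balancer, optional per-listener health checks under additionalListeners, and a natGatewaysIPsSource toggle on ingress rules. A minimal sketch of how a user might set these new fields, assuming the enclosing resource is an AWSCluster at apiVersion infrastructure.cluster.x-k8s.io/v1beta2 (neither kind nor apiVersion is visible in these hunks); all values are illustrative only.

```yaml
# Illustrative sketch only: exercises the new healthCheck, additionalListeners[].healthCheck
# and natGatewaysIPsSource fields shown in the schema above. Kind/apiVersion are assumed.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example-cluster
spec:
  region: us-east-1
  controlPlaneLoadBalancer:
    loadBalancerType: nlb        # the new health-check fields target NLB target groups
    healthCheck:                 # custom health check for the API server target group
      intervalSeconds: 10        # schema allows 5-300
      timeoutSeconds: 5          # schema allows 2-120
      thresholdCount: 3          # schema allows 2-10
      unhealthyThresholdCount: 3 # schema allows 2-10
    additionalListeners:
      - port: 8443
        protocol: TCP            # only TCP is supported for additional listeners
        healthCheck:             # optional per-listener health check
          protocol: HTTPS
          path: /healthz
          intervalSeconds: 30
    ingressRules:
      - description: "API access from the cluster NAT gateway IPs"
        protocol: tcp
        fromPort: 6443
        toPort: 6443
        natGatewaysIPsSource: true   # new field: use NAT gateway IPs as the rule source
```
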
@@ -1268,10 +1387,9 @@ spec: description: CNI configuration properties: cniIngressRules: - description: CNIIngressRules specify rules to apply to control - plane and worker node security groups. The source for the - rule will be set to control plane and worker security group - IDs. + description: |- + CNIIngressRules specify rules to apply to control plane and worker node security groups. + The source for the rule will be set to control plane and worker security group IDs. items: description: CNIIngressRule defines an AWS ingress rule for CNI requirements. @@ -1299,9 +1417,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set of security - groups to use for cluster instances This is optional - if not - provided new security groups will be created for the cluster + description: |- + SecurityGroupOverrides is an optional set of security groups to use for cluster instances + This is optional - if not provided new security groups will be created for the cluster type: object subnets: description: Subnets configuration. @@ -1317,28 +1435,27 @@ spec: the provider creates a managed VPC. type: string id: - description: "ID defines a unique identifier to reference - this resource. If you're bringing your subnet, set the - AWS subnet-id here, it must start with `subnet-`. \n When - the VPC is managed by CAPA, and you'd like the provider - to create a subnet for you, the id can be set to any placeholder - value that does not start with `subnet-`; upon creation, - the subnet AWS identifier will be populated in the `ResourceID` - field and the `id` field is going to be used as the subnet - name. If you specify a tag called `Name`, it takes precedence." + description: |- + ID defines a unique identifier to reference this resource. + If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`. + + + When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you, + the id can be set to any placeholder value that does not start with `subnet-`; + upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and + the `id` field is going to be used as the subnet name. If you specify a tag + called `Name`, it takes precedence. type: string ipv6CidrBlock: - description: IPv6CidrBlock is the IPv6 CIDR block to be - used when the provider creates a managed VPC. A subnet - can have an IPv4 and an IPv6 address. IPv6 is only supported - in managed clusters, this field cannot be set on AWSCluster - object. + description: |- + IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + A subnet can have an IPv4 and an IPv6 address. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: string isIpv6: - description: IsIPv6 defines the subnet as an IPv6 subnet. - A subnet is IPv6 when it is associated with a VPC that - has IPv6 enabled. IPv6 is only supported in managed clusters, - this field cannot be set on AWSCluster object. + description: |- + IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: boolean isPublic: description: IsPublic defines the subnet as a public subnet. @@ -1346,17 +1463,23 @@ spec: table that has a route to an internet gateway. 
type: boolean natGatewayId: - description: NatGatewayID is the NAT gateway id associated - with the subnet. Ignored unless the subnet is managed - by the provider, in which case this is set on the public - subnet where the NAT gateway resides. It is then used - to determine routes for private subnets in the same AZ - as the public subnet. + description: |- + NatGatewayID is the NAT gateway id associated with the subnet. + Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. + type: string + parentZoneName: + description: |- + ParentZoneName is the zone name where the current subnet's zone is tied when + the zone is a Local Zone. + + + The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName + to select the correct private route table to egress traffic to the internet. type: string resourceID: - description: ResourceID is the subnet identifier from AWS, - READ ONLY. This field is populated when the provider manages - the subnet. + description: |- + ResourceID is the subnet identifier from AWS, READ ONLY. + This field is populated when the provider manages the subnet. type: string routeTableId: description: RouteTableID is the routing table id associated @@ -1368,6 +1491,42 @@ spec: description: Tags is a collection of tags describing the resource. type: object + zoneType: + description: |- + ZoneType defines the type of the zone where the subnet is created. + + + The valid values are availability-zone, local-zone, and wavelength-zone. + + + Subnet with zone type availability-zone (regular) is always selected to create cluster + resources, like Load Balancers, NAT Gateways, Contol Plane nodes, etc. + + + Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create + regular cluster resources. + + + The public subnet in availability-zone or local-zone is associated with regular public + route table with default route entry to a Internet Gateway. + + + The public subnet in wavelength-zone is associated with a carrier public + route table with default route entry to a Carrier Gateway. + + + The private subnet in the availability-zone is associated with a private route table with + the default route entry to a NAT Gateway created in that zone. + + + The private subnet in the local-zone or wavelength-zone is associated with a private route table with + the default route entry re-using the NAT Gateway in the Region (preferred from the + parent zone, the zone type availability-zone in the region, or first table available). + enum: + - availability-zone + - local-zone + - wavelength-zone + type: string required: - id type: object @@ -1380,40 +1539,84 @@ spec: properties: availabilityZoneSelection: default: Ordered - description: 'AvailabilityZoneSelection specifies how AZs - should be selected if there are more AZs in a region than - specified by AvailabilityZoneUsageLimit. There are 2 selection - schemes: Ordered - selects based on alphabetical order Random - - selects AZs randomly in a region Defaults to Ordered' + description: |- + AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + in a region than specified by AvailabilityZoneUsageLimit. 
There are 2 selection schemes: + Ordered - selects based on alphabetical order + Random - selects AZs randomly in a region + Defaults to Ordered enum: - Ordered - Random type: string availabilityZoneUsageLimit: default: 3 - description: AvailabilityZoneUsageLimit specifies the maximum - number of availability zones (AZ) that should be used in - a region when automatically creating subnets. If a region - has more than this number of AZs then this number of AZs - will be picked randomly when creating default subnets. Defaults - to 3 + description: |- + AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + should be used in a region when automatically creating subnets. If a region has more + than this number of AZs then this number of AZs will be picked randomly when creating + default subnets. Defaults to 3 minimum: 1 type: integer + carrierGatewayId: + description: |- + CarrierGatewayID is the id of the internet gateway associated with the VPC, + for carrier network (Wavelength Zones). + type: string + x-kubernetes-validations: + - message: Carrier Gateway ID must start with 'cagw-' + rule: self.startsWith('cagw-') cidrBlock: - description: CidrBlock is the CIDR block to be used when the - provider creates a managed VPC. Defaults to 10.0.0.0/16. + description: |- + CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + Defaults to 10.0.0.0/16. Mutually exclusive with IPAMPool. type: string + elasticIpPool: + description: |- + ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool + brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for + the API Server. + properties: + publicIpv4Pool: + description: |- + PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + resource ID starts with 'ipv4pool-ec2'. + maxLength: 30 + type: string + publicIpv4PoolFallbackOrder: + description: |- + PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, + no more IPv4 address available in the pool. + + + When set to 'amazon-pool', the controller check if the pool has available IPv4 address, when pool has reached the + IPv4 limit, the address will be claimed from Amazon-pool (default). + + + When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + enum: + - amazon-pool + - none + type: string + x-kubernetes-validations: + - message: allowed values are 'none' and 'amazon-pool' + rule: self in ['none','amazon-pool'] + type: object emptyRoutesDefaultVPCSecurityGroup: - description: "EmptyRoutesDefaultVPCSecurityGroup specifies - whether the default VPC security group ingress and egress - rules should be removed. \n By default, when creating a - VPC, AWS creates a security group called `default` with - ingress and egress rules that allow traffic from anywhere. - The group could be used as a potential surface attack and - it's generally suggested that the group rules are removed - or modified appropriately. \n NOTE: This only applies when - the VPC is managed by the Cluster API AWS controller." 
+ description: |- + EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress + and egress rules should be removed. + + + By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress + rules that allow traffic from anywhere. The group could be used as a potential surface attack and + it's generally suggested that the group rules are removed or modified appropriately. + + + NOTE: This only applies when the VPC is managed by the Cluster API AWS controller. type: boolean id: description: ID is the vpc-id of the VPC this provider should @@ -1424,8 +1627,9 @@ spec: associated with the VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv4 pool to be used for - VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv4 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this provider @@ -1436,20 +1640,22 @@ spec: should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR you want - to allocate to VPC from an Amazon VPC IP Address Manager - (IPAM) pool. Defaults to /16 for IPv4 if not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object ipv6: - description: IPv6 contains ipv6 specific settings for the - network. Supported only in managed clusters. This field - cannot be set on AWSCluster object. + description: |- + IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. + This field cannot be set on AWSCluster object. properties: cidrBlock: - description: CidrBlock is the CIDR block provided by Amazon - when VPC has enabled IPv6. Mutually exclusive with IPAMPool. + description: |- + CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6. + Mutually exclusive with IPAMPool. type: string egressOnlyInternetGatewayId: description: EgressOnlyInternetGatewayID is the id of @@ -1457,8 +1663,9 @@ spec: IPv6 enabled VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv6 pool to be used - for VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv6 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this provider @@ -1469,31 +1676,60 @@ spec: provider should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR you - want to allocate to VPC from an Amazon VPC IP Address - Manager (IPAM) pool. Defaults to /16 for IPv4 if - not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object poolId: - description: PoolID is the IP pool which must be defined - in case of BYO IP is defined. Must be specified if CidrBlock - is set. Mutually exclusive with IPAMPool. + description: |- + PoolID is the IP pool which must be defined in case of BYO IP is defined. + Must be specified if CidrBlock is set. + Mutually exclusive with IPAMPool. type: string type: object privateDnsHostnameTypeOnLaunch: - description: PrivateDNSHostnameTypeOnLaunch is the type of - hostname to assign to instances in the subnet at launch. 
- For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an - instance DNS name can be based on the instance IPv4 address - (ip-name) or the instance ID (resource-name). For IPv6 only - subnets, an instance DNS name must be based on the instance - ID (resource-name). + description: |- + PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch. + For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) + or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name). enum: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: |- + SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use + a separate IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block and settings + to associate with the managed VPC. Currently, only IPv4 + is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR block to + associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array + subnetSchema: + default: PreferPrivate + description: |- + SubnetSchema specifies how CidrBlock should be divided on subnets in the VPC depending on the number of AZs. + PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. + PreferPublic - have the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet + that will be further sub-divided for the private subnets. + Defaults to PreferPrivate + enum: + - PreferPrivate + - PreferPublic + type: string tags: additionalProperties: type: string @@ -1509,15 +1745,20 @@ spec: description: The AWS Region the cluster lives in. type: string s3Bucket: - description: S3Bucket contains options to configure a supporting S3 - bucket for this cluster - currently used for nodes requiring Ignition + description: |- + S3Bucket contains options to configure a supporting S3 bucket for this + cluster - currently used for nodes requiring Ignition (https://coreos.github.io/ignition/) for bootstrapping (requires BootstrapFormatIgnition feature flag to be enabled). properties: + bestEffortDeleteObjects: + description: BestEffortDeleteObjects defines whether access/permission + errors during object deletion should be ignored. + type: boolean controlPlaneIAMInstanceProfile: - description: ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, - which will be allowed to read control-plane node bootstrap data - from S3 Bucket. + description: |- + ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed + to read control-plane node bootstrap data from S3 Bucket. type: string name: description: Name defines name of S3 Bucket to be created. @@ -1526,36 +1767,99 @@ spec: pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ type: string nodesIAMInstanceProfiles: - description: NodesIAMInstanceProfiles is a list of IAM instance - profiles, which will be allowed to read worker nodes bootstrap - data from S3 Bucket. + description: |- + NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read + worker nodes bootstrap data from S3 Bucket. 
items: type: string type: array presignedURLDuration: - description: "PresignedURLDuration defines the duration for which - presigned URLs are valid. \n This is used to generate presigned - URLs for S3 Bucket objects, which are used by control-plane - and worker nodes to fetch bootstrap data. \n When enabled, the - IAM instance profiles specified are not used." + description: |- + PresignedURLDuration defines the duration for which presigned URLs are valid. + + + This is used to generate presigned URLs for S3 Bucket objects, which are used by + control-plane and worker nodes to fetch bootstrap data. + + + When enabled, the IAM instance profiles specified are not used. type: string required: - name type: object secondaryControlPlaneLoadBalancer: - description: "SecondaryControlPlaneLoadBalancer is an additional load - balancer that can be used for the control plane. \n An example use - case is to have a separate internal load balancer for internal traffic, - and a separate external load balancer for external traffic." + description: |- + SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane. + + + An example use case is to have a separate internal load balancer for internal traffic, + and a separate external load balancer for external traffic. properties: additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. This is only applicable - to Network Load Balancer (NLB) types for the time being. + description: |- + AdditionalListeners sets the additional listeners for the control plane load balancer. + This is only applicable to Network Load Balancer (NLB) types for the time being. items: - description: AdditionalListenerSpec defines the desired state - of an additional listener on an AWS load balancer. + description: |- + AdditionalListenerSpec defines the desired state of an + additional listener on an AWS load balancer. properties: + healthCheck: + description: HealthCheck sets the optional custom health + check configuration to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + path: + description: |- + The destination for health checks on the targets when using the protocol HTTP or HTTPS, + otherwise the path will be ignored. + type: string + port: + description: |- + The port the load balancer uses when performing health checks for additional target groups. When + not specified this value will be set for the same of listener port. + type: string + protocol: + description: |- + The protocol to use to health check connect with the target. When not specified the Protocol + will be the same of the listener. + enum: + - TCP + - HTTP + - HTTPS + type: string + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. 
+ format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object port: description: Port sets the port for the additional listener. format: int64 @@ -1564,8 +1868,9 @@ spec: type: integer protocol: default: TCP - description: Protocol sets the protocol for the additional - listener. Currently only TCP is supported. + description: |- + Protocol sets the protocol for the additional listener. + Currently only TCP is supported. enum: - TCP type: string @@ -1577,31 +1882,71 @@ spec: - port x-kubernetes-list-type: map additionalSecurityGroups: - description: AdditionalSecurityGroups sets the security groups - used by the load balancer. Expected to be security group IDs - This is optional - if not provided new security groups will - be created for the load balancer + description: |- + AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs + This is optional - if not provided new security groups will be created for the load balancer items: type: string type: array crossZoneLoadBalancing: - description: "CrossZoneLoadBalancing enables the classic ELB cross - availability zone balancing. \n With cross-zone load balancing, - each load balancer node for your Classic Load Balancer distributes - requests evenly across the registered instances in all enabled - Availability Zones. If cross-zone load balancing is disabled, - each load balancer node distributes requests evenly across the - registered instances in its Availability Zone only. \n Defaults - to false." + description: |- + CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + + + With cross-zone load balancing, each load balancer node for your Classic Load Balancer + distributes requests evenly across the registered instances in all enabled Availability Zones. + If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + the registered instances in its Availability Zone only. + + + Defaults to false. type: boolean disableHostsRewrite: - description: DisableHostsRewrite disabled the hair pinning issue - solution that adds the NLB's address as 127.0.0.1 to the hosts + description: |- + DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts file of each instance. This is by default, false. type: boolean + healthCheck: + description: HealthCheck sets custom health check configuration + to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. 
+ format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object healthCheckProtocol: - description: HealthCheckProtocol sets the protocol type for ELB - health check target default value is ELBProtocolSSL + description: |- + HealthCheckProtocol sets the protocol type for ELB health check target + default value is ELBProtocolSSL enum: - TCP - SSL @@ -1637,6 +1982,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways IPs + as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in IP),"tcp", @@ -1657,9 +2006,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access from. - Cannot be specified with CidrBlocks. The field will be - combined with source security group IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -1695,18 +2044,18 @@ spec: - disabled type: string name: - description: Name sets the name of the classic ELB load balancer. - As per AWS, the name must be unique within your set of load - balancers for the region, must have a maximum of 32 characters, - must contain only alphanumeric characters or hyphens, and cannot - begin or end with a hyphen. Once set, the value cannot be changed. + description: |- + Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + within your set of load balancers for the region, must have a maximum of 32 characters, must + contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + set, the value cannot be changed. maxLength: 32 pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$ type: string preserveClientIP: - description: PreserveClientIP lets the user control if preservation - of client ips must be retained or not. If this is enabled 6443 - will be opened to 0.0.0.0/0. + description: |- + PreserveClientIP lets the user control if preservation of client ips must be retained or not. + If this is enabled 6443 will be opened to 0.0.0.0/0. type: boolean scheme: default: internet-facing @@ -1757,6 +2106,10 @@ spec: availabilityZone: description: Availability zone of instance type: string + capacityReservationId: + description: CapacityReservationID specifies the target Capacity + Reservation into which the instance should be launched. + type: string ebsOptimized: description: Indicates whether the instance is optimized for Amazon EBS I/O. @@ -1780,48 +2133,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint - on your instances. \n If you specify a value of disabled, - you cannot access your instance metadata. \n Default: enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit for - instance metadata requests. The larger the number, the further - instance metadata requests can travel. 
\n Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. + + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance metadata - requests. \n If the state is optional, you can choose to - retrieve instance metadata with or without a session token - on your request. If you retrieve the IAM role credentials - without a token, the version 1.0 role credentials are returned. - If you retrieve the IAM role credentials using a valid session - token, the version 2.0 role credentials are returned. \n - If the state is required, you must send a session token - with any instance metadata retrieval requests. In this state, - retrieving the IAM role credentials always returns the version - 2.0 credentials; the version 1.0 credentials are not available. - \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance tags - from the instance metadata. Set to disabled to turn off - access to instance tags from the instance metadata. For - more information, see Work with instance tags using the - instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). - \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. + For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -1849,11 +2218,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by - the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -1861,9 +2229,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size or - 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. 
+ Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -1884,6 +2252,15 @@ spec: description: PlacementGroupName specifies the name of the placement group in which to launch the instance. type: string + placementGroupPartition: + description: |- + PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + strategy set to partition. + format: int64 + maximum: 7 + minimum: 1 + type: integer privateDnsName: description: PrivateDNSName is the options for the instance hostname. properties: @@ -1907,6 +2284,10 @@ spec: privateIp: description: The private IPv4 address assigned to the instance. type: string + publicIPOnLaunch: + description: PublicIPOnLaunch is the option to associate a public + IP on instance launch + type: boolean publicIp: description: The public IPv4 address assigned to the instance, if applicable. @@ -1922,11 +2303,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -1934,9 +2314,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -1986,9 +2366,9 @@ spec: description: The instance type. type: string userData: - description: UserData is the raw data script passed to the instance - which is run upon bootstrap. This field must not be base64 encoded - and should only be used when running a new instance. + description: |- + UserData is the raw data script passed to the instance which is run upon bootstrap. + This field must not be base64 encoded and should only be used when running a new instance. type: string volumeIDs: description: IDs of the instance's volumes @@ -2006,37 +2386,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. 
The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -2046,9 +2426,9 @@ spec: type: array failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: @@ -2070,8 +2450,9 @@ spec: description: APIServerELB is the Kubernetes api server load balancer. properties: arn: - description: ARN of the load balancer. Unlike the ClassicLB, - ARN is used mostly to define and get it. + description: |- + ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + to define and get it. type: string attributes: description: ClassicElbAttributes defines extra attributes @@ -2082,9 +2463,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. + description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. format: int64 type: integer type: object @@ -2118,13 +2499,14 @@ spec: for a load balancer. type: string targetGroup: - description: TargetGroupSpec specifies target group - settings for a given listener. This is created first, - and the ARN is then passed to the listener. + description: |- + TargetGroupSpec specifies target group settings for a given listener. + This is created first, and the ARN is then passed to the listener. properties: name: description: Name of the TargetGroup. Must be unique over the same group of listeners. 
+ maxLength: 32 type: string port: description: Port is the exposed port @@ -2160,6 +2542,9 @@ spec: timeoutSeconds: format: int64 type: integer + unhealthyThresholdCount: + format: int64 + type: integer type: object vpcId: type: string @@ -2183,19 +2568,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -2247,9 +2632,9 @@ spec: - nlb type: string name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -2285,8 +2670,9 @@ spec: api server load balancer. properties: arn: - description: ARN of the load balancer. Unlike the ClassicLB, - ARN is used mostly to define and get it. + description: |- + ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + to define and get it. type: string attributes: description: ClassicElbAttributes defines extra attributes @@ -2297,9 +2683,9 @@ spec: load balancer load balancing. type: boolean idleTimeout: - description: IdleTimeout is time that the connection is - allowed to be idle (no data has been sent over the connection) - before it is closed by the load balancer. + description: |- + IdleTimeout is time that the connection is allowed to be idle (no data + has been sent over the connection) before it is closed by the load balancer. format: int64 type: integer type: object @@ -2333,13 +2719,14 @@ spec: for a load balancer. type: string targetGroup: - description: TargetGroupSpec specifies target group - settings for a given listener. This is created first, - and the ARN is then passed to the listener. + description: |- + TargetGroupSpec specifies target group settings for a given listener. + This is created first, and the ARN is then passed to the listener. properties: name: description: Name of the TargetGroup. Must be unique over the same group of listeners. + maxLength: 32 type: string port: description: Port is the exposed port @@ -2375,6 +2762,9 @@ spec: timeoutSeconds: format: int64 type: integer + unhealthyThresholdCount: + format: int64 + type: integer type: object vpcId: type: string @@ -2398,19 +2788,19 @@ spec: format: int64 type: integer interval: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. 
+ description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer target: type: string timeout: - description: A Duration represents the elapsed time between - two instants as an int64 nanosecond count. The representation - limits the largest representable duration to approximately - 290 years. + description: |- + A Duration represents the elapsed time between two instants + as an int64 nanosecond count. The representation limits the + largest representable duration to approximately 290 years. format: int64 type: integer unhealthyThreshold: @@ -2462,9 +2852,9 @@ spec: - nlb type: string name: - description: The name of the load balancer. It must be unique - within the set of load balancers defined in the region. - It also serves as identifier. + description: |- + The name of the load balancer. It must be unique within the set of load balancers + defined in the region. It also serves as identifier. type: string scheme: description: Scheme is the load balancer scheme, either internet-facing @@ -2523,6 +2913,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways + IPs as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in @@ -2544,10 +2938,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access - from. Cannot be specified with CidrBlocks. The field - will be combined with source security group IDs - if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml index 409a4431d0..5e11a9ba11 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsclusterstaticidentities.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -21,19 +21,24 @@ spec: - name: v1beta1 schema: openAPIV3Schema: - description: AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities - API It represents a reference to an AWS access key ID and secret access - key, stored in a secret. + description: |- + AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API + It represents a reference to an AWS access key ID and secret access key, stored in a secret. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -41,14 +46,12 @@ spec: description: Spec for this AWSClusterStaticIdentity properties: allowedNamespaces: - description: AllowedNamespaces is used to identify which namespaces - are allowed to use the identity from. Namespaces can be selected - either using an array of namespaces or with label selector. An empty - allowedNamespaces object indicates that AWSClusters can use this - identity from any namespace. If this object is nil, no namespaces - will be allowed (default behaviour, if this field is not provided) - A namespace should be either in the NamespaceList or match with - Selector to use the identity. + description: |- + AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + Namespaces can be selected either using an array of namespaces or with label selector. + An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + A namespace should be either in the NamespaceList or match with Selector to use the identity. nullable: true properties: list: @@ -59,32 +62,33 @@ spec: nullable: true type: array selector: - description: An empty selector indicates that AWSClusters cannot - use this AWSClusterIdentity from any namespace. + description: |- + An empty selector indicates that AWSClusters cannot use this + AWSClusterIdentity from any namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -96,20 +100,21 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic type: object secretRef: - description: 'Reference to a secret containing the credentials. The - secret should contain the following data keys: AccessKeyID: AKIAIOSFODNN7EXAMPLE - SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY SessionToken: - Optional' + description: |- + Reference to a secret containing the credentials. The secret should + contain the following data keys: + AccessKeyID: AKIAIOSFODNN7EXAMPLE + SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + SessionToken: Optional type: string required: - secretRef @@ -120,19 +125,24 @@ spec: - name: v1beta2 schema: openAPIV3Schema: - description: AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities - API It represents a reference to an AWS access key ID and secret access - key, stored in a secret. + description: |- + AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API + It represents a reference to an AWS access key ID and secret access key, stored in a secret. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -140,14 +150,12 @@ spec: description: Spec for this AWSClusterStaticIdentity properties: allowedNamespaces: - description: AllowedNamespaces is used to identify which namespaces - are allowed to use the identity from. Namespaces can be selected - either using an array of namespaces or with label selector. An empty - allowedNamespaces object indicates that AWSClusters can use this - identity from any namespace. If this object is nil, no namespaces - will be allowed (default behaviour, if this field is not provided) - A namespace should be either in the NamespaceList or match with - Selector to use the identity. + description: |- + AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + Namespaces can be selected either using an array of namespaces or with label selector. + An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + A namespace should be either in the NamespaceList or match with Selector to use the identity. nullable: true properties: list: @@ -158,32 +166,33 @@ spec: nullable: true type: array selector: - description: An empty selector indicates that AWSClusters cannot - use this AWSClusterIdentity from any namespace. + description: |- + An empty selector indicates that AWSClusters cannot use this + AWSClusterIdentity from any namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -195,20 +204,21 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic type: object secretRef: - description: 'Reference to a secret containing the credentials. The - secret should contain the following data keys: AccessKeyID: AKIAIOSFODNN7EXAMPLE - SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY SessionToken: - Optional' + description: |- + Reference to a secret containing the credentials. The secret should + contain the following data keys: + AccessKeyID: AKIAIOSFODNN7EXAMPLE + SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + SessionToken: Optional type: string required: - secretRef diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index df369e0c2d..94beb2e660 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -30,14 +30,19 @@ spec: Cluster Templates. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -45,26 +50,31 @@ spec: description: AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. properties: template: + description: AWSClusterTemplateResource defines the desired state + of AWSClusterTemplate. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: @@ -74,42 +84,41 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to - add to AWS resources managed by the AWS provider, in addition - to the ones added by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object bastion: description: Bastion contains options to configure the bastion host. properties: allowedCIDRBlocks: - description: AllowedCIDRBlocks is a list of CIDR blocks - allowed to access the bastion host. They are set as - ingress rules for the Bastion host's Security Group - (defaults to 0.0.0.0/0). + description: |- + AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. + They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). items: type: string type: array ami: - description: AMI will use the specified AMI to boot the - bastion. If not specified, the AMI will default to one - picked out in public space. + description: |- + AMI will use the specified AMI to boot the bastion. If not specified, + the AMI will default to one picked out in public space. type: string disableIngressRules: - description: DisableIngressRules will ensure there are - no Ingress rules in the bastion host's security group. + description: |- + DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. Requires AllowedCIDRBlocks to be empty. type: boolean enabled: - description: Enabled allows this provider to create a - bastion host instance with a public ip to access the - VPC private network. + description: |- + Enabled allows this provider to create a bastion host instance + with a public ip to access the VPC private network. type: boolean instanceType: - description: InstanceType will use the specified instance - type for the bastion. If not specified, Cluster API - Provider AWS will use t3.micro for all regions except - us-east-1, where t2.micro will be the default. + description: |- + InstanceType will use the specified instance type for the bastion. 
If not specified, + Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro + will be the default. type: string type: object controlPlaneEndpoint: @@ -132,36 +141,36 @@ spec: for customizing control plane behavior. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups sets the security - groups used by the load balancer. Expected to be security - group IDs This is optional - if not provided new security - groups will be created for the load balancer + description: |- + AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs + This is optional - if not provided new security groups will be created for the load balancer items: type: string type: array crossZoneLoadBalancing: - description: "CrossZoneLoadBalancing enables the classic - ELB cross availability zone balancing. \n With cross-zone - load balancing, each load balancer node for your Classic - Load Balancer distributes requests evenly across the - registered instances in all enabled Availability Zones. - If cross-zone load balancing is disabled, each load - balancer node distributes requests evenly across the - registered instances in its Availability Zone only. - \n Defaults to false." + description: |- + CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + + + With cross-zone load balancing, each load balancer node for your Classic Load Balancer + distributes requests evenly across the registered instances in all enabled Availability Zones. + If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + the registered instances in its Availability Zone only. + + + Defaults to false. type: boolean healthCheckProtocol: - description: HealthCheckProtocol sets the protocol type - for classic ELB health check target default value is - ClassicELBProtocolSSL + description: |- + HealthCheckProtocol sets the protocol type for classic ELB health check target + default value is ClassicELBProtocolSSL type: string name: - description: Name sets the name of the classic ELB load - balancer. As per AWS, the name must be unique within - your set of load balancers for the region, must have - a maximum of 32 characters, must contain only alphanumeric - characters or hyphens, and cannot begin or end with - a hyphen. Once set, the value cannot be changed. + description: |- + Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + within your set of load balancers for the region, must have a maximum of 32 characters, must + contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + set, the value cannot be changed. maxLength: 32 pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$ type: string @@ -183,10 +192,9 @@ spec: type: array type: object identityRef: - description: IdentityRef is a reference to an identity to - be used when reconciling the managed control plane. If no - identity is specified, the default identity for this controller - will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. 
@@ -204,32 +212,32 @@ spec: - name type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system used to look up machine images when a machine does - not specify an AMI. When set, this will be used for all - cluster machines unless a machine specifies a different - ImageLookupBaseOS. + description: |- + ImageLookupBaseOS is the name of the base operating system used to look + up machine images when a machine does not specify an AMI. When set, this + will be used for all cluster machines unless a machine specifies a + different ImageLookupBaseOS. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to - look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines - unless a machine specifies a different ImageLookupOrg. Supports - substitutions for {{.BaseOS}} and {{.K8sVersion}} with the - base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced - by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the - ubuntu base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up machine images when + a machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. + Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + OS and kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: - description: ImageLookupOrg is the AWS Organization ID to - look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines - unless a machine specifies a different ImageLookupOrg. + description: |- + ImageLookupOrg is the AWS Organization ID to look up machine images when a + machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. type: string network: description: NetworkSpec encapsulates all things related to @@ -239,10 +247,9 @@ spec: description: CNI configuration properties: cniIngressRules: - description: CNIIngressRules specify rules to apply - to control plane and worker node security groups. - The source for the rule will be set to control plane - and worker security group IDs. + description: |- + CNIIngressRules specify rules to apply to control plane and worker node security groups. + The source for the rule will be set to control plane and worker security group IDs. items: description: CNIIngressRule defines an AWS ingress rule for CNI requirements. 
@@ -270,10 +277,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set - of security groups to use for cluster instances This - is optional - if not provided new security groups will - be created for the cluster + description: |- + SecurityGroupOverrides is an optional set of security groups to use for cluster instances + This is optional - if not provided new security groups will be created for the cluster type: object subnets: description: Subnets configuration. @@ -293,18 +299,15 @@ spec: this resource. type: string ipv6CidrBlock: - description: IPv6CidrBlock is the IPv6 CIDR block - to be used when the provider creates a managed - VPC. A subnet can have an IPv4 and an IPv6 address. - IPv6 is only supported in managed clusters, this - field cannot be set on AWSCluster object. + description: |- + IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + A subnet can have an IPv4 and an IPv6 address. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: string isIpv6: - description: IsIPv6 defines the subnet as an IPv6 - subnet. A subnet is IPv6 when it is associated - with a VPC that has IPv6 enabled. IPv6 is only - supported in managed clusters, this field cannot - be set on AWSCluster object. + description: |- + IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: boolean isPublic: description: IsPublic defines the subnet as a public @@ -313,13 +316,9 @@ spec: gateway. type: boolean natGatewayId: - description: NatGatewayID is the NAT gateway id - associated with the subnet. Ignored unless the - subnet is managed by the provider, in which case - this is set on the public subnet where the NAT - gateway resides. It is then used to determine - routes for private subnets in the same AZ as the - public subnet. + description: |- + NatGatewayID is the NAT gateway id associated with the subnet. + Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. type: string routeTableId: description: RouteTableID is the routing table id @@ -338,30 +337,29 @@ spec: properties: availabilityZoneSelection: default: Ordered - description: 'AvailabilityZoneSelection specifies - how AZs should be selected if there are more AZs - in a region than specified by AvailabilityZoneUsageLimit. - There are 2 selection schemes: Ordered - selects - based on alphabetical order Random - selects AZs - randomly in a region Defaults to Ordered' + description: |- + AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: + Ordered - selects based on alphabetical order + Random - selects AZs randomly in a region + Defaults to Ordered enum: - Ordered - Random type: string availabilityZoneUsageLimit: default: 3 - description: AvailabilityZoneUsageLimit specifies - the maximum number of availability zones (AZ) that - should be used in a region when automatically creating - subnets. If a region has more than this number of - AZs then this number of AZs will be picked randomly - when creating default subnets. 
Defaults to 3 + description: |- + AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + should be used in a region when automatically creating subnets. If a region has more + than this number of AZs then this number of AZs will be picked randomly when creating + default subnets. Defaults to 3 minimum: 1 type: integer cidrBlock: - description: CidrBlock is the CIDR block to be used - when the provider creates a managed VPC. Defaults - to 10.0.0.0/16. + description: |- + CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + Defaults to 10.0.0.0/16. type: string id: description: ID is the vpc-id of the VPC this provider @@ -372,8 +370,8 @@ spec: gateway associated with the VPC. type: string ipv6: - description: IPv6 contains ipv6 specific settings - for the network. Supported only in managed clusters. + description: |- + IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. This field cannot be set on AWSCluster object. properties: cidrBlock: @@ -402,15 +400,16 @@ spec: description: The AWS Region the cluster lives in. type: string s3Bucket: - description: S3Bucket contains options to configure a supporting - S3 bucket for this cluster - currently used for nodes requiring - Ignition (https://coreos.github.io/ignition/) for bootstrapping - (requires BootstrapFormatIgnition feature flag to be enabled). + description: |- + S3Bucket contains options to configure a supporting S3 bucket for this + cluster - currently used for nodes requiring Ignition + (https://coreos.github.io/ignition/) for bootstrapping (requires + BootstrapFormatIgnition feature flag to be enabled). properties: controlPlaneIAMInstanceProfile: - description: ControlPlaneIAMInstanceProfile is a name - of the IAMInstanceProfile, which will be allowed to - read control-plane node bootstrap data from S3 Bucket. + description: |- + ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed + to read control-plane node bootstrap data from S3 Bucket. type: string name: description: Name defines name of S3 Bucket to be created. @@ -419,9 +418,9 @@ spec: pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ type: string nodesIAMInstanceProfiles: - description: NodesIAMInstanceProfiles is a list of IAM - instance profiles, which will be allowed to read worker - nodes bootstrap data from S3 Bucket. + description: |- + NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read + worker nodes bootstrap data from S3 Bucket. items: type: string type: array @@ -459,14 +458,19 @@ spec: Cluster Templates. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -474,26 +478,31 @@ spec: description: AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. properties: template: + description: AWSClusterTemplateResource defines the desired state + of AWSClusterTemplateResource. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: @@ -503,42 +512,41 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to - add to AWS resources managed by the AWS provider, in addition - to the ones added by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object bastion: description: Bastion contains options to configure the bastion host. properties: allowedCIDRBlocks: - description: AllowedCIDRBlocks is a list of CIDR blocks - allowed to access the bastion host. They are set as - ingress rules for the Bastion host's Security Group - (defaults to 0.0.0.0/0). + description: |- + AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. + They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). items: type: string type: array ami: - description: AMI will use the specified AMI to boot the - bastion. If not specified, the AMI will default to one - picked out in public space. + description: |- + AMI will use the specified AMI to boot the bastion. If not specified, + the AMI will default to one picked out in public space. 
type: string disableIngressRules: - description: DisableIngressRules will ensure there are - no Ingress rules in the bastion host's security group. + description: |- + DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. Requires AllowedCIDRBlocks to be empty. type: boolean enabled: - description: Enabled allows this provider to create a - bastion host instance with a public ip to access the - VPC private network. + description: |- + Enabled allows this provider to create a bastion host instance + with a public ip to access the VPC private network. type: boolean instanceType: - description: InstanceType will use the specified instance - type for the bastion. If not specified, Cluster API - Provider AWS will use t3.micro for all regions except - us-east-1, where t2.micro will be the default. + description: |- + InstanceType will use the specified instance type for the bastion. If not specified, + Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro + will be the default. type: string type: object controlPlaneEndpoint: @@ -561,13 +569,70 @@ spec: for customizing control plane behavior. properties: additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. This is only applicable - to Network Load Balancer (NLB) types for the time being. + description: |- + AdditionalListeners sets the additional listeners for the control plane load balancer. + This is only applicable to Network Load Balancer (NLB) types for the time being. items: - description: AdditionalListenerSpec defines the desired - state of an additional listener on an AWS load balancer. + description: |- + AdditionalListenerSpec defines the desired state of an + additional listener on an AWS load balancer. properties: + healthCheck: + description: HealthCheck sets the optional custom + health check configuration to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + path: + description: |- + The destination for health checks on the targets when using the protocol HTTP or HTTPS, + otherwise the path will be ignored. + type: string + port: + description: |- + The port the load balancer uses when performing health checks for additional target groups. When + not specified this value will be set for the same of listener port. + type: string + protocol: + description: |- + The protocol to use to health check connect with the target. When not specified the Protocol + will be the same of the listener. + enum: + - TCP + - HTTP + - HTTPS + type: string + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object port: description: Port sets the port for the additional listener. 
@@ -577,8 +642,9 @@ spec: type: integer protocol: default: TCP - description: Protocol sets the protocol for the - additional listener. Currently only TCP is supported. + description: |- + Protocol sets the protocol for the additional listener. + Currently only TCP is supported. enum: - TCP type: string @@ -590,33 +656,71 @@ spec: - port x-kubernetes-list-type: map additionalSecurityGroups: - description: AdditionalSecurityGroups sets the security - groups used by the load balancer. Expected to be security - group IDs This is optional - if not provided new security - groups will be created for the load balancer + description: |- + AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs + This is optional - if not provided new security groups will be created for the load balancer items: type: string type: array crossZoneLoadBalancing: - description: "CrossZoneLoadBalancing enables the classic - ELB cross availability zone balancing. \n With cross-zone - load balancing, each load balancer node for your Classic - Load Balancer distributes requests evenly across the - registered instances in all enabled Availability Zones. - If cross-zone load balancing is disabled, each load - balancer node distributes requests evenly across the - registered instances in its Availability Zone only. - \n Defaults to false." + description: |- + CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + + + With cross-zone load balancing, each load balancer node for your Classic Load Balancer + distributes requests evenly across the registered instances in all enabled Availability Zones. + If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + the registered instances in its Availability Zone only. + + + Defaults to false. type: boolean disableHostsRewrite: - description: DisableHostsRewrite disabled the hair pinning - issue solution that adds the NLB's address as 127.0.0.1 - to the hosts file of each instance. This is by default, - false. + description: |- + DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts + file of each instance. This is by default, false. type: boolean + healthCheck: + description: HealthCheck sets custom health check configuration + to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. 
+ format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object healthCheckProtocol: - description: HealthCheckProtocol sets the protocol type - for ELB health check target default value is ELBProtocolSSL + description: |- + HealthCheckProtocol sets the protocol type for ELB health check target + default value is ELBProtocolSSL enum: - TCP - SSL @@ -652,6 +756,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways + IPs as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP @@ -673,10 +781,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access - from. Cannot be specified with CidrBlocks. The - field will be combined with source security group - IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -712,18 +819,17 @@ spec: - disabled type: string name: - description: Name sets the name of the classic ELB load - balancer. As per AWS, the name must be unique within - your set of load balancers for the region, must have - a maximum of 32 characters, must contain only alphanumeric - characters or hyphens, and cannot begin or end with - a hyphen. Once set, the value cannot be changed. + description: |- + Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + within your set of load balancers for the region, must have a maximum of 32 characters, must + contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + set, the value cannot be changed. maxLength: 32 pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$ type: string preserveClientIP: - description: PreserveClientIP lets the user control if - preservation of client ips must be retained or not. + description: |- + PreserveClientIP lets the user control if preservation of client ips must be retained or not. If this is enabled 6443 will be opened to 0.0.0.0/0. type: boolean scheme: @@ -744,10 +850,9 @@ spec: type: array type: object identityRef: - description: IdentityRef is a reference to an identity to - be used when reconciling the managed control plane. If no - identity is specified, the default identity for this controller - will be used. + description: |- + IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + If no identity is specified, the default identity for this controller will be used. properties: kind: description: Kind of the identity. @@ -765,32 +870,32 @@ spec: - name type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system used to look up machine images when a machine does - not specify an AMI. When set, this will be used for all - cluster machines unless a machine specifies a different - ImageLookupBaseOS. + description: |- + ImageLookupBaseOS is the name of the base operating system used to look + up machine images when a machine does not specify an AMI. When set, this + will be used for all cluster machines unless a machine specifies a + different ImageLookupBaseOS. 
type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to - look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines - unless a machine specifies a different ImageLookupOrg. Supports - substitutions for {{.BaseOS}} and {{.K8sVersion}} with the - base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced - by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the - ubuntu base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up machine images when + a machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. + Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + OS and kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: - description: ImageLookupOrg is the AWS Organization ID to - look up machine images when a machine does not specify an - AMI. When set, this will be used for all cluster machines - unless a machine specifies a different ImageLookupOrg. + description: |- + ImageLookupOrg is the AWS Organization ID to look up machine images when a + machine does not specify an AMI. When set, this will be used for all + cluster machines unless a machine specifies a different ImageLookupOrg. type: string network: description: NetworkSpec encapsulates all things related to @@ -824,6 +929,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways + IPs as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP @@ -845,10 +954,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access - from. Cannot be specified with CidrBlocks. The - field will be combined with source security group - IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -876,10 +984,9 @@ spec: description: CNI configuration properties: cniIngressRules: - description: CNIIngressRules specify rules to apply - to control plane and worker node security groups. - The source for the rule will be set to control plane - and worker security group IDs. 
+ description: |- + CNIIngressRules specify rules to apply to control plane and worker node security groups. + The source for the rule will be set to control plane and worker security group IDs. items: description: CNIIngressRule defines an AWS ingress rule for CNI requirements. @@ -907,10 +1014,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set - of security groups to use for cluster instances This - is optional - if not provided new security groups will - be created for the cluster + description: |- + SecurityGroupOverrides is an optional set of security groups to use for cluster instances + This is optional - if not provided new security groups will be created for the cluster type: object subnets: description: Subnets configuration. @@ -926,31 +1032,27 @@ spec: when the provider creates a managed VPC. type: string id: - description: "ID defines a unique identifier to - reference this resource. If you're bringing your - subnet, set the AWS subnet-id here, it must start - with `subnet-`. \n When the VPC is managed by - CAPA, and you'd like the provider to create a - subnet for you, the id can be set to any placeholder - value that does not start with `subnet-`; upon - creation, the subnet AWS identifier will be populated - in the `ResourceID` field and the `id` field is - going to be used as the subnet name. If you specify - a tag called `Name`, it takes precedence." + description: |- + ID defines a unique identifier to reference this resource. + If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`. + + + When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you, + the id can be set to any placeholder value that does not start with `subnet-`; + upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and + the `id` field is going to be used as the subnet name. If you specify a tag + called `Name`, it takes precedence. type: string ipv6CidrBlock: - description: IPv6CidrBlock is the IPv6 CIDR block - to be used when the provider creates a managed - VPC. A subnet can have an IPv4 and an IPv6 address. - IPv6 is only supported in managed clusters, this - field cannot be set on AWSCluster object. + description: |- + IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + A subnet can have an IPv4 and an IPv6 address. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: string isIpv6: - description: IsIPv6 defines the subnet as an IPv6 - subnet. A subnet is IPv6 when it is associated - with a VPC that has IPv6 enabled. IPv6 is only - supported in managed clusters, this field cannot - be set on AWSCluster object. + description: |- + IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. type: boolean isPublic: description: IsPublic defines the subnet as a public @@ -959,18 +1061,23 @@ spec: gateway. type: boolean natGatewayId: - description: NatGatewayID is the NAT gateway id - associated with the subnet. Ignored unless the - subnet is managed by the provider, in which case - this is set on the public subnet where the NAT - gateway resides. It is then used to determine - routes for private subnets in the same AZ as the - public subnet. 
+ description: |- + NatGatewayID is the NAT gateway id associated with the subnet. + Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. + type: string + parentZoneName: + description: |- + ParentZoneName is the zone name where the current subnet's zone is tied when + the zone is a Local Zone. + + + The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName + to select the correct private route table to egress traffic to the internet. type: string resourceID: - description: ResourceID is the subnet identifier - from AWS, READ ONLY. This field is populated when - the provider manages the subnet. + description: |- + ResourceID is the subnet identifier from AWS, READ ONLY. + This field is populated when the provider manages the subnet. type: string routeTableId: description: RouteTableID is the routing table id @@ -982,6 +1089,42 @@ spec: description: Tags is a collection of tags describing the resource. type: object + zoneType: + description: |- + ZoneType defines the type of the zone where the subnet is created. + + + The valid values are availability-zone, local-zone, and wavelength-zone. + + + Subnet with zone type availability-zone (regular) is always selected to create cluster + resources, like Load Balancers, NAT Gateways, Control Plane nodes, etc. + + + Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create + regular cluster resources. + + + The public subnet in availability-zone or local-zone is associated with a regular public + route table with a default route entry to an Internet Gateway. + + + The public subnet in wavelength-zone is associated with a carrier public + route table with a default route entry to a Carrier Gateway. + + + The private subnet in the availability-zone is associated with a private route table with + the default route entry to a NAT Gateway created in that zone. + + + The private subnet in the local-zone or wavelength-zone is associated with a private route table with + the default route entry re-using the NAT Gateway in the Region (preferred from the + parent zone, the zone type availability-zone in the region, or first table available). + enum: + - availability-zone + - local-zone + - wavelength-zone + type: string required: - id type: object @@ -994,42 +1137,84 @@ spec: properties: availabilityZoneSelection: default: Ordered - description: 'AvailabilityZoneSelection specifies - how AZs should be selected if there are more AZs - in a region than specified by AvailabilityZoneUsageLimit. - There are 2 selection schemes: Ordered - selects - based on alphabetical order Random - selects AZs - randomly in a region Defaults to Ordered' + description: |- + AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: + Ordered - selects based on alphabetical order + Random - selects AZs randomly in a region + Defaults to Ordered enum: - Ordered - Random type: string availabilityZoneUsageLimit: default: 3 - description: AvailabilityZoneUsageLimit specifies - the maximum number of availability zones (AZ) that - should be used in a region when automatically creating - subnets. If a region has more than this number of - AZs then this number of AZs will be picked randomly - when creating default subnets.
Defaults to 3 + description: |- + AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + should be used in a region when automatically creating subnets. If a region has more + than this number of AZs then this number of AZs will be picked randomly when creating + default subnets. Defaults to 3 minimum: 1 type: integer + carrierGatewayId: + description: |- + CarrierGatewayID is the id of the internet gateway associated with the VPC, + for carrier network (Wavelength Zones). + type: string + x-kubernetes-validations: + - message: Carrier Gateway ID must start with 'cagw-' + rule: self.startsWith('cagw-') cidrBlock: - description: CidrBlock is the CIDR block to be used - when the provider creates a managed VPC. Defaults - to 10.0.0.0/16. Mutually exclusive with IPAMPool. + description: |- + CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + Defaults to 10.0.0.0/16. + Mutually exclusive with IPAMPool. type: string + elasticIpPool: + description: |- + ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool + brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for + the API Server. + properties: + publicIpv4Pool: + description: |- + PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + resource ID starts with 'ipv4pool-ec2'. + maxLength: 30 + type: string + publicIpv4PoolFallbackOrder: + description: |- + PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted and + no more IPv4 addresses are available in the pool. + + + When set to 'amazon-pool', the controller checks if the pool has an available IPv4 address; when the pool has reached the + IPv4 limit, the address will be claimed from Amazon-pool (default). + + + When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + enum: + - amazon-pool + - none + type: string + x-kubernetes-validations: + - message: allowed values are 'none' and 'amazon-pool' + rule: self in ['none','amazon-pool'] + type: object emptyRoutesDefaultVPCSecurityGroup: - description: "EmptyRoutesDefaultVPCSecurityGroup specifies - whether the default VPC security group ingress and - egress rules should be removed. \n By default, when - creating a VPC, AWS creates a security group called - `default` with ingress and egress rules that allow - traffic from anywhere. The group could be used as - a potential surface attack and it's generally suggested - that the group rules are removed or modified appropriately. - \n NOTE: This only applies when the VPC is managed - by the Cluster API AWS controller." + description: |- + EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress + and egress rules should be removed. + + + By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress + rules that allow traffic from anywhere. The group could be used as a potential surface attack and + it's generally suggested that the group rules are removed or modified appropriately. + + + NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.
type: boolean id: description: ID is the vpc-id of the VPC this provider @@ -1040,8 +1225,9 @@ spec: gateway associated with the VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv4 pool to be - used for VPC. Mutually exclusive with CidrBlock. + description: |- + IPAMPool defines the IPAMv4 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool this @@ -1052,22 +1238,22 @@ spec: this provider should use to create VPC. type: string netmaskLength: - description: The netmask length of the IPv4 CIDR - you want to allocate to VPC from an Amazon VPC - IP Address Manager (IPAM) pool. Defaults to - /16 for IPv4 if not specified. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. + Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object ipv6: - description: IPv6 contains ipv6 specific settings - for the network. Supported only in managed clusters. + description: |- + IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. This field cannot be set on AWSCluster object. properties: cidrBlock: - description: CidrBlock is the CIDR block provided - by Amazon when VPC has enabled IPv6. Mutually - exclusive with IPAMPool. + description: |- + CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6. + Mutually exclusive with IPAMPool. type: string egressOnlyInternetGatewayId: description: EgressOnlyInternetGatewayID is the @@ -1075,9 +1261,9 @@ spec: with an IPv6 enabled VPC. type: string ipamPool: - description: IPAMPool defines the IPAMv6 pool - to be used for VPC. Mutually exclusive with - CidrBlock. + description: |- + IPAMPool defines the IPAMv6 pool to be used for VPC. + Mutually exclusive with CidrBlock. properties: id: description: ID is the ID of the IPAM pool @@ -1089,32 +1275,60 @@ spec: VPC. type: string netmaskLength: - description: The netmask length of the IPv4 - CIDR you want to allocate to VPC from an - Amazon VPC IP Address Manager (IPAM) pool. + description: |- + The netmask length of the IPv4 CIDR you want to allocate to VPC from + an Amazon VPC IP Address Manager (IPAM) pool. Defaults to /16 for IPv4 if not specified. format: int64 type: integer type: object poolId: - description: PoolID is the IP pool which must - be defined in case of BYO IP is defined. Must - be specified if CidrBlock is set. Mutually exclusive - with IPAMPool. + description: |- + PoolID is the IP pool which must be defined in case of BYO IP is defined. + Must be specified if CidrBlock is set. + Mutually exclusive with IPAMPool. type: string type: object privateDnsHostnameTypeOnLaunch: - description: PrivateDNSHostnameTypeOnLaunch is the - type of hostname to assign to instances in the subnet - at launch. For IPv4-only and dual-stack (IPv4 and - IPv6) subnets, an instance DNS name can be based - on the instance IPv4 address (ip-name) or the instance - ID (resource-name). For IPv6 only subnets, an instance - DNS name must be based on the instance ID (resource-name). + description: |- + PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch. + For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) + or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name). 
enum: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: |- + SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use + a separate IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block + and settings to associate with the managed VPC. + Currently, only IPv4 is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR + block to associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array + subnetSchema: + default: PreferPrivate + description: |- + SubnetSchema specifies how CidrBlock should be divided into subnets in the VPC depending on the number of AZs. + PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. + PreferPublic - has the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet + that will be further sub-divided for the private subnets. + Defaults to PreferPrivate + enum: + - PreferPrivate + - PreferPublic + type: string tags: additionalProperties: type: string @@ -1131,15 +1345,20 @@ spec: description: The AWS Region the cluster lives in. type: string s3Bucket: - description: S3Bucket contains options to configure a supporting - S3 bucket for this cluster - currently used for nodes requiring - Ignition (https://coreos.github.io/ignition/) for bootstrapping - (requires BootstrapFormatIgnition feature flag to be enabled). + description: |- + S3Bucket contains options to configure a supporting S3 bucket for this + cluster - currently used for nodes requiring Ignition + (https://coreos.github.io/ignition/) for bootstrapping (requires + BootstrapFormatIgnition feature flag to be enabled). properties: + bestEffortDeleteObjects: + description: BestEffortDeleteObjects defines whether access/permission + errors during object deletion should be ignored. + type: boolean controlPlaneIAMInstanceProfile: - description: ControlPlaneIAMInstanceProfile is a name - of the IAMInstanceProfile, which will be allowed to - read control-plane node bootstrap data from S3 Bucket. + description: |- + ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed + to read control-plane node bootstrap data from S3 Bucket. type: string name: description: Name defines name of S3 Bucket to be created. @@ -1148,38 +1367,99 @@ spec: pattern: ^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$ type: string nodesIAMInstanceProfiles: - description: NodesIAMInstanceProfiles is a list of IAM - instance profiles, which will be allowed to read worker - nodes bootstrap data from S3 Bucket. + description: |- + NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read + worker nodes bootstrap data from S3 Bucket. items: type: string type: array presignedURLDuration: - description: "PresignedURLDuration defines the duration - for which presigned URLs are valid. \n This is used - to generate presigned URLs for S3 Bucket objects, which - are used by control-plane and worker nodes to fetch - bootstrap data. \n When enabled, the IAM instance profiles - specified are not used." + description: |- + PresignedURLDuration defines the duration for which presigned URLs are valid.
+ + + This is used to generate presigned URLs for S3 Bucket objects, which are used by + control-plane and worker nodes to fetch bootstrap data. + + + When enabled, the IAM instance profiles specified are not used. type: string required: - name type: object secondaryControlPlaneLoadBalancer: - description: "SecondaryControlPlaneLoadBalancer is an additional - load balancer that can be used for the control plane. \n - An example use case is to have a separate internal load - balancer for internal traffic, and a separate external load - balancer for external traffic." + description: |- + SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane. + + + An example use case is to have a separate internal load balancer for internal traffic, + and a separate external load balancer for external traffic. properties: additionalListeners: - description: AdditionalListeners sets the additional listeners - for the control plane load balancer. This is only applicable - to Network Load Balancer (NLB) types for the time being. + description: |- + AdditionalListeners sets the additional listeners for the control plane load balancer. + This is only applicable to Network Load Balancer (NLB) types for the time being. items: - description: AdditionalListenerSpec defines the desired - state of an additional listener on an AWS load balancer. + description: |- + AdditionalListenerSpec defines the desired state of an + additional listener on an AWS load balancer. properties: + healthCheck: + description: HealthCheck sets the optional custom + health check configuration to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + path: + description: |- + The destination for health checks on the targets when using the protocol HTTP or HTTPS, + otherwise the path will be ignored. + type: string + port: + description: |- + The port the load balancer uses when performing health checks for additional target groups. When + not specified, this value will be set to the same as the listener port. + type: string + protocol: + description: |- + The protocol to use to connect with the target for the health check. When not specified, the Protocol + will be the same as the listener. + enum: + - TCP + - HTTP + - HTTPS + type: string + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object port: description: Port sets the port for the additional listener. @@ -1189,8 +1469,9 @@ spec: type: integer protocol: default: TCP - description: Protocol sets the protocol for the - additional listener. Currently only TCP is supported. + description: |- + Protocol sets the protocol for the additional listener. + Currently only TCP is supported.
enum: - TCP type: string @@ -1202,33 +1483,71 @@ spec: - port x-kubernetes-list-type: map additionalSecurityGroups: - description: AdditionalSecurityGroups sets the security - groups used by the load balancer. Expected to be security - group IDs This is optional - if not provided new security - groups will be created for the load balancer + description: |- + AdditionalSecurityGroups sets the security groups used by the load balancer. Expected to be security group IDs + This is optional - if not provided new security groups will be created for the load balancer items: type: string type: array crossZoneLoadBalancing: - description: "CrossZoneLoadBalancing enables the classic - ELB cross availability zone balancing. \n With cross-zone - load balancing, each load balancer node for your Classic - Load Balancer distributes requests evenly across the - registered instances in all enabled Availability Zones. - If cross-zone load balancing is disabled, each load - balancer node distributes requests evenly across the - registered instances in its Availability Zone only. - \n Defaults to false." + description: |- + CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + + + With cross-zone load balancing, each load balancer node for your Classic Load Balancer + distributes requests evenly across the registered instances in all enabled Availability Zones. + If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + the registered instances in its Availability Zone only. + + + Defaults to false. type: boolean disableHostsRewrite: - description: DisableHostsRewrite disabled the hair pinning - issue solution that adds the NLB's address as 127.0.0.1 - to the hosts file of each instance. This is by default, - false. + description: |- + DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts + file of each instance. This is by default, false. type: boolean + healthCheck: + description: HealthCheck sets custom health check configuration + to the API target group. + properties: + intervalSeconds: + description: |- + The approximate amount of time, in seconds, between health checks of an individual + target. + format: int64 + maximum: 300 + minimum: 5 + type: integer + thresholdCount: + description: |- + The number of consecutive health check successes required before considering + a target healthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + timeoutSeconds: + description: |- + The amount of time, in seconds, during which no response from a target means + a failed health check. + format: int64 + maximum: 120 + minimum: 2 + type: integer + unhealthyThresholdCount: + description: |- + The number of consecutive health check failures required before considering + a target unhealthy. + format: int64 + maximum: 10 + minimum: 2 + type: integer + type: object healthCheckProtocol: - description: HealthCheckProtocol sets the protocol type - for ELB health check target default value is ELBProtocolSSL + description: |- + HealthCheckProtocol sets the protocol type for ELB health check target + default value is ELBProtocolSSL enum: - TCP - SSL @@ -1264,6 +1583,10 @@ spec: items: type: string type: array + natGatewaysIPsSource: + description: NatGatewaysIPsSource use the NAT gateways + IPs as the source for the ingress rule. + type: boolean protocol: description: Protocol is the protocol for the ingress rule. 
Accepted values are "-1" (all), "4" (IP @@ -1285,10 +1608,9 @@ spec: type: string type: array sourceSecurityGroupRoles: - description: The security group role to allow access - from. Cannot be specified with CidrBlocks. The - field will be combined with source security group - IDs if specified. + description: |- + The security group role to allow access from. Cannot be specified with CidrBlocks. + The field will be combined with source security group IDs if specified. items: description: SecurityGroupRole defines the unique role of a security group. @@ -1324,18 +1646,17 @@ spec: - disabled type: string name: - description: Name sets the name of the classic ELB load - balancer. As per AWS, the name must be unique within - your set of load balancers for the region, must have - a maximum of 32 characters, must contain only alphanumeric - characters or hyphens, and cannot begin or end with - a hyphen. Once set, the value cannot be changed. + description: |- + Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + within your set of load balancers for the region, must have a maximum of 32 characters, must + contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + set, the value cannot be changed. maxLength: 32 pattern: ^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$ type: string preserveClientIP: - description: PreserveClientIP lets the user control if - preservation of client ips must be retained or not. + description: |- + PreserveClientIP lets the user control if preservation of client ips must be retained or not. If this is enabled 6443 will be opened to 0.0.0.0/0. type: boolean scheme: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml index e1c4006c01..f3699dfdfc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsfargateprofiles.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -37,14 +37,19 @@ spec: description: AWSFargateProfile is the Schema for the awsfargateprofiles API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -54,9 +59,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object clusterName: description: ClusterName is the name of the Cluster this object belongs @@ -67,10 +72,11 @@ spec: description: ProfileName specifies the profile name. type: string roleName: - description: RoleName specifies the name of IAM role for this fargate - pool If the role is pre-existing we will treat it as unmanaged and - not delete it on deletion. If the EKSEnableIAM feature flag is true - and no name is supplied then a role is created. + description: |- + RoleName specifies the name of IAM role for this fargate pool + If the role is pre-existing we will treat it as unmanaged + and not delete it on deletion. If the EKSEnableIAM feature + flag is true and no name is supplied then a role is created. type: string selectors: description: Selectors specify fargate pod selectors. @@ -91,8 +97,9 @@ spec: type: object type: array subnetIDs: - description: SubnetIDs specifies which subnets are used for the auto - scaling group of this nodegroup. + description: |- + SubnetIDs specifies which subnets are used for the + auto scaling group of this nodegroup. items: type: string type: array @@ -109,37 +116,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. 
type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -148,36 +155,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the FargateProfile and will contain - a more verbose string suitable for logging and human consumption. - \n This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the FargateProfile's spec or the configuration of the - controller, and that manual intervention is required. Examples of - terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the responsible - controller itself being critically misconfigured. \n Any transient - errors that occur during the reconciliation of FargateProfiles can - be added as events to the FargateProfile object and/or logged in - the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the FargateProfile and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the FargateProfile's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of + FargateProfiles can be added as events to the FargateProfile + object and/or logged in the controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the FargateProfile and will contain - a succinct value suitable for machine interpretation. \n This field - should not be set for transitive errors that a controller faces - that are expected to be fixed automatically over time (like service - outages), but instead indicate that something is fundamentally wrong - with the FargateProfile's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of FargateProfiles can be added - as events to the FargateProfile object and/or logged in the controller's - output." 
+ description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the FargateProfile and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the FargateProfile's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of + FargateProfiles can be added as events to the FargateProfile object + and/or logged in the controller's output. type: string ready: default: false @@ -210,14 +227,19 @@ spec: description: AWSFargateProfile is the Schema for the awsfargateprofiles API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -227,9 +249,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object clusterName: description: ClusterName is the name of the Cluster this object belongs @@ -240,10 +262,11 @@ spec: description: ProfileName specifies the profile name. type: string roleName: - description: RoleName specifies the name of IAM role for this fargate - pool If the role is pre-existing we will treat it as unmanaged and - not delete it on deletion. If the EKSEnableIAM feature flag is true - and no name is supplied then a role is created. + description: |- + RoleName specifies the name of IAM role for this fargate pool + If the role is pre-existing we will treat it as unmanaged + and not delete it on deletion. If the EKSEnableIAM feature + flag is true and no name is supplied then a role is created. 
type: string selectors: description: Selectors specify fargate pod selectors. @@ -264,8 +287,9 @@ spec: type: object type: array subnetIDs: - description: SubnetIDs specifies which subnets are used for the auto - scaling group of this nodegroup. + description: |- + SubnetIDs specifies which subnets are used for the + auto scaling group of this nodegroup. items: type: string type: array @@ -282,37 +306,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -321,36 +345,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the FargateProfile and will contain - a more verbose string suitable for logging and human consumption. - \n This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the FargateProfile's spec or the configuration of the - controller, and that manual intervention is required. 
Examples of - terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the responsible - controller itself being critically misconfigured. \n Any transient - errors that occur during the reconciliation of FargateProfiles can - be added as events to the FargateProfile object and/or logged in - the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the FargateProfile and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the FargateProfile's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of + FargateProfiles can be added as events to the FargateProfile + object and/or logged in the controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the FargateProfile and will contain - a succinct value suitable for machine interpretation. \n This field - should not be set for transitive errors that a controller faces - that are expected to be fixed automatically over time (like service - outages), but instead indicate that something is fundamentally wrong - with the FargateProfile's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of FargateProfiles can be added - as events to the FargateProfile object and/or logged in the controller's - output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the FargateProfile and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the FargateProfile's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of + FargateProfiles can be added as events to the FargateProfile object + and/or logged in the controller's output. 
type: string ready: default: false diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml index b646849522..e28522622c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsmachinepools.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -45,14 +45,19 @@ spec: description: AWSMachinePool is the Schema for the awsmachinepools API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -62,8 +67,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to an - instance, in addition to the ones added by default by the AWS provider. + description: |- + AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + AWS provider. type: object availabilityZones: description: AvailabilityZones is an array of availability zones instances @@ -76,20 +82,21 @@ spec: to use when an instance is launched. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references - to security groups that should be applied to the instances. - These security groups would be set in addition to any security - groups defined at the cluster level or in the actuator. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instances. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. items: - description: AWSResourceReference is a reference to a specific - AWS resource by ID or filters. Only one of ID or Filters may - be specified. Specifying more than one will result in a validation - error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. 
+ Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to - identify a resource They are applied according to the - rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -130,26 +137,29 @@ spec: type: string type: object iamInstanceProfile: - description: The name or the Amazon Resource Name (ARN) of the - instance profile associated with the IAM role for the instance. - The instance profile contains the IAM role. + description: |- + The name or the Amazon Resource Name (ARN) of the instance profile associated + with the IAM role for the instance. The instance profile contains the IAM + role. type: string imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look - up the image for this machine It will be ignored if an explicit - AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} - with the base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced - by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to use @@ -174,11 +184,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. 
+ If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -186,9 +195,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -214,17 +223,17 @@ spec: type: string type: object sshKeyName: - description: SSHKeyName is the name of the ssh key to attach to - the instance. Valid values are empty string (do not use SSH - keys), a valid SSH key name, or omitted (use the default SSH - key name) + description: |- + SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string + (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) type: string versionNumber: - description: 'VersionNumber is the version of the launch template - that is applied. Typically a new version is created when at - least one of the following happens: 1) A new launch template - spec is applied. 2) One or more parameters in an existing template - is changed. 3) A new AMI is discovered.' + description: |- + VersionNumber is the version of the launch template that is applied. + Typically a new version is created when at least one of the following happens: + 1) A new launch template spec is applied. + 2) One or more parameters in an existing template is changed. + 3) A new AMI is discovered. format: int64 type: integer type: object @@ -233,9 +242,9 @@ spec: group feature type: boolean defaultCoolDown: - description: The amount of time, in seconds, after a scaling activity - completes before another scaling activity can start. If no value - is supplied by user a default value of 300 seconds is set + description: |- + The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. + If no value is supplied by user a default value of 300 seconds is set type: string maxSize: default: 1 @@ -283,9 +292,9 @@ spec: type: object overrides: items: - description: Overrides are used to override the instance type - specified by the launch template with multiple instance types - that can be used to launch On-Demand Instances and Spot Instances. + description: |- + Overrides are used to override the instance type specified by the launch template with multiple + instance types that can be used to launch On-Demand Instances and Spot Instances. properties: instanceType: type: string @@ -298,10 +307,9 @@ spec: description: ProviderID is the ARN of the associated ASG type: string providerIDList: - description: ProviderIDList are the identification IDs of machine - instances provided by the provider. This field must match the provider - IDs as seen on the node objects corresponding to a machine pool's - machine instances. + description: |- + ProviderIDList are the identification IDs of machine instances provided by the provider. + This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances. items: type: string type: array @@ -310,36 +318,38 @@ spec: with the instance refresh request. 
properties: instanceWarmup: - description: The number of seconds until a newly launched instance - is configured and ready to use. During this time, the next replacement - will not be initiated. The default is to use the value for the - health check grace period defined for the group. + description: |- + The number of seconds until a newly launched instance is configured and ready + to use. During this time, the next replacement will not be initiated. + The default is to use the value for the health check grace period defined for the group. format: int64 type: integer minHealthyPercentage: - description: The amount of capacity as a percentage in ASG that - must remain healthy during an instance refresh. The default - is 90. + description: |- + The amount of capacity as a percentage in ASG that must remain healthy + during an instance refresh. The default is 90. format: int64 type: integer strategy: - description: The strategy to use for the instance refresh. The - only valid value is Rolling. A rolling update is an update that - is applied to all instances in an Auto Scaling group until all - instances have been updated. + description: |- + The strategy to use for the instance refresh. The only valid value is Rolling. + A rolling update is an update that is applied to all instances in an Auto + Scaling group until all instances have been updated. type: string type: object subnets: description: Subnets is an array of subnet configurations items: - description: AWSResourceReference is a reference to a specific AWS - resource by ID or filters. Only one of ID or Filters may be specified. - Specifying more than one will result in a validation error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to identify - a resource They are applied according to the rules defined - by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. properties: @@ -381,37 +391,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. 
+ The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -420,34 +430,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. 
\n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string instances: description: Instances contains the status for each instance in the @@ -512,14 +534,19 @@ spec: description: AWSMachinePool is the Schema for the awsmachinepools API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -529,8 +556,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to an - instance, in addition to the ones added by default by the AWS provider. + description: |- + AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + AWS provider. 
type: object availabilityZoneSubnetType: description: AvailabilityZoneSubnetType specifies which type of subnets @@ -551,20 +579,21 @@ spec: to use when an instance is launched. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references - to security groups that should be applied to the instances. - These security groups would be set in addition to any security - groups defined at the cluster level or in the actuator. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instances. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. items: - description: AWSResourceReference is a reference to a specific - AWS resource by ID or filters. Only one of ID or Filters may - be specified. Specifying more than one will result in a validation - error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to - identify a resource They are applied according to the - rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -605,26 +634,29 @@ spec: type: string type: object iamInstanceProfile: - description: The name or the Amazon Resource Name (ARN) of the - instance profile associated with the IAM role for the instance. - The instance profile contains the IAM role. + description: |- + The name or the Amazon Resource Name (ARN) of the instance profile associated + with the IAM role for the instance. The instance profile contains the IAM + role. type: string imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look - up the image for this machine It will be ignored if an explicit - AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} - with the base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced - by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. 
The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to use @@ -636,48 +668,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint - on your instances. \n If you specify a value of disabled, - you cannot access your instance metadata. \n Default: enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit for - instance metadata requests. The larger the number, the further - instance metadata requests can travel. \n Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. + + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance metadata - requests. \n If the state is optional, you can choose to - retrieve instance metadata with or without a session token - on your request. If you retrieve the IAM role credentials - without a token, the version 1.0 role credentials are returned. - If you retrieve the IAM role credentials using a valid session - token, the version 2.0 role credentials are returned. \n - If the state is required, you must send a session token - with any instance metadata retrieval requests. In this state, - retrieving the IAM role credentials always returns the version - 2.0 credentials; the version 1.0 credentials are not available. - \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance tags - from the instance metadata. Set to disabled to turn off - access to instance tags from the instance metadata. For - more information, see Work with instance tags using the - instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). 
- \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. + For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -690,6 +738,50 @@ spec: name: description: The name of the launch template. type: string + nonRootVolumes: + description: Configuration options for the non root storage volumes. + items: + description: Volume encapsulates the configuration options for + the storage device. + properties: + deviceName: + description: Device name + type: string + encrypted: + description: Encrypted is whether the volume should be encrypted + or not. + type: boolean + encryptionKey: + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. + type: string + iops: + description: IOPS is the number of IOPS requested for the + disk. Not applicable to all types. + format: int64 + type: integer + size: + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). + format: int64 + minimum: 8 + type: integer + throughput: + description: Throughput to provision in MiB/s supported + for the volume type. Not applicable to all types. + format: int64 + type: integer + type: + description: Type is the type of the volume (e.g. gp2, io1, + etc...). + type: string + required: + - size + type: object + type: array privateDnsName: description: PrivateDNSName is the options for the instance hostname. properties: @@ -722,11 +814,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -734,9 +825,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -762,17 +853,17 @@ spec: type: string type: object sshKeyName: - description: SSHKeyName is the name of the ssh key to attach to - the instance. Valid values are empty string (do not use SSH - keys), a valid SSH key name, or omitted (use the default SSH - key name) + description: |- + SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string + (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) type: string versionNumber: - description: 'VersionNumber is the version of the launch template - that is applied. 
Typically a new version is created when at - least one of the following happens: 1) A new launch template - spec is applied. 2) One or more parameters in an existing template - is changed. 3) A new AMI is discovered.' + description: |- + VersionNumber is the version of the launch template that is applied. + Typically a new version is created when at least one of the following happens: + 1) A new launch template spec is applied. + 2) One or more parameters in an existing template is changed. + 3) A new AMI is discovered. format: int64 type: integer type: object @@ -781,15 +872,16 @@ spec: group feature type: boolean defaultCoolDown: - description: The amount of time, in seconds, after a scaling activity - completes before another scaling activity can start. If no value - is supplied by user a default value of 300 seconds is set + description: |- + The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. + If no value is supplied by user a default value of 300 seconds is set type: string defaultInstanceWarmup: - description: The amount of time, in seconds, until a new instance - is considered to have finished initializing and resource consumption - to become stable after it enters the InService state. If no value - is supplied by user a default value of 300 seconds is set + description: |- + The amount of time, in seconds, until a new instance is considered to + have finished initializing and resource consumption to become stable + after it enters the InService state. + If no value is supplied by user a default value of 300 seconds is set type: string maxSize: default: 1 @@ -840,9 +932,9 @@ spec: type: object overrides: items: - description: Overrides are used to override the instance type - specified by the launch template with multiple instance types - that can be used to launch On-Demand Instances and Spot Instances. + description: |- + Overrides are used to override the instance type specified by the launch template with multiple + instance types that can be used to launch On-Demand Instances and Spot Instances. properties: instanceType: type: string @@ -855,10 +947,9 @@ spec: description: ProviderID is the ARN of the associated ASG type: string providerIDList: - description: ProviderIDList are the identification IDs of machine - instances provided by the provider. This field must match the provider - IDs as seen on the node objects corresponding to a machine pool's - machine instances. + description: |- + ProviderIDList are the identification IDs of machine instances provided by the provider. + This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances. items: type: string type: array @@ -867,41 +958,43 @@ spec: with the instance refresh request. properties: disable: - description: Disable, if true, disables instance refresh from - triggering when new launch templates are detected. This is useful - in scenarios where ASG nodes are externally managed. + description: |- + Disable, if true, disables instance refresh from triggering when new launch templates are detected. + This is useful in scenarios where ASG nodes are externally managed. type: boolean instanceWarmup: - description: The number of seconds until a newly launched instance - is configured and ready to use. During this time, the next replacement - will not be initiated. The default is to use the value for the - health check grace period defined for the group. 
+ description: |- + The number of seconds until a newly launched instance is configured and ready + to use. During this time, the next replacement will not be initiated. + The default is to use the value for the health check grace period defined for the group. format: int64 type: integer minHealthyPercentage: - description: The amount of capacity as a percentage in ASG that - must remain healthy during an instance refresh. The default - is 90. + description: |- + The amount of capacity as a percentage in ASG that must remain healthy + during an instance refresh. The default is 90. format: int64 type: integer strategy: - description: The strategy to use for the instance refresh. The - only valid value is Rolling. A rolling update is an update that - is applied to all instances in an Auto Scaling group until all - instances have been updated. + description: |- + The strategy to use for the instance refresh. The only valid value is Rolling. + A rolling update is an update that is applied to all instances in an Auto + Scaling group until all instances have been updated. type: string type: object subnets: description: Subnets is an array of subnet configurations items: - description: AWSResourceReference is a reference to a specific AWS - resource by ID or filters. Only one of ID or Filters may be specified. - Specifying more than one will result in a validation error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to identify - a resource They are applied according to the rules defined - by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. properties: @@ -925,9 +1018,9 @@ spec: type: object type: array suspendProcesses: - description: SuspendProcesses defines a list of processes to suspend - for the given ASG. This is constantly reconciled. If a process is - removed from this list it will automatically be resumed. + description: |- + SuspendProcesses defines a list of processes to suspend for the given ASG. This is constantly reconciled. + If a process is removed from this list it will automatically be resumed. properties: all: type: boolean @@ -974,37 +1067,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. 
The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -1013,34 +1106,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. 
type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. \n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string instances: description: Instances contains the status for each instance in the diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml index e356896c1b..c02466fa59 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsmachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -45,14 +45,19 @@ spec: description: AWSMachine is the schema for Amazon EC2 machines. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -61,26 +66,27 @@ spec: instance. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references to - security groups that should be applied to the instance. These security - groups would be set in addition to any security groups defined at - the cluster level or in the actuator. It is possible to specify - either IDs of Filters. Using Filters will cause additional requests - to AWS API and if tags change the attached security groups might - change too. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instance. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters + will cause additional requests to AWS API and if tags change the attached security groups might change too. items: - description: AWSResourceReference is a reference to a specific AWS - resource by ID or filters. Only one of ID or Filters may be specified. - Specifying more than one will result in a validation error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: arn: - description: 'ARN of resource. Deprecated: This field has no - function and is going to be removed in the next release.' + description: |- + ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. type: string filters: - description: 'Filters is a set of key/value pairs used to identify - a resource They are applied according to the rules defined - by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. properties: @@ -106,10 +112,10 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to an - instance, in addition to the ones added by default by the AWS provider. - If both the AWSCluster and the AWSMachine specify the same tag name - with different values, the AWSMachine's value takes precedence. + description: |- + AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the + AWSMachine's value takes precedence. type: object ami: description: AMI is the reference to the AMI from which to create @@ -127,15 +133,16 @@ spec: type: string type: object cloudInit: - description: CloudInit defines options related to the bootstrapping - systems where CloudInit is used. 
+ description: |- + CloudInit defines options related to the bootstrapping systems where + CloudInit is used. properties: insecureSkipSecretsManager: - description: InsecureSkipSecretsManager, when set to true will - not use AWS Secrets Manager or AWS Systems Manager Parameter - Store to ensure privacy of userdata. By default, a cloud-init - boothook shell script is prepended to download the userdata - from Secrets Manager and additionally delete the secret. + description: |- + InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager + or AWS Systems Manager Parameter Store to ensure privacy of userdata. + By default, a cloud-init boothook shell script is prepended to download + the userdata from Secrets Manager and additionally delete the secret. type: boolean secretCount: description: SecretCount is the number of secrets used to form @@ -143,26 +150,26 @@ spec: format: int32 type: integer secretPrefix: - description: SecretPrefix is the prefix for the secret name. This - is stored temporarily, and deleted when the machine registers - as a node against the workload cluster. + description: |- + SecretPrefix is the prefix for the secret name. This is stored + temporarily, and deleted when the machine registers as a node against + the workload cluster. type: string secureSecretsBackend: - description: SecureSecretsBackend, when set to parameter-store - will utilize the AWS Systems Manager Parameter Storage to distribute - secrets. By default or with the value of secrets-manager, will - use AWS Secrets Manager instead. + description: |- + SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager + Parameter Storage to distribute secrets. By default or with the value of secrets-manager, + will use AWS Secrets Manager instead. enum: - secrets-manager - ssm-parameter-store type: string type: object failureDomain: - description: FailureDomain is the failure domain unique identifier - this Machine should be attached to, as defined in Cluster API. For - this infrastructure provider, the ID is equivalent to an AWS Availability - Zone. If multiple subnets are matched for the availability zone, - the first one returned is picked. + description: |- + FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. + For this infrastructure provider, the ID is equivalent to an AWS Availability Zone. + If multiple subnets are matched for the availability zone, the first one returned is picked. type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile @@ -181,21 +188,23 @@ spec: type: string type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating system - to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look up - the image for this machine It will be ignored if an explicit AMI - is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} - with the base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced by - kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. 
For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to use for @@ -210,8 +219,9 @@ spec: minLength: 2 type: string networkInterfaces: - description: NetworkInterfaces is a list of ENIs to associate with - the instance. A maximum of 2 may be specified. + description: |- + NetworkInterfaces is a list of ENIs to associate with the instance. + A maximum of 2 may be specified. items: type: string maxItems: 2 @@ -230,9 +240,9 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be used. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. The key must already exist and be accessible by the controller. type: string iops: @@ -241,9 +251,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -265,9 +275,12 @@ spec: cloud provider. type: string publicIP: - description: 'PublicIP specifies whether the instance should get a - public IP. Precedence for this setting is as follows: 1. This field - if set 2. Cluster/flavor setting 3. Subnet default' + description: |- + PublicIP specifies whether the instance should get a public IP. + Precedence for this setting is as follows: + 1. This field if set + 2. Cluster/flavor setting + 3. Subnet default type: boolean rootVolume: description: RootVolume encapsulates the configuration options for @@ -281,10 +294,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt the - volume. Can be either a KMS key ID or ARN. If Encrypted is set - and this is omitted, the default AWS key will be used. The key - must already exist and be accessible by the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. 
+ The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the disk. @@ -292,9 +305,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -324,17 +337,20 @@ spec: valid SSH key name, or omitted (use the default SSH key name) type: string subnet: - description: Subnet is a reference to the subnet to use for this instance. - If not specified, the cluster subnet will be used. + description: |- + Subnet is a reference to the subnet to use for this instance. If not specified, + the cluster subnet will be used. properties: arn: - description: 'ARN of resource. Deprecated: This field has no function - and is going to be removed in the next release.' + description: |- + ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. type: string filters: - description: 'Filters is a set of key/value pairs used to identify - a resource They are applied according to the rules defined by - the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. properties: @@ -365,10 +381,10 @@ spec: - host type: string uncompressedUserData: - description: UncompressedUserData specify whether the user data is - gzip-compressed before it is sent to ec2 instance. cloud-init has - built-in support for gzip-compressed user data user data stored - in aws secret manager is always gzip-compressed. + description: |- + UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance. + cloud-init has built-in support for gzip-compressed user data + user data stored in aws secret manager is always gzip-compressed. type: boolean required: - instanceType @@ -401,37 +417,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. 
+ This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -440,45 +456,55 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. 
\n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string instanceState: description: InstanceState is the state of the AWS instance for this machine. type: string interruptible: - description: Interruptible reports that this machine is using spot - instances and can therefore be interrupted by CAPI when it receives - a notice that the spot instance is to be terminated by AWS. This - will be set to true when SpotMarketOptions is not nil (i.e. this - machine is using a spot instance). + description: |- + Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS. + This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance). type: boolean ready: description: Ready is true when the provider resource is ready. @@ -516,14 +542,19 @@ spec: description: AWSMachine is the schema for Amazon EC2 machines. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -532,22 +563,22 @@ spec: instance. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references to - security groups that should be applied to the instance. These security - groups would be set in addition to any security groups defined at - the cluster level or in the actuator. It is possible to specify - either IDs of Filters. Using Filters will cause additional requests - to AWS API and if tags change the attached security groups might - change too. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instance. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters + will cause additional requests to AWS API and if tags change the attached security groups might change too. items: - description: AWSResourceReference is a reference to a specific AWS - resource by ID or filters. Only one of ID or Filters may be specified. - Specifying more than one will result in a validation error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to identify - a resource They are applied according to the rules defined - by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. properties: @@ -573,10 +604,10 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to an - instance, in addition to the ones added by default by the AWS provider. - If both the AWSCluster and the AWSMachine specify the same tag name - with different values, the AWSMachine's value takes precedence. + description: |- + AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the + AWSMachine's value takes precedence. type: object ami: description: AMI is the reference to the AMI from which to create @@ -593,16 +624,21 @@ spec: description: ID of resource type: string type: object + capacityReservationId: + description: CapacityReservationID specifies the target Capacity Reservation + into which the instance should be launched. + type: string cloudInit: - description: CloudInit defines options related to the bootstrapping - systems where CloudInit is used. 
+ description: |- + CloudInit defines options related to the bootstrapping systems where + CloudInit is used. properties: insecureSkipSecretsManager: - description: InsecureSkipSecretsManager, when set to true will - not use AWS Secrets Manager or AWS Systems Manager Parameter - Store to ensure privacy of userdata. By default, a cloud-init - boothook shell script is prepended to download the userdata - from Secrets Manager and additionally delete the secret. + description: |- + InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager + or AWS Systems Manager Parameter Store to ensure privacy of userdata. + By default, a cloud-init boothook shell script is prepended to download + the userdata from Secrets Manager and additionally delete the secret. type: boolean secretCount: description: SecretCount is the number of secrets used to form @@ -610,20 +646,52 @@ spec: format: int32 type: integer secretPrefix: - description: SecretPrefix is the prefix for the secret name. This - is stored temporarily, and deleted when the machine registers - as a node against the workload cluster. + description: |- + SecretPrefix is the prefix for the secret name. This is stored + temporarily, and deleted when the machine registers as a node against + the workload cluster. type: string secureSecretsBackend: - description: SecureSecretsBackend, when set to parameter-store - will utilize the AWS Systems Manager Parameter Storage to distribute - secrets. By default or with the value of secrets-manager, will - use AWS Secrets Manager instead. + description: |- + SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager + Parameter Storage to distribute secrets. By default or with the value of secrets-manager, + will use AWS Secrets Manager instead. enum: - secrets-manager - ssm-parameter-store type: string type: object + elasticIpPool: + description: ElasticIPPool is the configuration to allocate Public + IPv4 address (Elastic IP/EIP) from user-defined pool. + properties: + publicIpv4Pool: + description: |- + PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + resource ID starts with 'ipv4pool-ec2'. + maxLength: 30 + type: string + publicIpv4PoolFallbackOrder: + description: |- + PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, + no more IPv4 address available in the pool. + + + When set to 'amazon-pool', the controller check if the pool has available IPv4 address, when pool has reached the + IPv4 limit, the address will be claimed from Amazon-pool (default). + + + When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + enum: + - amazon-pool + - none + type: string + x-kubernetes-validations: + - message: allowed values are 'none' and 'amazon-pool' + rule: self in ['none','amazon-pool'] + type: object iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile to assign to the instance @@ -632,28 +700,89 @@ spec: description: Ignition defined options related to the bootstrapping systems where Ignition is used. properties: + proxy: + description: |- + Proxy defines proxy settings for Ignition. + Only valid for Ignition versions 3.1 and above. 
+ properties: + httpProxy: + description: |- + HTTPProxy is the HTTP proxy to use for Ignition. + A single URL that specifies the proxy server to use for HTTP and HTTPS requests, + unless overridden by the HTTPSProxy or NoProxy options. + type: string + httpsProxy: + description: |- + HTTPSProxy is the HTTPS proxy to use for Ignition. + A single URL that specifies the proxy server to use for HTTPS requests, + unless overridden by the NoProxy option. + type: string + noProxy: + description: |- + NoProxy is the list of domains to not proxy for Ignition. + Specifies a list of strings to hosts that should be excluded from proxying. + + + Each value is represented by: + - An IP address prefix (1.2.3.4) + - An IP address prefix in CIDR notation (1.2.3.4/8) + - A domain name + - A domain name matches that name and all subdomains + - A domain name with a leading . matches subdomains only + - A special DNS label (*), indicates that no proxying should be done + + + An IP address prefix and domain name can also include a literal port number (1.2.3.4:80). + items: + description: IgnitionNoProxy defines the list of domains + to not proxy for Ignition. + maxLength: 2048 + type: string + maxItems: 64 + type: array + type: object storageType: default: ClusterObjectStore - description: "StorageType defines how to store the boostrap user - data for Ignition. This can be used to instruct Ignition from - where to fetch the user data to bootstrap an instance. \n When - omitted, the storage option will default to ClusterObjectStore. - \n When set to \"ClusterObjectStore\", if the capability is - available and a Cluster ObjectStore configuration is correctly - provided in the Cluster object (under .spec.s3Bucket), an object - store will be used to store bootstrap user data. \n When set - to \"UnencryptedUserData\", EC2 Instance User Data will be used - to store the machine bootstrap user data, unencrypted. This - option is considered less secure than others as user data may - contain sensitive informations (keys, certificates, etc.) and - users with ec2:DescribeInstances permission or users running - pods that can access the ec2 metadata service have access to - this sensitive information. So this is only to be used at ones - own risk, and only when other more secure options are not viable." + description: |- + StorageType defines how to store the boostrap user data for Ignition. + This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance. + + + When omitted, the storage option will default to ClusterObjectStore. + + + When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration + is correctly provided in the Cluster object (under .spec.s3Bucket), + an object store will be used to store bootstrap user data. + + + When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted. + This option is considered less secure than others as user data may contain sensitive informations (keys, certificates, etc.) + and users with ec2:DescribeInstances permission or users running pods + that can access the ec2 metadata service have access to this sensitive information. + So this is only to be used at ones own risk, and only when other more secure options are not viable. enum: - ClusterObjectStore - UnencryptedUserData type: string + tls: + description: |- + TLS defines TLS settings for Ignition. + Only valid for Ignition versions 3.1 and above. 
+ properties: + certificateAuthorities: + description: |- + CASources defines the list of certificate authorities to use for Ignition. + The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates. + Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme. + items: + description: IgnitionCASource defines the source of the + certificate authority to use for Ignition. + maxLength: 65536 + type: string + maxItems: 64 + type: array + type: object version: default: "2.3" description: Version defines which version of Ignition will be @@ -668,21 +797,23 @@ spec: type: string type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating system - to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look up - the image for this machine It will be ignored if an explicit AMI - is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} - with the base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced by - kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to use for @@ -697,46 +828,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint on - your instances. \n If you specify a value of disabled, you cannot - access your instance metadata. \n Default: enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit for instance - metadata requests. The larger the number, the further instance - metadata requests can travel. \n Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. 
+ + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance metadata - requests. \n If the state is optional, you can choose to retrieve - instance metadata with or without a session token on your request. - If you retrieve the IAM role credentials without a token, the - version 1.0 role credentials are returned. If you retrieve the - IAM role credentials using a valid session token, the version - 2.0 role credentials are returned. \n If the state is required, - you must send a session token with any instance metadata retrieval - requests. In this state, retrieving the IAM role credentials - always returns the version 2.0 credentials; the version 1.0 - credentials are not available. \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance tags - from the instance metadata. Set to disabled to turn off access - to instance tags from the instance metadata. For more information, - see Work with instance tags using the instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). - \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. + For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -748,8 +897,9 @@ spec: minLength: 2 type: string networkInterfaces: - description: NetworkInterfaces is a list of ENIs to associate with - the instance. A maximum of 2 may be specified. + description: |- + NetworkInterfaces is a list of ENIs to associate with the instance. + A maximum of 2 may be specified. items: type: string maxItems: 2 @@ -768,9 +918,9 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be used. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. The key must already exist and be accessible by the controller. type: string iops: @@ -779,9 +929,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. 
+ Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -802,6 +952,15 @@ spec: description: PlacementGroupName specifies the name of the placement group in which to launch the instance. type: string + placementGroupPartition: + description: |- + PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + strategy set to partition. + format: int64 + maximum: 7 + minimum: 1 + type: integer privateDnsName: description: PrivateDNSName is the options for the instance hostname. properties: @@ -826,9 +985,12 @@ spec: cloud provider. type: string publicIP: - description: 'PublicIP specifies whether the instance should get a - public IP. Precedence for this setting is as follows: 1. This field - if set 2. Cluster/flavor setting 3. Subnet default' + description: |- + PublicIP specifies whether the instance should get a public IP. + Precedence for this setting is as follows: + 1. This field if set + 2. Cluster/flavor setting + 3. Subnet default type: boolean rootVolume: description: RootVolume encapsulates the configuration options for @@ -842,10 +1004,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt the - volume. Can be either a KMS key ID or ARN. If Encrypted is set - and this is omitted, the default AWS key will be used. The key - must already exist and be accessible by the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the disk. @@ -853,9 +1015,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -873,9 +1035,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set of security - groups to use for the node. This is optional - if not provided security - groups from the cluster will be used. + description: |- + SecurityGroupOverrides is an optional set of security groups to use for the node. + This is optional - if not provided security groups from the cluster will be used. type: object spotMarketOptions: description: SpotMarketOptions allows users to configure instances @@ -892,13 +1054,15 @@ spec: valid SSH key name, or omitted (use the default SSH key name) type: string subnet: - description: Subnet is a reference to the subnet to use for this instance. - If not specified, the cluster subnet will be used. + description: |- + Subnet is a reference to the subnet to use for this instance. If not specified, + the cluster subnet will be used. 
properties: filters: - description: 'Filters is a set of key/value pairs used to identify - a resource They are applied according to the rules defined by - the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. properties: @@ -929,10 +1093,10 @@ spec: - host type: string uncompressedUserData: - description: UncompressedUserData specify whether the user data is - gzip-compressed before it is sent to ec2 instance. cloud-init has - built-in support for gzip-compressed user data user data stored - in aws secret manager is always gzip-compressed. + description: |- + UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance. + cloud-init has built-in support for gzip-compressed user data + user data stored in aws secret manager is always gzip-compressed. type: boolean required: - instanceType @@ -965,37 +1129,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
type: string required: - lastTransitionTime @@ -1004,45 +1168,55 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. \n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. 
Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string instanceState: description: InstanceState is the state of the AWS instance for this machine. type: string interruptible: - description: Interruptible reports that this machine is using spot - instances and can therefore be interrupted by CAPI when it receives - a notice that the spot instance is to be terminated by AWS. This - will be set to true when SpotMarketOptions is not nil (i.e. this - machine is using a spot instance). + description: |- + Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS. + This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance). type: boolean ready: description: Ready is true when the provider resource is ready. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml index 00b85b4969..501a837555 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsmachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -25,14 +25,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -44,24 +49,27 @@ spec: to create am AWSMachine from a template. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: @@ -69,28 +77,27 @@ spec: of the machine. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references - to security groups that should be applied to the instance. - These security groups would be set in addition to any security - groups defined at the cluster level or in the actuator. - It is possible to specify either IDs of Filters. Using Filters - will cause additional requests to AWS API and if tags change - the attached security groups might change too. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instance. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters + will cause additional requests to AWS API and if tags change the attached security groups might change too. items: - description: AWSResourceReference is a reference to a specific - AWS resource by ID or filters. Only one of ID or Filters - may be specified. Specifying more than one will result - in a validation error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: arn: - description: 'ARN of resource. Deprecated: This field - has no function and is going to be removed in the - next release.' + description: |- + ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. type: string filters: - description: 'Filters is a set of key/value pairs used - to identify a resource They are applied according - to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. 
@@ -118,11 +125,10 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to - add to an instance, in addition to the ones added by default - by the AWS provider. If both the AWSCluster and the AWSMachine - specify the same tag name with different values, the AWSMachine's - value takes precedence. + description: |- + AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the + AWSMachine's value takes precedence. type: object ami: description: AMI is the reference to the AMI from which to @@ -140,16 +146,16 @@ spec: type: string type: object cloudInit: - description: CloudInit defines options related to the bootstrapping - systems where CloudInit is used. + description: |- + CloudInit defines options related to the bootstrapping systems where + CloudInit is used. properties: insecureSkipSecretsManager: - description: InsecureSkipSecretsManager, when set to true - will not use AWS Secrets Manager or AWS Systems Manager - Parameter Store to ensure privacy of userdata. By default, - a cloud-init boothook shell script is prepended to download - the userdata from Secrets Manager and additionally delete - the secret. + description: |- + InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager + or AWS Systems Manager Parameter Store to ensure privacy of userdata. + By default, a cloud-init boothook shell script is prepended to download + the userdata from Secrets Manager and additionally delete the secret. type: boolean secretCount: description: SecretCount is the number of secrets used @@ -157,26 +163,26 @@ spec: format: int32 type: integer secretPrefix: - description: SecretPrefix is the prefix for the secret - name. This is stored temporarily, and deleted when the - machine registers as a node against the workload cluster. + description: |- + SecretPrefix is the prefix for the secret name. This is stored + temporarily, and deleted when the machine registers as a node against + the workload cluster. type: string secureSecretsBackend: - description: SecureSecretsBackend, when set to parameter-store - will utilize the AWS Systems Manager Parameter Storage - to distribute secrets. By default or with the value - of secrets-manager, will use AWS Secrets Manager instead. + description: |- + SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager + Parameter Storage to distribute secrets. By default or with the value of secrets-manager, + will use AWS Secrets Manager instead. enum: - secrets-manager - ssm-parameter-store type: string type: object failureDomain: - description: FailureDomain is the failure domain unique identifier - this Machine should be attached to, as defined in Cluster - API. For this infrastructure provider, the ID is equivalent - to an AWS Availability Zone. If multiple subnets are matched - for the availability zone, the first one returned is picked. + description: |- + FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. + For this infrastructure provider, the ID is equivalent to an AWS Availability Zone. + If multiple subnets are matched for the availability zone, the first one returned is picked. 
type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance @@ -195,22 +201,23 @@ spec: type: string type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to - look up the image for this machine It will be ignored if - an explicit AMI is set. Supports substitutions for {{.BaseOS}} - and {{.K8sVersion}} with the base OS and kubernetes version, - respectively. The BaseOS will be the value in ImageLookupBaseOS - or ubuntu (the default), and the kubernetes version as defined - by the packages produced by kubernetes/release without v - as a prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, - the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the - ubuntu base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to @@ -225,8 +232,9 @@ spec: minLength: 2 type: string networkInterfaces: - description: NetworkInterfaces is a list of ENIs to associate - with the instance. A maximum of 2 may be specified. + description: |- + NetworkInterfaces is a list of ENIs to associate with the instance. + A maximum of 2 may be specified. items: type: string maxItems: 2 @@ -246,11 +254,10 @@ spec: be encrypted or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to - encrypt the volume. Can be either a KMS key ID or - ARN. If Encrypted is set and this is omitted, the - default AWS key will be used. The key must already - exist and be accessible by the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for @@ -258,9 +265,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size - or 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -282,10 +289,12 @@ spec: by the cloud provider. 
type: string publicIP: - description: 'PublicIP specifies whether the instance should - get a public IP. Precedence for this setting is as follows: - 1. This field if set 2. Cluster/flavor setting 3. Subnet - default' + description: |- + PublicIP specifies whether the instance should get a public IP. + Precedence for this setting is as follows: + 1. This field if set + 2. Cluster/flavor setting + 3. Subnet default type: boolean rootVolume: description: RootVolume encapsulates the configuration options @@ -299,11 +308,10 @@ spec: encrypted or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will - be used. The key must already exist and be accessible - by the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for @@ -311,9 +319,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size - or 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -345,19 +353,20 @@ spec: SSH key name) type: string subnet: - description: Subnet is a reference to the subnet to use for - this instance. If not specified, the cluster subnet will - be used. + description: |- + Subnet is a reference to the subnet to use for this instance. If not specified, + the cluster subnet will be used. properties: arn: - description: 'ARN of resource. Deprecated: This field - has no function and is going to be removed in the next - release.' + description: |- + ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. type: string filters: - description: 'Filters is a set of key/value pairs used - to identify a resource They are applied according to - the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -390,10 +399,10 @@ spec: - host type: string uncompressedUserData: - description: UncompressedUserData specify whether the user - data is gzip-compressed before it is sent to ec2 instance. - cloud-init has built-in support for gzip-compressed user - data user data stored in aws secret manager is always gzip-compressed. + description: |- + UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance. + cloud-init has built-in support for gzip-compressed user data + user data stored in aws secret manager is always gzip-compressed. 
type: boolean required: - instanceType @@ -414,9 +423,10 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Capacity defines the resource capacity for this machine. - This value is used for autoscaling from zero operations as defined - in: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md' + description: |- + Capacity defines the resource capacity for this machine. + This value is used for autoscaling from zero operations as defined in: + https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md type: object type: object type: object @@ -429,14 +439,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -448,24 +463,27 @@ spec: to create am AWSMachine from a template. properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations type: object labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. 
- More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object type: object spec: @@ -473,23 +491,22 @@ spec: of the machine. properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references - to security groups that should be applied to the instance. - These security groups would be set in addition to any security - groups defined at the cluster level or in the actuator. - It is possible to specify either IDs of Filters. Using Filters - will cause additional requests to AWS API and if tags change - the attached security groups might change too. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instance. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters + will cause additional requests to AWS API and if tags change the attached security groups might change too. items: - description: AWSResourceReference is a reference to a specific - AWS resource by ID or filters. Only one of ID or Filters - may be specified. Specifying more than one will result - in a validation error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used - to identify a resource They are applied according - to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -517,11 +534,10 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to - add to an instance, in addition to the ones added by default - by the AWS provider. If both the AWSCluster and the AWSMachine - specify the same tag name with different values, the AWSMachine's - value takes precedence. + description: |- + AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the + AWSMachine's value takes precedence. type: object ami: description: AMI is the reference to the AMI from which to @@ -538,17 +554,21 @@ spec: description: ID of resource type: string type: object + capacityReservationId: + description: CapacityReservationID specifies the target Capacity + Reservation into which the instance should be launched. + type: string cloudInit: - description: CloudInit defines options related to the bootstrapping - systems where CloudInit is used. + description: |- + CloudInit defines options related to the bootstrapping systems where + CloudInit is used. 
properties: insecureSkipSecretsManager: - description: InsecureSkipSecretsManager, when set to true - will not use AWS Secrets Manager or AWS Systems Manager - Parameter Store to ensure privacy of userdata. By default, - a cloud-init boothook shell script is prepended to download - the userdata from Secrets Manager and additionally delete - the secret. + description: |- + InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager + or AWS Systems Manager Parameter Store to ensure privacy of userdata. + By default, a cloud-init boothook shell script is prepended to download + the userdata from Secrets Manager and additionally delete the secret. type: boolean secretCount: description: SecretCount is the number of secrets used @@ -556,20 +576,52 @@ spec: format: int32 type: integer secretPrefix: - description: SecretPrefix is the prefix for the secret - name. This is stored temporarily, and deleted when the - machine registers as a node against the workload cluster. + description: |- + SecretPrefix is the prefix for the secret name. This is stored + temporarily, and deleted when the machine registers as a node against + the workload cluster. type: string secureSecretsBackend: - description: SecureSecretsBackend, when set to parameter-store - will utilize the AWS Systems Manager Parameter Storage - to distribute secrets. By default or with the value - of secrets-manager, will use AWS Secrets Manager instead. + description: |- + SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager + Parameter Storage to distribute secrets. By default or with the value of secrets-manager, + will use AWS Secrets Manager instead. enum: - secrets-manager - ssm-parameter-store type: string type: object + elasticIpPool: + description: ElasticIPPool is the configuration to allocate + Public IPv4 address (Elastic IP/EIP) from user-defined pool. + properties: + publicIpv4Pool: + description: |- + PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + resource ID starts with 'ipv4pool-ec2'. + maxLength: 30 + type: string + publicIpv4PoolFallbackOrder: + description: |- + PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, + no more IPv4 address available in the pool. + + + When set to 'amazon-pool', the controller check if the pool has available IPv4 address, when pool has reached the + IPv4 limit, the address will be claimed from Amazon-pool (default). + + + When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + enum: + - amazon-pool + - none + type: string + x-kubernetes-validations: + - message: allowed values are 'none' and 'amazon-pool' + rule: self in ['none','amazon-pool'] + type: object iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile to assign to the instance @@ -578,30 +630,89 @@ spec: description: Ignition defined options related to the bootstrapping systems where Ignition is used. properties: + proxy: + description: |- + Proxy defines proxy settings for Ignition. + Only valid for Ignition versions 3.1 and above. + properties: + httpProxy: + description: |- + HTTPProxy is the HTTP proxy to use for Ignition. 
+ A single URL that specifies the proxy server to use for HTTP and HTTPS requests, + unless overridden by the HTTPSProxy or NoProxy options. + type: string + httpsProxy: + description: |- + HTTPSProxy is the HTTPS proxy to use for Ignition. + A single URL that specifies the proxy server to use for HTTPS requests, + unless overridden by the NoProxy option. + type: string + noProxy: + description: |- + NoProxy is the list of domains to not proxy for Ignition. + Specifies a list of strings to hosts that should be excluded from proxying. + + + Each value is represented by: + - An IP address prefix (1.2.3.4) + - An IP address prefix in CIDR notation (1.2.3.4/8) + - A domain name + - A domain name matches that name and all subdomains + - A domain name with a leading . matches subdomains only + - A special DNS label (*), indicates that no proxying should be done + + + An IP address prefix and domain name can also include a literal port number (1.2.3.4:80). + items: + description: IgnitionNoProxy defines the list of + domains to not proxy for Ignition. + maxLength: 2048 + type: string + maxItems: 64 + type: array + type: object storageType: default: ClusterObjectStore - description: "StorageType defines how to store the boostrap - user data for Ignition. This can be used to instruct - Ignition from where to fetch the user data to bootstrap - an instance. \n When omitted, the storage option will - default to ClusterObjectStore. \n When set to \"ClusterObjectStore\", - if the capability is available and a Cluster ObjectStore - configuration is correctly provided in the Cluster object - (under .spec.s3Bucket), an object store will be used - to store bootstrap user data. \n When set to \"UnencryptedUserData\", - EC2 Instance User Data will be used to store the machine - bootstrap user data, unencrypted. This option is considered - less secure than others as user data may contain sensitive - informations (keys, certificates, etc.) and users with - ec2:DescribeInstances permission or users running pods - that can access the ec2 metadata service have access - to this sensitive information. So this is only to be - used at ones own risk, and only when other more secure - options are not viable." + description: |- + StorageType defines how to store the boostrap user data for Ignition. + This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance. + + + When omitted, the storage option will default to ClusterObjectStore. + + + When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration + is correctly provided in the Cluster object (under .spec.s3Bucket), + an object store will be used to store bootstrap user data. + + + When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted. + This option is considered less secure than others as user data may contain sensitive informations (keys, certificates, etc.) + and users with ec2:DescribeInstances permission or users running pods + that can access the ec2 metadata service have access to this sensitive information. + So this is only to be used at ones own risk, and only when other more secure options are not viable. enum: - ClusterObjectStore - UnencryptedUserData type: string + tls: + description: |- + TLS defines TLS settings for Ignition. + Only valid for Ignition versions 3.1 and above. 
+ properties: + certificateAuthorities: + description: |- + CASources defines the list of certificate authorities to use for Ignition. + The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates. + Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme. + items: + description: IgnitionCASource defines the source + of the certificate authority to use for Ignition. + maxLength: 65536 + type: string + maxItems: 64 + type: array + type: object version: default: "2.3" description: Version defines which version of Ignition @@ -616,22 +727,23 @@ spec: type: string type: object imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to - look up the image for this machine It will be ignored if - an explicit AMI is set. Supports substitutions for {{.BaseOS}} - and {{.K8sVersion}} with the base OS and kubernetes version, - respectively. The BaseOS will be the value in ImageLookupBaseOS - or ubuntu (the default), and the kubernetes version as defined - by the packages produced by kubernetes/release without v - as a prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, - the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the - ubuntu base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to @@ -646,51 +758,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint - on your instances. \n If you specify a value of disabled, - you cannot access your instance metadata. \n Default: - enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit - for instance metadata requests. The larger the number, - the further instance metadata requests can travel. \n - Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. 
+ + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance - metadata requests. \n If the state is optional, you - can choose to retrieve instance metadata with or without - a session token on your request. If you retrieve the - IAM role credentials without a token, the version 1.0 - role credentials are returned. If you retrieve the IAM - role credentials using a valid session token, the version - 2.0 role credentials are returned. \n If the state is - required, you must send a session token with any instance - metadata retrieval requests. In this state, retrieving - the IAM role credentials always returns the version - 2.0 credentials; the version 1.0 credentials are not - available. \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance - tags from the instance metadata. Set to disabled to - turn off access to instance tags from the instance metadata. - For more information, see Work with instance tags using - the instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). - \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. + For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -702,8 +827,9 @@ spec: minLength: 2 type: string networkInterfaces: - description: NetworkInterfaces is a list of ENIs to associate - with the instance. A maximum of 2 may be specified. + description: |- + NetworkInterfaces is a list of ENIs to associate with the instance. + A maximum of 2 may be specified. items: type: string maxItems: 2 @@ -723,11 +849,10 @@ spec: be encrypted or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to - encrypt the volume. Can be either a KMS key ID or - ARN. If Encrypted is set and this is omitted, the - default AWS key will be used. The key must already - exist and be accessible by the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for @@ -735,9 +860,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. 
Must be greater than the image snapshot size - or 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -758,6 +883,15 @@ spec: description: PlacementGroupName specifies the name of the placement group in which to launch the instance. type: string + placementGroupPartition: + description: |- + PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + strategy set to partition. + format: int64 + maximum: 7 + minimum: 1 + type: integer privateDnsName: description: PrivateDNSName is the options for the instance hostname. @@ -784,10 +918,12 @@ spec: by the cloud provider. type: string publicIP: - description: 'PublicIP specifies whether the instance should - get a public IP. Precedence for this setting is as follows: - 1. This field if set 2. Cluster/flavor setting 3. Subnet - default' + description: |- + PublicIP specifies whether the instance should get a public IP. + Precedence for this setting is as follows: + 1. This field if set + 2. Cluster/flavor setting + 3. Subnet default type: boolean rootVolume: description: RootVolume encapsulates the configuration options @@ -801,11 +937,10 @@ spec: encrypted or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will - be used. The key must already exist and be accessible - by the controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for @@ -813,9 +948,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage - device. Must be greater than the image snapshot size - or 8 (whichever is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -834,10 +969,9 @@ spec: securityGroupOverrides: additionalProperties: type: string - description: SecurityGroupOverrides is an optional set of - security groups to use for the node. This is optional - - if not provided security groups from the cluster will be - used. + description: |- + SecurityGroupOverrides is an optional set of security groups to use for the node. + This is optional - if not provided security groups from the cluster will be used. type: object spotMarketOptions: description: SpotMarketOptions allows users to configure instances @@ -855,14 +989,15 @@ spec: SSH key name) type: string subnet: - description: Subnet is a reference to the subnet to use for - this instance. If not specified, the cluster subnet will - be used. + description: |- + Subnet is a reference to the subnet to use for this instance. If not specified, + the cluster subnet will be used. 
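# Illustrative sketch, not part of the generated diff: enforcing IMDSv2 and using the new
# placementGroupPartition field described in this hunk. The key paths under
# spec.template.spec are assumptions; the ranges, defaults and enum values come from the
# schema above, and the group name is hypothetical (the group must use the partition strategy).
spec:
  template:
    spec:
      instanceType: m5.large
      instanceMetadataOptions:
        httpEndpoint: enabled             # default
        httpTokens: required              # require a session token (IMDSv2); default is optional
        httpPutResponseHopLimit: 2        # allowed range 1-64, default 1
        instanceMetadataTags: disabled    # default
      placementGroupName: example-partition-group
      placementGroupPartition: 3          # 1-7; only valid for partition placement groups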
properties: filters: - description: 'Filters is a set of key/value pairs used - to identify a resource They are applied according to - the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -895,10 +1030,10 @@ spec: - host type: string uncompressedUserData: - description: UncompressedUserData specify whether the user - data is gzip-compressed before it is sent to ec2 instance. - cloud-init has built-in support for gzip-compressed user - data user data stored in aws secret manager is always gzip-compressed. + description: |- + UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance. + cloud-init has built-in support for gzip-compressed user data + user data stored in aws secret manager is always gzip-compressed. type: boolean required: - instanceType @@ -919,9 +1054,10 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Capacity defines the resource capacity for this machine. - This value is used for autoscaling from zero operations as defined - in: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md' + description: |- + Capacity defines the resource capacity for this machine. + This value is used for autoscaling from zero operations as defined in: + https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md type: object type: object type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml index ec464772b7..aea8369f91 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsmanagedclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -38,14 +38,19 @@ spec: description: AWSManagedCluster is the Schema for the awsmanagedclusters API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -73,9 +78,9 @@ spec: properties: failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml index aa6fec1755..008bfd9d2e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: awsmanagedmachinepools.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -34,14 +34,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -51,9 +56,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. 
type: object amiType: default: AL2_x86_64 @@ -62,12 +67,15 @@ spec: - AL2_x86_64 - AL2_x86_64_GPU - AL2_ARM_64 + - AL2023_x86_64_STANDARD + - AL2023_ARM_64_STANDARD - CUSTOM type: string amiVersion: - description: AMIVersion defines the desired AMI release version. If - no version number is supplied then the latest version for the Kubernetes - version will be used + description: |- + AMIVersion defines the desired AMI release version. If no version number + is supplied then the latest version for the Kubernetes version + will be used minLength: 2 type: string availabilityZones: @@ -77,26 +85,27 @@ spec: type: string type: array awsLaunchTemplate: - description: AWSLaunchTemplate specifies the launch template to use - to create the managed node group. If AWSLaunchTemplate is specified, - certain node group configuraions outside of launch template are - prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html). + description: |- + AWSLaunchTemplate specifies the launch template to use to create the managed node group. + If AWSLaunchTemplate is specified, certain node group configuraions outside of launch template + are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html). properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references - to security groups that should be applied to the instances. - These security groups would be set in addition to any security - groups defined at the cluster level or in the actuator. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instances. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. items: - description: AWSResourceReference is a reference to a specific - AWS resource by ID or filters. Only one of ID or Filters may - be specified. Specifying more than one will result in a validation - error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to - identify a resource They are applied according to the - rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -137,26 +146,29 @@ spec: type: string type: object iamInstanceProfile: - description: The name or the Amazon Resource Name (ARN) of the - instance profile associated with the IAM role for the instance. - The instance profile contains the IAM role. + description: |- + The name or the Amazon Resource Name (ARN) of the instance profile associated + with the IAM role for the instance. The instance profile contains the IAM + role. type: string imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. 
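# Illustrative sketch, not part of the generated diff: an AWSManagedMachinePool selecting one
# of the newly added AL2023 amiType values. The enum values and field names come from the
# schema above; the apiVersion, metadata and tag values are assumptions.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
  name: example-al2023-pool
spec:
  eksNodegroupName: example-al2023-pool   # optional; a default is derived from namespace and name
  amiType: AL2023_x86_64_STANDARD         # or AL2023_ARM_64_STANDARD for arm64 instance types
  instanceType: m5.large
  additionalTags:
    environment: example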
type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look - up the image for this machine It will be ignored if an explicit - AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} - with the base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced - by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to use @@ -181,11 +193,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -193,9 +204,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -221,17 +232,17 @@ spec: type: string type: object sshKeyName: - description: SSHKeyName is the name of the ssh key to attach to - the instance. Valid values are empty string (do not use SSH - keys), a valid SSH key name, or omitted (use the default SSH - key name) + description: |- + SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string + (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) type: string versionNumber: - description: 'VersionNumber is the version of the launch template - that is applied. Typically a new version is created when at - least one of the following happens: 1) A new launch template - spec is applied. 2) One or more parameters in an existing template - is changed. 3) A new AMI is discovered.' 
+ description: |- + VersionNumber is the version of the launch template that is applied. + Typically a new version is created when at least one of the following happens: + 1) A new launch template spec is applied. + 2) One or more parameters in an existing template is changed. + 3) A new AMI is discovered. format: int64 type: integer type: object @@ -248,10 +259,11 @@ spec: format: int32 type: integer eksNodegroupName: - description: EKSNodegroupName specifies the name of the nodegroup - in AWS corresponding to this MachinePool. If you don't specify a - name then a default name will be created based on the namespace - and name of the managed machine pool. + description: |- + EKSNodegroupName specifies the name of the nodegroup in AWS + corresponding to this MachinePool. If you don't specify a name + then a default name will be created based on the namespace and + name of the managed machine pool. type: string instanceType: description: InstanceType specifies the AWS instance type @@ -262,9 +274,10 @@ spec: description: Labels specifies labels for the Kubernetes node objects type: object providerIDList: - description: ProviderIDList are the provider IDs of instances in the - autoscaling group corresponding to the nodegroup represented by - this machine pool + description: |- + ProviderIDList are the provider IDs of instances in the + autoscaling group corresponding to the nodegroup represented by this + machine pool items: type: string type: array @@ -282,23 +295,25 @@ spec: type: string type: array sshKeyName: - description: SSHKeyName specifies which EC2 SSH key can be used - to access machines. If left empty, the key from the control - plane is used. + description: |- + SSHKeyName specifies which EC2 SSH key can be used to access machines. + If left empty, the key from the control plane is used. type: string type: object roleAdditionalPolicies: - description: RoleAdditionalPolicies allows you to attach additional - polices to the node group role. You must enable the EKSAllowAddRoles + description: |- + RoleAdditionalPolicies allows you to attach additional polices to + the node group role. You must enable the EKSAllowAddRoles feature flag to incorporate these into the created role. items: type: string type: array roleName: - description: RoleName specifies the name of IAM role for the node - group. If the role is pre-existing we will treat it as unmanaged - and not delete it on deletion. If the EKSEnableIAM feature flag - is true and no name is supplied then a role is created. + description: |- + RoleName specifies the name of IAM role for the node group. + If the role is pre-existing we will treat it as unmanaged + and not delete it on deletion. If the EKSEnableIAM feature + flag is true and no name is supplied then a role is created. type: string scaling: description: Scaling specifies scaling for the ASG behind this pool @@ -311,8 +326,9 @@ spec: type: integer type: object subnetIDs: - description: SubnetIDs specifies which subnets are used for the auto - scaling group of this nodegroup + description: |- + SubnetIDs specifies which subnets are used for the + auto scaling group of this nodegroup items: type: string type: array @@ -342,20 +358,21 @@ spec: type: object type: array updateConfig: - description: UpdateConfig holds the optional config to control the - behaviour of the update to the nodegroup. + description: |- + UpdateConfig holds the optional config to control the behaviour of the update + to the nodegroup. 
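# Illustrative sketch, not part of the generated diff: pinning the IAM role, remote access
# key and subnets for an AWSManagedMachinePool, per the descriptions above. Names, IDs and
# the policy ARN are hypothetical; roleAdditionalPolicies is only honoured when the
# EKSAllowAddRoles feature flag is enabled, and a pre-existing roleName is treated as
# unmanaged (it is not deleted on deletion).
spec:
  roleName: example-nodegroup-role
  roleAdditionalPolicies:
    - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
  remoteAccess:
    sshKeyName: example-keypair            # if left empty, the key from the control plane is used
  subnetIDs:
    - subnet-0123456789abcdef0
    - subnet-0fedcba9876543210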
properties: maxUnavailable: - description: MaxUnavailable is the maximum number of nodes unavailable - at once during a version update. Nodes will be updated in parallel. - The maximum number is 100. + description: |- + MaxUnavailable is the maximum number of nodes unavailable at once during a version update. + Nodes will be updated in parallel. The maximum number is 100. maximum: 100 minimum: 1 type: integer maxUnavailablePrecentage: - description: MaxUnavailablePercentage is the maximum percentage - of nodes unavailable during a version update. This percentage - of nodes will be updated in parallel, up to 100 nodes at once. + description: |- + MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This + percentage of nodes will be updated in parallel, up to 100 nodes at once. maximum: 100 minimum: 1 type: integer @@ -373,37 +390,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
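# Illustrative sketch, not part of the generated diff: capping how many nodes are replaced in
# parallel during a nodegroup version update, via the updateConfig block shown above. Only
# the absolute count is set in this sketch; the percentage field in the same block is the
# alternative.
spec:
  updateConfig:
    maxUnavailable: 2    # nodes updated in parallel; allowed range 1-100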
type: string required: - lastTransitionTime @@ -412,36 +429,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the MachinePool and will contain - a more verbose string suitable for logging and human consumption. - \n This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the MachinePool's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of MachinePools can be added as - events to the MachinePool object and/or logged in the controller's - output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the MachinePool and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the MachinePool's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of MachinePools + can be added as events to the MachinePool object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the MachinePool and will contain - a succinct value suitable for machine interpretation. \n This field - should not be set for transitive errors that a controller faces - that are expected to be fixed automatically over time (like service - outages), but instead indicate that something is fundamentally wrong - with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of MachinePools can be added as - events to the MachinePool object and/or logged in the controller's - output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the MachinePool and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. 
Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of MachinePools + can be added as events to the MachinePool object and/or logged in the + controller's output. type: string launchTemplateID: description: The ID of the launch template @@ -451,8 +478,9 @@ spec: type: string ready: default: false - description: Ready denotes that the AWSManagedMachinePool nodegroup - has joined the cluster + description: |- + Ready denotes that the AWSManagedMachinePool nodegroup has joined + the cluster type: boolean replicas: description: Replicas is the most recently observed number of replicas. @@ -482,14 +510,19 @@ spec: API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -499,9 +532,9 @@ spec: additionalTags: additionalProperties: type: string - description: AdditionalTags is an optional set of tags to add to AWS - resources managed by the AWS provider, in addition to the ones added - by default. + description: |- + AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + ones added by default. type: object amiType: default: AL2_x86_64 @@ -510,12 +543,15 @@ spec: - AL2_x86_64 - AL2_x86_64_GPU - AL2_ARM_64 + - AL2023_x86_64_STANDARD + - AL2023_ARM_64_STANDARD - CUSTOM type: string amiVersion: - description: AMIVersion defines the desired AMI release version. If - no version number is supplied then the latest version for the Kubernetes - version will be used + description: |- + AMIVersion defines the desired AMI release version. If no version number + is supplied then the latest version for the Kubernetes version + will be used minLength: 2 type: string availabilityZoneSubnetType: @@ -533,26 +569,27 @@ spec: type: string type: array awsLaunchTemplate: - description: AWSLaunchTemplate specifies the launch template to use - to create the managed node group. If AWSLaunchTemplate is specified, - certain node group configuraions outside of launch template are - prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html). 
+ description: |- + AWSLaunchTemplate specifies the launch template to use to create the managed node group. + If AWSLaunchTemplate is specified, certain node group configuraions outside of launch template + are prohibited (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html). properties: additionalSecurityGroups: - description: AdditionalSecurityGroups is an array of references - to security groups that should be applied to the instances. - These security groups would be set in addition to any security - groups defined at the cluster level or in the actuator. + description: |- + AdditionalSecurityGroups is an array of references to security groups that should be applied to the + instances. These security groups would be set in addition to any security groups defined + at the cluster level or in the actuator. items: - description: AWSResourceReference is a reference to a specific - AWS resource by ID or filters. Only one of ID or Filters may - be specified. Specifying more than one will result in a validation - error. + description: |- + AWSResourceReference is a reference to a specific AWS resource by ID or filters. + Only one of ID or Filters may be specified. Specifying more than one will result in + a validation error. properties: filters: - description: 'Filters is a set of key/value pairs used to - identify a resource They are applied according to the - rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html' + description: |- + Filters is a set of key/value pairs used to identify a resource + They are applied according to the rules defined by the AWS API: + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html items: description: Filter is a filter used to identify an AWS resource. @@ -593,26 +630,29 @@ spec: type: string type: object iamInstanceProfile: - description: The name or the Amazon Resource Name (ARN) of the - instance profile associated with the IAM role for the instance. - The instance profile contains the IAM role. + description: |- + The name or the Amazon Resource Name (ARN) of the instance profile associated + with the IAM role for the instance. The instance profile contains the IAM + role. type: string imageLookupBaseOS: - description: ImageLookupBaseOS is the name of the base operating - system to use for image lookup the AMI is not set. + description: |- + ImageLookupBaseOS is the name of the base operating system to use for + image lookup the AMI is not set. type: string imageLookupFormat: - description: 'ImageLookupFormat is the AMI naming format to look - up the image for this machine It will be ignored if an explicit - AMI is set. Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} - with the base OS and kubernetes version, respectively. The BaseOS - will be the value in ImageLookupBaseOS or ubuntu (the default), - and the kubernetes version as defined by the packages produced - by kubernetes/release without v as a prefix: 1.13.0, 1.12.5-mybuild.1, - or 1.17.3. For example, the default image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* - will end up searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* - for a Machine that is targeting kubernetes v1.18.0 and the ubuntu - base OS. See also: https://golang.org/pkg/text/template/' + description: |- + ImageLookupFormat is the AMI naming format to look up the image for this + machine It will be ignored if an explicit AMI is set. 
Supports + substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + kubernetes version, respectively. The BaseOS will be the value in + ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + defined by the packages produced by kubernetes/release without v as a + prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + also: https://golang.org/pkg/text/template/ type: string imageLookupOrg: description: ImageLookupOrg is the AWS Organization ID to use @@ -624,48 +664,64 @@ spec: properties: httpEndpoint: default: enabled - description: "Enables or disables the HTTP metadata endpoint - on your instances. \n If you specify a value of disabled, - you cannot access your instance metadata. \n Default: enabled" + description: |- + Enables or disables the HTTP metadata endpoint on your instances. + + + If you specify a value of disabled, you cannot access your instance metadata. + + + Default: enabled enum: - enabled - disabled type: string httpPutResponseHopLimit: default: 1 - description: "The desired HTTP PUT response hop limit for - instance metadata requests. The larger the number, the further - instance metadata requests can travel. \n Default: 1" + description: |- + The desired HTTP PUT response hop limit for instance metadata requests. The + larger the number, the further instance metadata requests can travel. + + + Default: 1 format: int64 maximum: 64 minimum: 1 type: integer httpTokens: default: optional - description: "The state of token usage for your instance metadata - requests. \n If the state is optional, you can choose to - retrieve instance metadata with or without a session token - on your request. If you retrieve the IAM role credentials - without a token, the version 1.0 role credentials are returned. - If you retrieve the IAM role credentials using a valid session - token, the version 2.0 role credentials are returned. \n - If the state is required, you must send a session token - with any instance metadata retrieval requests. In this state, - retrieving the IAM role credentials always returns the version - 2.0 credentials; the version 1.0 credentials are not available. - \n Default: optional" + description: |- + The state of token usage for your instance metadata requests. + + + If the state is optional, you can choose to retrieve instance metadata with + or without a session token on your request. If you retrieve the IAM role + credentials without a token, the version 1.0 role credentials are returned. + If you retrieve the IAM role credentials using a valid session token, the + version 2.0 role credentials are returned. + + + If the state is required, you must send a session token with any instance + metadata retrieval requests. In this state, retrieving the IAM role credentials + always returns the version 2.0 credentials; the version 1.0 credentials are + not available. + + + Default: optional enum: - optional - required type: string instanceMetadataTags: default: disabled - description: "Set to enabled to allow access to instance tags - from the instance metadata. Set to disabled to turn off - access to instance tags from the instance metadata. 
For - more information, see Work with instance tags using the - instance metadata (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). - \n Default: disabled" + description: |- + Set to enabled to allow access to instance tags from the instance metadata. + Set to disabled to turn off access to instance tags from the instance metadata. + For more information, see Work with instance tags using the instance metadata + (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + + + Default: disabled enum: - enabled - disabled @@ -678,6 +734,50 @@ spec: name: description: The name of the launch template. type: string + nonRootVolumes: + description: Configuration options for the non root storage volumes. + items: + description: Volume encapsulates the configuration options for + the storage device. + properties: + deviceName: + description: Device name + type: string + encrypted: + description: Encrypted is whether the volume should be encrypted + or not. + type: boolean + encryptionKey: + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. + type: string + iops: + description: IOPS is the number of IOPS requested for the + disk. Not applicable to all types. + format: int64 + type: integer + size: + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). + format: int64 + minimum: 8 + type: integer + throughput: + description: Throughput to provision in MiB/s supported + for the volume type. Not applicable to all types. + format: int64 + type: integer + type: + description: Type is the type of the volume (e.g. gp2, io1, + etc...). + type: string + required: + - size + type: object + type: array privateDnsName: description: PrivateDNSName is the options for the instance hostname. properties: @@ -710,11 +810,10 @@ spec: or not. type: boolean encryptionKey: - description: EncryptionKey is the KMS key to use to encrypt - the volume. Can be either a KMS key ID or ARN. If Encrypted - is set and this is omitted, the default AWS key will be - used. The key must already exist and be accessible by the - controller. + description: |- + EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + If Encrypted is set and this is omitted, the default AWS key will be used. + The key must already exist and be accessible by the controller. type: string iops: description: IOPS is the number of IOPS requested for the @@ -722,9 +821,9 @@ spec: format: int64 type: integer size: - description: Size specifies size (in Gi) of the storage device. - Must be greater than the image snapshot size or 8 (whichever - is greater). + description: |- + Size specifies size (in Gi) of the storage device. + Must be greater than the image snapshot size or 8 (whichever is greater). format: int64 minimum: 8 type: integer @@ -750,17 +849,17 @@ spec: type: string type: object sshKeyName: - description: SSHKeyName is the name of the ssh key to attach to - the instance. Valid values are empty string (do not use SSH - keys), a valid SSH key name, or omitted (use the default SSH - key name) + description: |- + SSHKeyName is the name of the ssh key to attach to the instance. 
Valid values are empty string + (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) type: string versionNumber: - description: 'VersionNumber is the version of the launch template - that is applied. Typically a new version is created when at - least one of the following happens: 1) A new launch template - spec is applied. 2) One or more parameters in an existing template - is changed. 3) A new AMI is discovered.' + description: |- + VersionNumber is the version of the launch template that is applied. + Typically a new version is created when at least one of the following happens: + 1) A new launch template spec is applied. + 2) One or more parameters in an existing template is changed. + 3) A new AMI is discovered. format: int64 type: integer type: object @@ -777,10 +876,11 @@ spec: format: int32 type: integer eksNodegroupName: - description: EKSNodegroupName specifies the name of the nodegroup - in AWS corresponding to this MachinePool. If you don't specify a - name then a default name will be created based on the namespace - and name of the managed machine pool. + description: |- + EKSNodegroupName specifies the name of the nodegroup in AWS + corresponding to this MachinePool. If you don't specify a name + then a default name will be created based on the namespace and + name of the managed machine pool. type: string instanceType: description: InstanceType specifies the AWS instance type @@ -791,9 +891,10 @@ spec: description: Labels specifies labels for the Kubernetes node objects type: object providerIDList: - description: ProviderIDList are the provider IDs of instances in the - autoscaling group corresponding to the nodegroup represented by - this machine pool + description: |- + ProviderIDList are the provider IDs of instances in the + autoscaling group corresponding to the nodegroup represented by this + machine pool items: type: string type: array @@ -811,23 +912,25 @@ spec: type: string type: array sshKeyName: - description: SSHKeyName specifies which EC2 SSH key can be used - to access machines. If left empty, the key from the control - plane is used. + description: |- + SSHKeyName specifies which EC2 SSH key can be used to access machines. + If left empty, the key from the control plane is used. type: string type: object roleAdditionalPolicies: - description: RoleAdditionalPolicies allows you to attach additional - polices to the node group role. You must enable the EKSAllowAddRoles + description: |- + RoleAdditionalPolicies allows you to attach additional polices to + the node group role. You must enable the EKSAllowAddRoles feature flag to incorporate these into the created role. items: type: string type: array roleName: - description: RoleName specifies the name of IAM role for the node - group. If the role is pre-existing we will treat it as unmanaged - and not delete it on deletion. If the EKSEnableIAM feature flag - is true and no name is supplied then a role is created. + description: |- + RoleName specifies the name of IAM role for the node group. + If the role is pre-existing we will treat it as unmanaged + and not delete it on deletion. If the EKSEnableIAM feature + flag is true and no name is supplied then a role is created. 
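# Illustrative sketch, not part of the generated diff: attaching an extra encrypted data volume
# through the new nonRootVolumes field on the managed node group launch template, as defined
# earlier in this hunk. The device name, volume type, size and KMS alias are hypothetical;
# size is in Gi and must be at least 8 (or the snapshot size, whichever is greater).
spec:
  awsLaunchTemplate:
    nonRootVolumes:
      - deviceName: /dev/xvdb
        size: 100
        type: gp3
        encrypted: true
        encryptionKey: alias/example-ebs-key   # KMS key ID or ARN; omit to use the default AWS key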
type: string scaling: description: Scaling specifies scaling for the ASG behind this pool @@ -840,8 +943,9 @@ spec: type: integer type: object subnetIDs: - description: SubnetIDs specifies which subnets are used for the auto - scaling group of this nodegroup + description: |- + SubnetIDs specifies which subnets are used for the + auto scaling group of this nodegroup items: type: string type: array @@ -871,20 +975,21 @@ spec: type: object type: array updateConfig: - description: UpdateConfig holds the optional config to control the - behaviour of the update to the nodegroup. + description: |- + UpdateConfig holds the optional config to control the behaviour of the update + to the nodegroup. properties: maxUnavailable: - description: MaxUnavailable is the maximum number of nodes unavailable - at once during a version update. Nodes will be updated in parallel. - The maximum number is 100. + description: |- + MaxUnavailable is the maximum number of nodes unavailable at once during a version update. + Nodes will be updated in parallel. The maximum number is 100. maximum: 100 minimum: 1 type: integer maxUnavailablePercentage: - description: MaxUnavailablePercentage is the maximum percentage - of nodes unavailable during a version update. This percentage - of nodes will be updated in parallel, up to 100 nodes at once. + description: |- + MaxUnavailablePercentage is the maximum percentage of nodes unavailable during a version update. This + percentage of nodes will be updated in parallel, up to 100 nodes at once. maximum: 100 minimum: 1 type: integer @@ -902,37 +1007,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. 
- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -941,36 +1046,46 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the MachinePool and will contain - a more verbose string suitable for logging and human consumption. - \n This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the MachinePool's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of MachinePools can be added as - events to the MachinePool object and/or logged in the controller's - output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the MachinePool and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the MachinePool's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of MachinePools + can be added as events to the MachinePool object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the MachinePool and will contain - a succinct value suitable for machine interpretation. \n This field - should not be set for transitive errors that a controller faces - that are expected to be fixed automatically over time (like service - outages), but instead indicate that something is fundamentally wrong - with the Machine's spec or the configuration of the controller, - and that manual intervention is required. Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of MachinePools can be added as - events to the MachinePool object and/or logged in the controller's - output." 
+ description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the MachinePool and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of MachinePools + can be added as events to the MachinePool object and/or logged in the + controller's output. type: string launchTemplateID: description: The ID of the launch template @@ -980,8 +1095,9 @@ spec: type: string ready: default: false - description: Ready denotes that the AWSManagedMachinePool nodegroup - has joined the cluster + description: |- + Ready denotes that the AWSManagedMachinePool nodegroup has joined + the cluster type: boolean replicas: description: Replicas is the most recently observed number of replicas. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml index 710e61955a..2d0c295c0b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: rosaclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -35,20 +35,27 @@ spec: name: v1beta2 schema: openAPIV3Schema: + description: ROSACluster is the Schema for the ROSAClusters API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: + description: ROSAClusterSpec defines the desired state of ROSACluster. 
properties: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to @@ -67,13 +74,13 @@ spec: type: object type: object status: - description: ROSAClusterStatus defines the observed state of ROSACluster + description: ROSAClusterStatus defines the observed state of ROSACluster. properties: failureDomains: additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. + description: |- + FailureDomainSpec is the Schema for Cluster API failure domains. + It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: additionalProperties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml index a964cad45e..a5786fd8bf 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: rosamachinepools.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -33,28 +33,48 @@ spec: description: ROSAMachinePool is the Schema for the rosamachinepools API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: RosaMachinePoolSpec defines the desired state of RosaMachinePool. properties: + additionalSecurityGroups: + description: |- + AdditionalSecurityGroups is an optional set of security groups to associate + with all node instances of the machine pool. + items: + type: string + type: array + additionalTags: + additionalProperties: + type: string + description: AdditionalTags are user-defined tags to be added on the + underlying EC2 instances associated with this machine pool. + type: object autoRepair: - default: false - description: AutoRepair specifies whether health checks should be - enabled for machines in the NodePool. The default is false. 
+ default: true + description: |- + AutoRepair specifies whether health checks should be enabled for machines + in the NodePool. The default is true. type: boolean autoscaling: - description: Autoscaling specifies auto scaling behaviour for this - MachinePool. required if Replicas is not configured + description: |- + Autoscaling specifies auto scaling behaviour for this MachinePool. + required if Replicas is not configured properties: maxReplicas: minimum: 1 @@ -64,9 +84,9 @@ spec: type: integer type: object availabilityZone: - description: AvailabilityZone is an optinal field specifying the availability - zone where instances of this machine pool should run For Multi-AZ - clusters, you can create a machine pool in a Single-AZ of your choice. + description: |- + AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run + For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice. type: string instanceType: description: InstanceType specifies the AWS instance type @@ -76,10 +96,20 @@ spec: type: string description: Labels specifies labels for the Kubernetes node objects type: object + nodeDrainGracePeriod: + description: |- + NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be + respected during upgrades. After this grace period, any workloads protected by Pod Disruption + Budgets that have not been successfully drained from a node will be forcibly evicted. + + + Valid values are from 0 to 1 week (10080m|168h). + 0 or empty value means that the MachinePool can be drained without any time limitation. + type: string nodePoolName: - description: NodePoolName specifies the name of the nodepool in Rosa - must be a valid DNS-1035 label, so it must consist of lower case - alphanumeric and have a max length of 15 characters. + description: |- + NodePoolName specifies the name of the nodepool in Rosa + must be a valid DNS-1035 label, so it must consist of lower case alphanumeric and have a max length of 15 characters. maxLength: 15 pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ type: string @@ -94,11 +124,103 @@ spec: type: array subnet: type: string + x-kubernetes-validations: + - message: subnet is immutable + rule: self == oldSelf + taints: + description: Taints specifies the taints to apply to the nodes of + the machine pool + items: + description: RosaTaint represents a taint to be applied to a node. + properties: + effect: + description: |- + The effect of the taint on pods that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: The taint key to be applied to a node. + type: string + value: + description: The taint value corresponding to the taint key. + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + type: object + type: array + tuningConfigs: + description: |- + TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool. + Tuning configs must already exist. + items: + type: string + type: array + updateConfig: + description: UpdateConfig specifies update configurations. + properties: + rollingUpdate: + description: RollingUpdate specifies MaxUnavailable & MaxSurge + number of nodes during update.
+ properties: + maxSurge: + anyOf: + - type: integer + - type: string + default: 1 + description: |- + MaxSurge is the maximum number of nodes that can be provisioned above the desired number of nodes. + Value can be an absolute number (ex: 5) or a percentage of desired nodes (ex: 10%). + Absolute number is calculated from percentage by rounding up. + + + MaxSurge can not be 0 if MaxUnavailable is 0, default is 1. + Both MaxSurge & MaxUnavailable must use the same units (absolute value or percentage). + + + Example: when MaxSurge is set to 30%, new nodes can be provisioned immediately + when the rolling update starts, such that the total number of old and new + nodes does not exceed 130% of desired nodes. Once old nodes have been + deleted, new nodes can be provisioned, ensuring that the total number of nodes + running at any time during the update is at most 130% of desired nodes. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 0 + description: |- + MaxUnavailable is the maximum number of nodes that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired nodes (ex: 10%). + Absolute number is calculated from percentage by rounding down. + + + MaxUnavailable can not be 0 if MaxSurge is 0, default is 0. + Both MaxUnavailable & MaxSurge must use the same units (absolute value or percentage). + + + Example: when MaxUnavailable is set to 30%, old nodes can be deleted down to 70% of + desired nodes immediately when the rolling update starts. Once new nodes + are ready, more old nodes can be deleted, followed by provisioning new nodes, + ensuring that the total number of nodes available at all times during the + update is at least 70% of desired nodes. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + type: object + type: object version: - description: Version specifies the penshift version of the nodes associated - with this machinepool. ROSAControlPlane version is used if not set. + description: |- + Version specifies the OpenShift version of the nodes associated with this machinepool. + ROSAControlPlane version is used if not set. type: string required: + - instanceType - nodePoolName type: object status: @@ -112,37 +234,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty.
type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -151,20 +273,24 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the state and will be set to a descriptive - error message. \n This field should not be set for transitive errors - that a controller faces that are expected to be fixed automatically - over time (like service outages), but instead indicate that something - is fundamentally wrong with the spec or the configuration of the - controller, and that manual intervention is required." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the state and will be set to a descriptive error message. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the spec or the configuration of + the controller, and that manual intervention is required. type: string id: description: ID is the ID given by ROSA. 
type: string ready: default: false - description: Ready denotes that the RosaMachinePool nodepool has joined + description: |- + Ready denotes that the RosaMachinePool nodepool has joined the cluster type: boolean replicas: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 33946b3057..3ff4afe303 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,6 +4,18 @@ kind: ClusterRole metadata: name: manager-role rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -175,6 +187,12 @@ rules: - get - list - watch +- apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - rosacontrolplanes/finalizers + verbs: + - update - apiGroups: - controlplane.cluster.x-k8s.io resources: @@ -409,6 +427,12 @@ rules: - patch - update - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - rosamachinepools/finalizers + verbs: + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go index cb74ddacd1..d0f51fa4e4 100644 --- a/controllers/awscluster_controller_test.go +++ b/controllers/awscluster_controller_test.go @@ -76,6 +76,8 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { mockedVPCCallsForExistingVPCAndSubnets(m) mockedCreateSGCalls(false, "vpc-exists", m) mockedDescribeInstanceCall(m) + mockedDescribeAvailabilityZones(m, []string{"us-east-1c", "us-east-1a"}) + // Second iteration: the AWS Cluster object has been patched, // thus a valid Control Plane Endpoint has been provided mockedVPCCallsForExistingVPCAndSubnets(m) @@ -189,7 +191,9 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { mockedCreateSGCalls(false, "vpc-exists", m) mockedCreateLBCalls(t, e) mockedDescribeInstanceCall(m) + mockedDescribeAvailabilityZones(m, []string{"us-east-1c", "us-east-1a"}) } + expect(ec2Mock.EXPECT(), elbMock.EXPECT()) setup(t) @@ -211,7 +215,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { } err := testEnv.Get(ctx, key, cluster) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", awsCluster.Name)) defer teardown() defer t.Cleanup(func() { @@ -298,7 +302,14 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { mockedCreateSGCalls(true, "vpc-exists", m) mockedCreateLBV2Calls(t, e) mockedDescribeInstanceCall(m) + mockedDescribeAvailabilityZones(m, []string{"us-east-1c", "us-east-1a"}) + mockedDescribeTargetGroupsCall(t, e) + mockedCreateTargetGroupCall(t, e) + mockedModifyTargetGroupAttributes(t, e) + mockedDescribeListenersCall(t, e) + mockedCreateListenerCall(t, e) } + expect(ec2Mock.EXPECT(), elbv2Mock.EXPECT()) g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed()) @@ -310,7 +321,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { } err := testEnv.Get(ctx, key, cluster) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", awsCluster.Name)) defer teardown() defer t.Cleanup(func() { @@ -384,7 +395,9 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { mockedCallsForMissingEverything(m, e, "my-managed-subnet-priv", "my-managed-subnet-pub") mockedCreateSGCalls(false, "vpc-new", m) mockedDescribeInstanceCall(m) + mockedDescribeAvailabilityZones(m, 
[]string{"us-east-1a"}) } + expect(ec2Mock.EXPECT(), elbMock.EXPECT()) setup(t) @@ -416,7 +429,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { } err := testEnv.Get(ctx, key, cluster) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", awsCluster.Name)) defer teardown() defer t.Cleanup(func() { @@ -524,7 +537,8 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { } err := testEnv.Get(ctx, key, cluster) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", awsCluster.Name)) + defer t.Cleanup(func() { g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed()) }) @@ -589,7 +603,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { } err := testEnv.Get(ctx, key, cluster) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", awsCluster.Name)) defer t.Cleanup(func() { g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed()) @@ -651,6 +665,26 @@ func mockedDeleteSGCalls(m *mocks.MockEC2APIMockRecorder) { m.DescribeSecurityGroupsPagesWithContext(context.TODO(), gomock.Any(), gomock.Any()).Return(nil) } +func mockedDescribeAvailabilityZones(m *mocks.MockEC2APIMockRecorder, zones []string) { + output := &ec2.DescribeAvailabilityZonesOutput{} + matcher := gomock.Any() + + if len(zones) > 0 { + input := &ec2.DescribeAvailabilityZonesInput{} + for _, zone := range zones { + input.ZoneNames = append(input.ZoneNames, aws.String(zone)) + output.AvailabilityZones = append(output.AvailabilityZones, &ec2.AvailabilityZone{ + ZoneName: aws.String(zone), + ZoneType: aws.String("availability-zone"), + }) + } + + matcher = gomock.Eq(input) + } + m.DescribeAvailabilityZonesWithContext(context.TODO(), matcher).AnyTimes(). 
+ Return(output, nil) +} + func createControllerIdentity(g *WithT) *infrav1.AWSClusterControllerIdentity { controllerIdentity := &infrav1.AWSClusterControllerIdentity{ TypeMeta: metav1.TypeMeta{ @@ -970,7 +1004,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -1001,7 +1035,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -1037,7 +1071,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -1068,7 +1102,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -1177,7 +1211,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M }, { Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"), - Values: aws.StringSlice([]string{"apiserver"}), + Values: aws.StringSlice([]string{"common"}), }, }, })).Return(&ec2.DescribeAddressesOutput{ @@ -1192,7 +1226,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-eip-apiserver"), + Value: aws.String("test-cluster-eip-common"), }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), @@ -1200,7 +1234,7 @@ func mockedCallsForMissingEverything(m *mocks.MockEC2APIMockRecorder, e *mocks.M }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("apiserver"), + Value: aws.String("common"), }, }, }, @@ -1430,7 +1464,12 @@ func mockedDeleteVPCCallsForNonExistentVPC(m *mocks.MockEC2APIMockRecorder) { { Name: aws.String("tag-key"), Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}), - }}, + }, + { + Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Values: aws.StringSlice([]string{"owned"}), + }, + }, })).Return(nil, nil) m.DeleteVpcWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteVpcInput{ VpcId: aws.String("vpc-exists")})).Return(nil, nil) @@ -1521,6 +1560,10 @@ func mockedDeleteVPCCalls(m *mocks.MockEC2APIMockRecorder) { { Name: aws.String("tag-key"), Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}), + }, + { + Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Values: aws.StringSlice([]string{"owned"}), }}, })).Return(&ec2.DescribeAddressesOutput{ Addresses: []*ec2.Address{ diff --git a/controllers/awscluster_controller_unit_test.go b/controllers/awscluster_controller_unit_test.go index 22e3af6ad3..e282d2bf79 100644 --- a/controllers/awscluster_controller_unit_test.go +++ b/controllers/awscluster_controller_unit_test.go @@ -23,12 +23,14 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -247,6 +249,36 @@ func TestAWSClusterReconcileOperations(t *testing.T) { expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionTrue, "", ""}}) g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer)) }) + + t.Run("when BYO IP is set", func(t *testing.T) { + g := NewWithT(t) + runningCluster := func() { + ec2Svc.EXPECT().ReconcileBastion().Return(nil) + elbSvc.EXPECT().ReconcileLoadbalancers().Return(nil) + networkSvc.EXPECT().ReconcileNetwork().Return(nil) + sgSvc.EXPECT().ReconcileSecurityGroups().Return(nil) + } + + awsCluster := getAWSCluster("test", "test") + csClient := setup(t, &awsCluster) + defer teardown() + runningCluster() + cs, err := scope.NewClusterScope( + scope.ClusterScopeParams{ + Client: csClient, + Cluster: &clusterv1.Cluster{}, + AWSCluster: &awsCluster, + }, + ) + g.Expect(err).To(BeNil()) + awsCluster.Spec.NetworkSpec.VPC.ElasticIPPool = &infrav1.ElasticIPPool{ + PublicIpv4Pool: aws.String("ipv4pool-ec2-0123456789abcdef0"), + PublicIpv4PoolFallBackOrder: ptr.To(infrav1.PublicIpv4PoolFallbackOrderAmazonPool), + } + g.Expect(err).To(Not(HaveOccurred())) + _, err = reconciler.reconcileNormal(cs) + g.Expect(err).To(Not(HaveOccurred())) + }) }) t.Run("Reconcile failure", func(t *testing.T) { expectedErr := errors.New("failed to get resource") @@ -628,7 +660,7 @@ func createCluster(g *WithT, awsCluster *infrav1.AWSCluster, namespace string) { } err := testEnv.Get(ctx, key, cluster) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created cluster %q", awsCluster.Name)) } } diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 7ef74fe8c5..8f5773d0f1 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -32,7 +32,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -61,7 +60,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/controllers/external" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -147,6 +145,7 @@ func (r *AWSMachineReconciler) getObjectStoreService(scope scope.S3Scope) servic // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=get;list;watch;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch @@ -202,16 
+201,10 @@ func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) infrav1.SetDefaults_AWSMachineSpec(&awsMachine.Spec) - cp, err := r.getControlPlane(ctx, log, cluster) - if err != nil { - return ctrl.Result{}, err - } - // Create the machine scope machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: r.Client, Cluster: cluster, - ControlPlane: cp, Machine: machine, InfraCluster: infraCluster, AWSMachine: awsMachine, @@ -416,6 +409,15 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") } + // Release an Elastic IP when the machine has a public IP address (EIP) with a cluster-wide config + // to consume from BYO IPv4 Pool. + if machineScope.GetElasticIPPool() != nil { + if err := ec2Service.ReleaseElasticIP(instance.ID); err != nil { + machineScope.Error(err, "failed to release elastic IP address") + return ctrl.Result{}, err + } + } + machineScope.Info("EC2 instance successfully terminated", "instance-id", instance.ID) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulTerminate", "Terminated instance %q", instance.ID) @@ -510,7 +512,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * // Avoid a flickering condition between InstanceProvisionStarted and InstanceProvisionFailed if there's a persistent failure with createInstance if conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) != infrav1.InstanceProvisionFailedReason { conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") - if patchErr := machineScope.PatchObject(); err != nil { + if patchErr := machineScope.PatchObject(); patchErr != nil { machineScope.Error(patchErr, "failed to patch conditions") return ctrl.Result{}, patchErr } @@ -529,6 +531,21 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * return ctrl.Result{}, err } } + + // BYO Public IPv4 Pool feature: allocates and associates an EIP to the machine when PublicIP and + // cluster-wide Public IPv4 Pool configuration are set. The EIP must be associated after the instance + // is created and has transitioned out of the Pending state. + // In the regular flow, if the instance already has a public IPv4 address (EIP) associated, it will + // be released when a new one is assigned; createInstance() prevents that behavior by not launching + // an instance with an EIP, allowing ReconcileElasticIPFromPublicPool to assign + // a BYOIP without duplication. + if pool := machineScope.GetElasticIPPool(); pool != nil { + if err := ec2svc.ReconcileElasticIPFromPublicPool(pool, instance); err != nil { + machineScope.Error(err, "failed to associate elastic IP address") + return ctrl.Result{}, err + } + } + if feature.Gates.Enabled(feature.EventBridgeInstanceState) { instancestateSvc := instancestate.NewService(ec2Scope) if err := instancestateSvc.AddInstanceToEventPattern(instance.ID); err != nil { @@ -602,8 +619,14 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * } if err := r.reconcileLBAttachment(machineScope, elbScope, instance); err != nil { - machineScope.Error(err, "failed to reconcile LB attachment") - return ctrl.Result{}, err + // We are tolerating InstanceNotRunning error, so we don't report it as an error condition.
+ // Because we are reconciling all load balancers, attempt to treat the error as a list of errors. + if err := kerrors.FilterOut(err, elb.IsInstanceNotRunning); err != nil { + machineScope.Error(err, "failed to reconcile LB attachment") + return ctrl.Result{}, err + } + // Cannot attach non-running instances to LB + shouldRequeue = true } } @@ -814,6 +837,25 @@ func (r *AWSMachineReconciler) generateIgnitionWithRemoteStorage(scope *scope.Ma }, } + if scope.AWSMachine.Spec.Ignition.Proxy != nil { + ignData.Ignition.Proxy = ignV3Types.Proxy{ + HTTPProxy: scope.AWSMachine.Spec.Ignition.Proxy.HTTPProxy, + HTTPSProxy: scope.AWSMachine.Spec.Ignition.Proxy.HTTPSProxy, + } + for _, noProxy := range scope.AWSMachine.Spec.Ignition.Proxy.NoProxy { + ignData.Ignition.Proxy.NoProxy = append(ignData.Ignition.Proxy.NoProxy, ignV3Types.NoProxyItem(noProxy)) + } + } + + if scope.AWSMachine.Spec.Ignition.TLS != nil { + for _, cert := range scope.AWSMachine.Spec.Ignition.TLS.CASources { + ignData.Ignition.Security.TLS.CertificateAuthorities = append( + ignData.Ignition.Security.TLS.CertificateAuthorities, + ignV3Types.Resource{Source: aws.String(string(cert))}, + ) + } + } + return json.Marshal(ignData) default: return nil, errors.Errorf("unsupported ignition version %q", ignVersion) @@ -919,17 +961,10 @@ func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.Machine func (r *AWSMachineReconciler) registerInstanceToLBs(machineScope *scope.MachineScope, elbsvc services.ELBInterface, i *infrav1.Instance, lb *infrav1.AWSLoadBalancerSpec) error { switch lb.LoadBalancerType { - case infrav1.LoadBalancerTypeClassic: - fallthrough - case "": + case infrav1.LoadBalancerTypeClassic, "": machineScope.Debug("registering to classic load balancer") return r.registerInstanceToClassicLB(machineScope, elbsvc, i) - - case infrav1.LoadBalancerTypeELB: - fallthrough - case infrav1.LoadBalancerTypeALB: - fallthrough - case infrav1.LoadBalancerTypeNLB: + case infrav1.LoadBalancerTypeELB, infrav1.LoadBalancerTypeALB, infrav1.LoadBalancerTypeNLB: machineScope.Debug("registering to v2 load balancer") return r.registerInstanceToV2LB(machineScope, elbsvc, i, lb) } @@ -973,6 +1008,14 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(machineScope *scope.Machin return nil } + // See https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-register-targets.html#register-instances + if ptr.Deref(machineScope.GetInstanceState(), infrav1.InstanceStatePending) != infrav1.InstanceStateRunning { + r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", + "Cannot register control plane instance %q with load balancer: instance is not running", instance.ID) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityInfo, "instance not running") + return elb.NewInstanceNotRunning("instance is not running") + } + if err := elbsvc.RegisterInstanceWithAPIServerLB(instance, lb); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", instance.ID, err) @@ -1225,22 +1268,3 @@ func (r *AWSMachineReconciler) ensureInstanceMetadataOptions(ec2svc services.EC2 return ec2svc.ModifyInstanceMetadataOptions(instance.ID, machine.Spec.InstanceMetadataOptions) } - -// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch - -func (r 
*AWSMachineReconciler) getControlPlane(ctx context.Context, log *logger.Logger, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) { - var ns string - - if ns = cluster.Spec.ControlPlaneRef.Namespace; ns == "" { - ns = cluster.Namespace - } - - controlPlane, err := external.Get(ctx, r.Client, cluster.Spec.ControlPlaneRef, ns) - if err != nil { - log.Error(err, "unable to get ControlPlane referenced in the given cluster", "cluster", fmt.Sprintf("%s/%s", cluster.Namespace, cluster.Name)) - - return nil, err - } - - return controlPlane, nil -} diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index 733d6ce9e9..b96047a9e9 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -30,7 +30,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -115,6 +114,10 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { }}}) g.Expect(err).To(BeNil()) cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}} + cs.AWSCluster.Spec.NetworkSpec.VPC = infrav1.VPCSpec{ + ID: "vpc-exists", + CidrBlock: "10.0.0.0/16", + } cs.AWSCluster.Status.Network.APIServerELB.DNSName = DNSName cs.AWSCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{ LoadBalancerType: infrav1.LoadBalancerTypeClassic, @@ -151,6 +154,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { return elbSvc } + ec2Mock.EXPECT().AssociateAddressWithContext(context.TODO(), gomock.Any()).MaxTimes(1) + reconciler.secretsManagerServiceFactory = func(clusterScope cloud.ClusterScoper) services.SecretInterface { return secretMock } @@ -284,6 +289,10 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}} cs.AWSCluster.Status.Network.APIServerELB.DNSName = DNSName + cs.AWSCluster.Spec.NetworkSpec.VPC = infrav1.VPCSpec{ + ID: "vpc-exists", + CidrBlock: "10.0.0.0/16", + } cs.AWSCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{ LoadBalancerType: infrav1.LoadBalancerTypeClassic, } @@ -323,6 +332,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { return secretMock } + ec2Mock.EXPECT().AssociateAddressWithContext(context.TODO(), gomock.Any()).MaxTimes(1) + _, err = reconciler.reconcileNormal(ctx, ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) @@ -418,7 +429,6 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s InfrastructureReady: true, }, }, - ControlPlane: &unstructured.Unstructured{}, Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -444,7 +454,7 @@ func createAWSMachine(g *WithT, awsMachine *infrav1.AWSMachine) { Namespace: awsMachine.Namespace, } return testEnv.Get(ctx, key, machine) == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed get the newly created machine %q", awsMachine.Name)) } func getAWSMachine() *infrav1.AWSMachine { diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index dd444c5275..78058f3cec 100644 --- 
a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -33,7 +33,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -131,7 +130,6 @@ func TestAWSMachineReconciler(t *testing.T) { }, }, InfraCluster: cs, - ControlPlane: &unstructured.Unstructured{}, AWSMachine: awsMachine, }, ) @@ -160,7 +158,6 @@ func TestAWSMachineReconciler(t *testing.T) { InfrastructureReady: true, }, }, - ControlPlane: &unstructured.Unstructured{}, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", @@ -998,6 +995,40 @@ func TestAWSMachineReconciler(t *testing.T) { }) }) }) + t.Run("when BYOIP is set", func(t *testing.T) { + var instance *infrav1.Instance + secretPrefix := "test/secret" + + t.Run("should succeed", func(t *testing.T) { + g := NewWithT(t) + awsMachine := getAWSMachine() + setup(t, g, awsMachine) + defer teardown(t, g) + + instance = &infrav1.Instance{ + ID: "myMachine", + State: infrav1.InstanceStatePending, + } + + ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(nil, nil).AnyTimes() + secretSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return(secretPrefix, int32(1), nil).Times(1) + ec2Svc.EXPECT().CreateInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(instance, nil).AnyTimes() + secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1) + ec2Svc.EXPECT().GetInstanceSecurityGroups(gomock.Any()).Return(map[string][]string{"eid": {}}, nil).Times(1) + ec2Svc.EXPECT().GetCoreSecurityGroups(gomock.Any()).Return([]string{}, nil).Times(1) + ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return(nil, nil).Times(1) + + ms.AWSMachine.Spec.PublicIP = aws.Bool(false) + ms.AWSMachine.Spec.ElasticIPPool = &infrav1.ElasticIPPool{ + PublicIpv4Pool: aws.String("ipv4pool-ec2-0123456789abcdef0"), + PublicIpv4PoolFallBackOrder: ptr.To(infrav1.PublicIpv4PoolFallbackOrderAmazonPool), + } + ec2Svc.EXPECT().ReconcileElasticIPFromPublicPool(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) + g.Expect(err).To(BeNil()) + }) + }) }) t.Run("Secrets management lifecycle", func(t *testing.T) { @@ -1102,7 +1133,7 @@ func TestAWSMachineReconciler(t *testing.T) { defer teardown(t, g) setNodeRef(t, g) - ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError) + ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError) secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1) ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes() _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs) @@ -1232,7 +1263,7 @@ func TestAWSMachineReconciler(t *testing.T) { defer teardown(t, g) setSSM(t, g) - ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError) + ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError) secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1) ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes() _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs) @@ -1449,7 +1480,7 @@ func TestAWSMachineReconciler(t *testing.T) { useIgnitionWithClusterObjectStore(t, g) // TODO: This seems to have no effect on the test result. 
- ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError) + ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError) objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1) ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes() @@ -1521,7 +1552,7 @@ func TestAWSMachineReconciler(t *testing.T) { useIgnitionWithClusterObjectStore(t, g) // TODO: This seems to have no effect on the test result. - ms.AWSMachine.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError) + ms.AWSMachine.Status.FailureReason = ptr.To(capierrors.UpdateMachineError) objectStoreSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1) ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes() _, _ = reconciler.reconcileDelete(ms, cs, cs, cs, cs) @@ -2462,7 +2493,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { } err = testEnv.Get(ctx, key, machine) return err == nil - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed get the newly created machine %q", tc.awsMachine.Name)) result, err := reconciler.Reconcile(ctx, ctrl.Request{ NamespacedName: client.ObjectKey{ @@ -2725,6 +2756,7 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi Attribute: aws.String("groupSet"), })).Return(&ec2.DescribeNetworkInterfaceAttributeOutput{Groups: []*ec2.GroupIdentifier{{GroupId: aws.String("3")}}}, nil).MaxTimes(1) ec2Mock.EXPECT().ModifyNetworkInterfaceAttributeWithContext(context.TODO(), gomock.Any()).AnyTimes() + ec2Mock.EXPECT().AssociateAddressWithContext(context.TODO(), gomock.Any()).MaxTimes(1) _, err = reconciler.Reconcile(ctx, ctrl.Request{ NamespacedName: client.ObjectKey{ diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index f4511e9508..09d4402af8 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -29,6 +29,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" @@ -39,6 +40,7 @@ const DNSName = "www.google.com" var ( lbName = aws.String("test-cluster-apiserver") lbArn = aws.String("loadbalancer::arn") + tgArn = aws.String("arn::target-group") describeLBInput = &elb.DescribeLoadBalancersInput{ LoadBalancerNames: aws.StringSlice([]string{"test-cluster-apiserver"}), } @@ -291,6 +293,157 @@ func mockedCreateLBV2Calls(t *testing.T, m *mocks.MockELBV2APIMockRecorder) { })).MaxTimes(1) } +func mockedDescribeTargetGroupsCall(t *testing.T, m *mocks.MockELBV2APIMockRecorder) { + t.Helper() + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: lbArn, + })). 
+ Return(&elbv2.DescribeTargetGroupsOutput{ + NextMarker: new(string), + TargetGroups: []*elbv2.TargetGroup{ + { + HealthCheckEnabled: aws.Bool(true), + HealthCheckIntervalSeconds: new(int64), + HealthCheckPath: new(string), + HealthCheckPort: new(string), + HealthCheckProtocol: new(string), + HealthCheckTimeoutSeconds: new(int64), + HealthyThresholdCount: new(int64), + IpAddressType: new(string), + LoadBalancerArns: []*string{lbArn}, + Matcher: &elbv2.Matcher{}, + Port: new(int64), + Protocol: new(string), + ProtocolVersion: new(string), + TargetGroupArn: tgArn, + TargetGroupName: new(string), + TargetType: new(string), + UnhealthyThresholdCount: new(int64), + VpcId: new(string), + }}, + }, nil) +} + +func mockedCreateTargetGroupCall(t *testing.T, m *mocks.MockELBV2APIMockRecorder) { + t.Helper() + m.CreateTargetGroup(helpers.PartialMatchCreateTargetGroupInput(t, &elbv2.CreateTargetGroupInput{ + HealthCheckEnabled: aws.Bool(true), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("TCP"), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + // Note: this is treated as a prefix with the partial matcher. + Name: aws.String("apiserver-target"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + Tags: []*elbv2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("bar-apiserver"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("apiserver"), + }, + }, + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + VpcId: aws.String("vpc-exists"), + })).Return(&elbv2.CreateTargetGroupOutput{ + TargetGroups: []*elbv2.TargetGroup{{ + HealthCheckEnabled: aws.Bool(true), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("TCP"), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + LoadBalancerArns: []*string{lbArn}, + Matcher: &elbv2.Matcher{}, + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + TargetGroupArn: tgArn, + TargetGroupName: aws.String("apiserver-target"), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + VpcId: aws.String("vpc-exists"), + }}, + }, nil) +} + +func mockedModifyTargetGroupAttributes(t *testing.T, m *mocks.MockELBV2APIMockRecorder) { + t.Helper() + m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ + TargetGroupArn: tgArn, + Attributes: []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), + Value: aws.String("false"), + }, + }, + })).Return(nil, nil) +} + +func mockedDescribeListenersCall(t *testing.T, m *mocks.MockELBV2APIMockRecorder) { + t.Helper() + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: lbArn, + })). 
+ Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{{ + DefaultActions: []*elbv2.Action{{ + TargetGroupArn: aws.String("arn::targetgroup-not-found"), + }}, + ListenerArn: aws.String("arn::listener"), + LoadBalancerArn: lbArn, + }}, + }, nil) +} + +func mockedCreateListenerCall(t *testing.T, m *mocks.MockELBV2APIMockRecorder) { + t.Helper() + m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: tgArn, + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + LoadBalancerArn: lbArn, + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + Tags: []*elbv2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-apiserver"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("apiserver"), + }, + }, + })).Return(&elbv2.CreateListenerOutput{ + Listeners: []*elbv2.Listener{ + { + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: tgArn, + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + ListenerArn: aws.String("listener::arn"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + }, + }}, nil) +} + func mockedDeleteLBCalls(expectV2Call bool, mv2 *mocks.MockELBV2APIMockRecorder, m *mocks.MockELBAPIMockRecorder) { if expectV2Call { mv2.DescribeLoadBalancers(gomock.Any()).Return(describeLBOutputV2, nil) diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go index e57cb7402a..d81716e72b 100644 --- a/controllers/rosacluster_controller.go +++ b/controllers/rosacluster_controller.go @@ -109,7 +109,6 @@ func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Set the values from the managed control plane rosaCluster.Status.Ready = true rosaCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint - // rosaCluster.Status.FailureDomains = controlPlane.Status.FailureDomains if err := patchHelper.Patch(ctx, rosaCluster); err != nil { return reconcile.Result{}, fmt.Errorf("failed to patch ROSACluster: %w", err) diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go index 4f7fc33cc5..a965bef381 100644 --- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go @@ -228,6 +228,7 @@ type OIDCProviderStatus struct { TrustPolicy string `json:"trustPolicy,omitempty"` } +// IdentityProviderStatus holds the status for associated identity provider type IdentityProviderStatus struct { // ARN holds the ARN of associated identity provider ARN string `json:"arn,omitempty"` diff --git a/controlplane/eks/api/v1beta1/conversion.go b/controlplane/eks/api/v1beta1/conversion.go index 57284afd25..e137e7dede 100644 --- a/controlplane/eks/api/v1beta1/conversion.go +++ b/controlplane/eks/api/v1beta1/conversion.go @@ -40,6 +40,7 @@ func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.VpcCni.Disable = r.Spec.DisableVPCCNI dst.Spec.Partition = restored.Spec.Partition + dst.Spec.RestrictPrivateSubnets = restored.Spec.RestrictPrivateSubnets return nil } diff --git a/controlplane/eks/api/v1beta1/conversion_test.go b/controlplane/eks/api/v1beta1/conversion_test.go index 207a6b6695..b7b360d1d1 100644 --- a/controlplane/eks/api/v1beta1/conversion_test.go +++ 
b/controlplane/eks/api/v1beta1/conversion_test.go @@ -19,9 +19,8 @@ package v1beta1 import ( "testing" - . "github.com/onsi/gomega" - fuzz "github.com/google/gofuzz" + . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" diff --git a/controlplane/eks/api/v1beta1/types.go b/controlplane/eks/api/v1beta1/types.go index a85c433303..0ca9a64ebe 100644 --- a/controlplane/eks/api/v1beta1/types.go +++ b/controlplane/eks/api/v1beta1/types.go @@ -218,8 +218,8 @@ const ( SecurityGroupCluster = infrav1.SecurityGroupRole("cluster") ) +// OIDCIdentityProviderConfig defines the configuration for an OIDC identity provider. type OIDCIdentityProviderConfig struct { - // This is also known as audience. The ID for the client application that makes // authentication requests to the OpenID identity provider. // +kubebuilder:validation:Required diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index ecc37543d6..151772f75b 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -393,6 +393,7 @@ func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl if err := Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(&in.VpcCni, &out.VpcCni, s); err != nil { return err } + // WARNING: in.RestrictPrivateSubnets requires manual conversion: does not exist in peer-type if err := Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil { return err } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index 89d5e8bc2b..109752e573 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -173,6 +173,10 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // +optional VpcCni VpcCni `json:"vpcCni,omitempty"` + // RestrictPrivateSubnets indicates that the EKS control plane should only use private subnets. + // +kubebuilder:default=false + RestrictPrivateSubnets bool `json:"restrictPrivateSubnets,omitempty"` + // KubeProxy defines managed attributes of the kube-proxy daemonset KubeProxy KubeProxy `json:"kubeProxy,omitempty"` } @@ -231,6 +235,7 @@ type OIDCProviderStatus struct { TrustPolicy string `json:"trustPolicy,omitempty"` } +// IdentityProviderStatus holds the status for associated identity provider. type IdentityProviderStatus struct { // ARN holds the ARN of associated identity provider ARN string `json:"arn,omitempty"` diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go index 4b44508b65..abda129f92 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go @@ -91,6 +91,7 @@ func (r *AWSManagedControlPlane) ValidateCreate() (admission.Warnings, error) { allErrs = append(allErrs, r.validateSecondaryCIDR()...) allErrs = append(allErrs, r.validateEKSAddons()...) allErrs = append(allErrs, r.validateDisableVPCCNI()...) + allErrs = append(allErrs, r.validateRestrictPrivateSubnets()...) allErrs = append(allErrs, r.validateKubeProxy()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) allErrs = append(allErrs, r.validateNetwork()...) 
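For context on the `RestrictPrivateSubnets` field introduced in the hunks above, here is a minimal sketch of how it might be set on an `AWSManagedControlPlane`. The cluster name, VPC ID, and subnet IDs are placeholders; this snippet is not part of the patch and only illustrates the shape of the API under those assumptions.

```go
package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)

func main() {
	// Hypothetical control plane bringing its own (unmanaged) VPC; RestrictPrivateSubnets
	// asks the controller to place the EKS control plane only on the private subnets.
	cp := ekscontrolplanev1.AWSManagedControlPlane{
		Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
			EKSClusterName:         "example-cluster",
			RestrictPrivateSubnets: true,
			NetworkSpec: infrav1.NetworkSpec{
				VPC: infrav1.VPCSpec{ID: "vpc-0123456789abcdef0"},
				Subnets: infrav1.Subnets{
					{ID: "subnet-private-a", IsPublic: false},
					{ID: "subnet-private-b", IsPublic: false},
				},
			},
		},
	}

	// validateRestrictPrivateSubnets (added in this change) rejects such a spec when the
	// unmanaged VPC lists no private subnets; here two private subnets are present.
	fmt.Println(len(cp.Spec.NetworkSpec.Subnets.FilterPrivate()))
}
```

The webhook hunk above wires this check into create and update validation, so a misconfigured spec is rejected before the controller ever reconciles it.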
@@ -126,6 +127,7 @@ func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) (admission.W allErrs = append(allErrs, r.validateSecondaryCIDR()...) allErrs = append(allErrs, r.validateEKSAddons()...) allErrs = append(allErrs, r.validateDisableVPCCNI()...) + allErrs = append(allErrs, r.validateRestrictPrivateSubnets()...) allErrs = append(allErrs, r.validateKubeProxy()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) allErrs = append(allErrs, r.validatePrivateDNSHostnameTypeOnLaunch()...) @@ -164,7 +166,7 @@ func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) (admission.W if oldAWSManagedControlplane.Spec.NetworkSpec.VPC.IsIPv6Enabled() != r.Spec.NetworkSpec.VPC.IsIPv6Enabled() { allErrs = append(allErrs, - field.Invalid(field.NewPath("spec", "networkSpec", "vpc", "enableIPv6"), r.Spec.NetworkSpec.VPC.IsIPv6Enabled(), "changing IP family is not allowed after it has been set")) + field.Invalid(field.NewPath("spec", "network", "vpc", "enableIPv6"), r.Spec.NetworkSpec.VPC.IsIPv6Enabled(), "changing IP family is not allowed after it has been set")) } if len(allErrs) == 0 { @@ -392,6 +394,22 @@ func (r *AWSManagedControlPlane) validateDisableVPCCNI() field.ErrorList { return allErrs } +func (r *AWSManagedControlPlane) validateRestrictPrivateSubnets() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.RestrictPrivateSubnets && r.Spec.NetworkSpec.VPC.IsUnmanaged(r.Spec.EKSClusterName) { + boolField := field.NewPath("spec", "restrictPrivateSubnets") + if len(r.Spec.NetworkSpec.Subnets.FilterPrivate()) == 0 { + allErrs = append(allErrs, field.Invalid(boolField, r.Spec.RestrictPrivateSubnets, "cannot enable private subnets restriction when no private subnets are specified")) + } + } + + if len(allErrs) == 0 { + return nil + } + return allErrs +} + func (r *AWSManagedControlPlane) validatePrivateDNSHostnameTypeOnLaunch() field.ErrorList { var allErrs field.ErrorList @@ -406,23 +424,53 @@ func (r *AWSManagedControlPlane) validatePrivateDNSHostnameTypeOnLaunch() field. func (r *AWSManagedControlPlane) validateNetwork() field.ErrorList { var allErrs field.ErrorList + // If only `AWSManagedControlPlane.spec.secondaryCidrBlock` is set, no additional checks are done to remain + // backward-compatible. The `VPCSpec.SecondaryCidrBlocks` field was added later - if that list is not empty, we + // require `AWSManagedControlPlane.spec.secondaryCidrBlock` to be listed in there as well. This may allow merging + // the fields later on. 
+ podSecondaryCidrBlock := r.Spec.SecondaryCidrBlock + secondaryCidrBlocks := r.Spec.NetworkSpec.VPC.SecondaryCidrBlocks + secondaryCidrBlocksField := field.NewPath("spec", "network", "vpc", "secondaryCidrBlocks") + if podSecondaryCidrBlock != nil && len(secondaryCidrBlocks) > 0 { + found := false + for _, cidrBlock := range secondaryCidrBlocks { + if cidrBlock.IPv4CidrBlock == *podSecondaryCidrBlock { + found = true + break + } + } + if !found { + allErrs = append(allErrs, field.Invalid(secondaryCidrBlocksField, secondaryCidrBlocks, fmt.Sprintf("AWSManagedControlPlane.spec.secondaryCidrBlock %v must be listed in AWSManagedControlPlane.spec.network.vpc.secondaryCidrBlocks (required if both fields are filled)", *podSecondaryCidrBlock))) + } + } + + if podSecondaryCidrBlock != nil && r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.CidrBlock == *podSecondaryCidrBlock { + secondaryCidrBlockField := field.NewPath("spec", "vpc", "secondaryCidrBlock") + allErrs = append(allErrs, field.Invalid(secondaryCidrBlockField, secondaryCidrBlocks, fmt.Sprintf("AWSManagedControlPlane.spec.secondaryCidrBlock %v must not be equal to the primary AWSManagedControlPlane.spec.network.vpc.cidrBlock", *podSecondaryCidrBlock))) + } + for _, cidrBlock := range secondaryCidrBlocks { + if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.CidrBlock == cidrBlock.IPv4CidrBlock { + allErrs = append(allErrs, field.Invalid(secondaryCidrBlocksField, secondaryCidrBlocks, fmt.Sprintf("AWSManagedControlPlane.spec.network.vpc.secondaryCidrBlocks must not contain the primary AWSManagedControlPlane.spec.network.vpc.cidrBlock %v", r.Spec.NetworkSpec.VPC.CidrBlock))) + } + } + if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.PoolID == "" { - poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId") + poolField := field.NewPath("spec", "network", "vpc", "ipv6", "poolId") allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId cannot be empty if cidrBlock is set")) } if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.PoolID != "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil { - poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId") + poolField := field.NewPath("spec", "network", "vpc", "ipv6", "poolId") allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId and ipamPool cannot be used together")) } if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil { - cidrBlockField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "cidrBlock") + cidrBlockField := field.NewPath("spec", "network", "vpc", "ipv6", "cidrBlock") allErrs = append(allErrs, field.Invalid(cidrBlockField, r.Spec.NetworkSpec.VPC.IPv6.CidrBlock, "cidrBlock and ipamPool cannot be used together")) } if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool.ID == "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool.Name == "" { - ipamPoolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "ipamPool") + ipamPoolField := field.NewPath("spec", "network", "vpc", "ipv6", "ipamPool") allErrs = append(allErrs, field.Invalid(ipamPoolField, r.Spec.NetworkSpec.VPC.IPv6.IPAMPool, "ipamPool must have either id or name")) } diff --git 
a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go index bc3cd5d086..7441040b8e 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go @@ -45,6 +45,7 @@ func TestDefaultingWebhook(t *testing.T) { defaultVPCSpec := infrav1.VPCSpec{ AvailabilityZoneUsageLimit: &AZUsageLimit, AvailabilityZoneSelection: &infrav1.AZSelectionSchemeOrdered, + SubnetSchema: &infrav1.SubnetSchemaPreferPrivate, } defaultIdentityRef := &infrav1.AWSIdentityReference{ Kind: infrav1.ControllerIdentityKind, @@ -167,15 +168,17 @@ func TestDefaultingWebhook(t *testing.T) { func TestWebhookCreate(t *testing.T) { tests := []struct { //nolint:maligned - name string - eksClusterName string - expectError bool - eksVersion string - hasAddons bool - vpcCNI VpcCni - additionalTags infrav1.Tags - secondaryCidr *string - kubeProxy KubeProxy + name string + eksClusterName string + expectError bool + expectErrorToContain string // if non-empty, the error message must contain this substring + eksVersion string + hasAddons bool + vpcCNI VpcCni + additionalTags infrav1.Tags + secondaryCidr *string + secondaryCidrBlocks []infrav1.VpcCidrBlock + kubeProxy KubeProxy }{ { name: "ekscluster specified", @@ -253,6 +256,15 @@ func TestWebhookCreate(t *testing.T) { vpcCNI: VpcCni{Disable: true}, secondaryCidr: aws.String("100.64.0.0/10"), }, + { + name: "secondary CIDR block not listed in NetworkSpec.VPC.SecondaryCidrBlocks", + eksClusterName: "default_cluster1", + eksVersion: "v1.19", + expectError: true, + expectErrorToContain: "100.64.0.0/16 must be listed in AWSManagedControlPlane.spec.network.vpc.secondaryCidrBlocks", + secondaryCidr: aws.String("100.64.0.0/16"), + secondaryCidrBlocks: []infrav1.VpcCidrBlock{{IPv4CidrBlock: "123.456.0.0/16"}}, + }, { name: "invalid tags not allowed", eksClusterName: "default_cluster1", @@ -327,6 +339,11 @@ func TestWebhookCreate(t *testing.T) { KubeProxy: tc.kubeProxy, AdditionalTags: tc.additionalTags, VpcCni: tc.vpcCNI, + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + SecondaryCidrBlocks: tc.secondaryCidrBlocks, + }, + }, }, } if tc.eksVersion != "" { @@ -353,7 +370,16 @@ func TestWebhookCreate(t *testing.T) { if tc.expectError { g.Expect(err).ToNot(BeNil()) + + if tc.expectErrorToContain != "" && err != nil { + g.Expect(err.Error()).To(ContainSubstring(tc.expectErrorToContain)) + } } else { + if tc.expectErrorToContain != "" { + t.Error("Logic error: expectError=false means that expectErrorToContain must be empty") + t.FailNow() + } + g.Expect(err).To(BeNil()) } }) diff --git a/controlplane/eks/api/v1beta2/doc.go b/controlplane/eks/api/v1beta2/doc.go index b2fbc38795..8409bb024f 100644 --- a/controlplane/eks/api/v1beta2/doc.go +++ b/controlplane/eks/api/v1beta2/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group +// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group // +gencrdrefdocs:force // +groupName=controlplane.cluster.x-k8s.io // +k8s:defaulter-gen=TypeMeta diff --git a/controlplane/eks/api/v1beta2/groupversion_info.go b/controlplane/eks/api/v1beta2/groupversion_info.go index fcc0abb3a5..9fc8227082 100644 --- a/controlplane/eks/api/v1beta2/groupversion_info.go +++ b/controlplane/eks/api/v1beta2/groupversion_info.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group +// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group // +kubebuilder:object:generate=true // +groupName=controlplane.cluster.x-k8s.io package v1beta2 diff --git a/controlplane/eks/api/v1beta2/types.go b/controlplane/eks/api/v1beta2/types.go index acaa53b419..1ef47215ce 100644 --- a/controlplane/eks/api/v1beta2/types.go +++ b/controlplane/eks/api/v1beta2/types.go @@ -218,8 +218,8 @@ const ( SecurityGroupCluster = infrav1.SecurityGroupRole("cluster") ) +// OIDCIdentityProviderConfig represents the configuration for an OIDC identity provider. type OIDCIdentityProviderConfig struct { - // This is also known as audience. The ID for the client application that makes // authentication requests to the OpenID identity provider. // +kubebuilder:validation:Required diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 2ed1579ec1..1c4d29ed86 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -40,6 +40,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/awsnode" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" @@ -87,6 +88,14 @@ type AWSManagedControlPlaneReconciler struct { Recorder record.EventRecorder Endpoints []scope.ServiceEndpoint + awsNodeServiceFactory func(scope.AWSNodeScope) services.AWSNodeInterface + ec2ServiceFactory func(scope.EC2Scope) services.EC2Interface + eksServiceFactory func(*scope.ManagedControlPlaneScope) *eks.Service + iamAuthenticatorServiceFactory func(scope.IAMAuthScope, iamauth.BackendType, client.Client) services.IAMAuthenticatorInterface + kubeProxyServiceFactory func(scope.KubeProxyScope) services.KubeProxyInterface + networkServiceFactory func(scope.NetworkScope) services.NetworkInterface + securityGroupServiceFactory func(*scope.ManagedControlPlaneScope) services.SecurityGroupInterface + EnableIAM bool AllowAdditionalRoles bool WatchFilterValue string @@ -96,6 +105,62 @@ type AWSManagedControlPlaneReconciler struct { TagUnmanagedNetworkResources bool } +// getAWSNodeService factory func is added for testing purpose so that we can inject mocked AWSNodeInterface to the AWSManagedControlPlaneReconciler. 
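+// The getters below all follow the same pattern: when the corresponding factory field is nil (the default outside of tests), the real service constructor is used.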
+func (r *AWSManagedControlPlaneReconciler) getAWSNodeService(scope scope.AWSNodeScope) services.AWSNodeInterface { + if r.awsNodeServiceFactory != nil { + return r.awsNodeServiceFactory(scope) + } + return awsnode.NewService(scope) +} + +// getEC2Service factory func is added for testing purpose so that we can inject mocked EC2Service to the AWSManagedControlPlaneReconciler. +func (r *AWSManagedControlPlaneReconciler) getEC2Service(scope scope.EC2Scope) services.EC2Interface { + if r.ec2ServiceFactory != nil { + return r.ec2ServiceFactory(scope) + } + return ec2.NewService(scope) +} + +// getEKSService factory func is added for testing purpose so that we can inject a mocked EKS service to the AWSManagedControlPlaneReconciler. +func (r *AWSManagedControlPlaneReconciler) getEKSService(scope *scope.ManagedControlPlaneScope) *eks.Service { + if r.eksServiceFactory != nil { + return r.eksServiceFactory(scope) + } + return eks.NewService(scope) +} + +// getIAMAuthenticatorService factory func is added for testing purpose so that we can inject mocked IAMAuthenticatorInterface to the AWSManagedControlPlaneReconciler. +func (r *AWSManagedControlPlaneReconciler) getIAMAuthenticatorService(scope scope.IAMAuthScope, backend iamauth.BackendType, client client.Client) services.IAMAuthenticatorInterface { + if r.iamAuthenticatorServiceFactory != nil { + return r.iamAuthenticatorServiceFactory(scope, backend, client) + } + return iamauth.NewService(scope, backend, client) +} + +// getKubeProxyService factory func is added for testing purpose so that we can inject mocked KubeProxyInterface to the AWSManagedControlPlaneReconciler. +func (r *AWSManagedControlPlaneReconciler) getKubeProxyService(scope scope.KubeProxyScope) services.KubeProxyInterface { + if r.kubeProxyServiceFactory != nil { + return r.kubeProxyServiceFactory(scope) + } + return kubeproxy.NewService(scope) +} + +// getNetworkService factory func is added for testing purpose so that we can inject mocked NetworkService to the AWSManagedControlPlaneReconciler. +func (r *AWSManagedControlPlaneReconciler) getNetworkService(scope scope.NetworkScope) services.NetworkInterface { + if r.networkServiceFactory != nil { + return r.networkServiceFactory(scope) + } + return network.NewService(scope) +} + +// getSecurityGroupService factory func is added for testing purpose so that we can inject mocked SecurityGroupService to the AWSManagedControlPlaneReconciler. +func (r *AWSManagedControlPlaneReconciler) getSecurityGroupService(scope *scope.ManagedControlPlaneScope) services.SecurityGroupInterface { + if r.securityGroupServiceFactory != nil { + return r.securityGroupServiceFactory(scope) + } + return securitygroup.NewService(scope, securityGroupRolesForControlPlane(scope)) +} + +// SetupWithManager is used to setup the controller.
func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { log := logger.FromContext(ctx) @@ -238,6 +303,11 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) { managedScope.Info("Reconciling AWSManagedControlPlane") + if managedScope.Cluster.Spec.InfrastructureRef == nil { + managedScope.Info("InfrastructureRef not set, skipping reconciliation") + return ctrl.Result{}, nil + } + // TODO (richardcase): we can remove the if check here in the future when we have // allowed enough time for users to move away from using the single kind for // infrastructureRef and controlplaneRef. @@ -257,13 +327,13 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } } - ec2Service := ec2.NewService(managedScope) - networkSvc := network.NewService(managedScope) - ekssvc := eks.NewService(managedScope) - sgService := securitygroup.NewService(managedScope, securityGroupRolesForControlPlane(managedScope)) - authService := iamauth.NewService(managedScope, iamauth.BackendTypeConfigMap, managedScope.Client) - awsnodeService := awsnode.NewService(managedScope) - kubeproxyService := kubeproxy.NewService(managedScope) + ec2Service := r.getEC2Service(managedScope) + networkSvc := r.getNetworkService(managedScope) + ekssvc := r.getEKSService(managedScope) + sgService := r.getSecurityGroupService(managedScope) + authService := r.getIAMAuthenticatorService(managedScope, iamauth.BackendTypeConfigMap, managedScope.Client) + awsnodeService := r.getAWSNodeService(managedScope) + kubeproxyService := r.getKubeProxyService(managedScope) if err := networkSvc.ReconcileNetwork(); err != nil { return reconcile.Result{}, fmt.Errorf("failed to reconcile network for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go new file mode 100644 index 0000000000..b510d5cd2f --- /dev/null +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go @@ -0,0 +1,930 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + stsrequest "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" + ec2Service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" + eksService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_stsiface" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" + "sigs.k8s.io/cluster-api/util" +) + +func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { + var ( + reconciler AWSManagedControlPlaneReconciler + mockCtrl *gomock.Controller + recorder *record.FakeRecorder + ctx context.Context + + ec2Mock *mocks.MockEC2API + eksMock *mock_eksiface.MockEKSAPI + iamMock *mock_iamauth.MockIAMAPI + stsMock *mock_stsiface.MockSTSAPI + awsNodeMock *mock_services.MockAWSNodeInterface + iamAuthenticatorMock *mock_services.MockIAMAuthenticatorInterface + kubeProxyMock *mock_services.MockKubeProxyInterface + ) + + setup := func(t *testing.T) { + t.Helper() + mockCtrl = gomock.NewController(t) + recorder = record.NewFakeRecorder(10) + reconciler = AWSManagedControlPlaneReconciler{ + Client: testEnv.Client, + Recorder: recorder, + EnableIAM: true, + } + ctx = context.TODO() + + ec2Mock = mocks.NewMockEC2API(mockCtrl) + eksMock = mock_eksiface.NewMockEKSAPI(mockCtrl) + iamMock = mock_iamauth.NewMockIAMAPI(mockCtrl) + stsMock = mock_stsiface.NewMockSTSAPI(mockCtrl) + + // Mocking these as well, since the actual implementation requires a remote client to an actual cluster + awsNodeMock = mock_services.NewMockAWSNodeInterface(mockCtrl) + iamAuthenticatorMock = mock_services.NewMockIAMAuthenticatorInterface(mockCtrl) + kubeProxyMock = mock_services.NewMockKubeProxyInterface(mockCtrl) + } + + teardown := func() { + mockCtrl.Finish() + } + + t.Run("Should successfully reconcile AWSManagedControlPlane creation with managed VPC", func(t *testing.T) { + g := NewWithT(t) + setup(t) + defer teardown() + + controllerIdentity := createControllerIdentity(g) + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5))) + g.Expect(err).To(BeNil()) + + cluster, awsManagedCluster, awsManagedControlPlane := getManagedClusterObjects("test-cluster", ns.Name) + + // Make controller manage resources + awsManagedControlPlane.Spec.NetworkSpec.VPC.ID = "" + awsManagedControlPlane.Spec.NetworkSpec.Subnets[0].ID = "my-managed-subnet-priv" + awsManagedControlPlane.Spec.NetworkSpec.Subnets[1].ID = "my-managed-subnet-pub1" + awsManagedControlPlane.Spec.NetworkSpec.Subnets[2].ID = "my-managed-subnet-pub2" + + // NAT gateway of the public subnet will be accessed by the private subnet in the same zone, + // so use same zone for the 2 test subnets 
+ awsManagedControlPlane.Spec.NetworkSpec.Subnets[0].AvailabilityZone = "us-east-1a" + awsManagedControlPlane.Spec.NetworkSpec.Subnets[1].AvailabilityZone = "us-east-1a" + // Our EKS code currently requires at least 2 different AZs + awsManagedControlPlane.Spec.NetworkSpec.Subnets[2].AvailabilityZone = "us-east-1c" + + mockedCallsForMissingEverything(ec2Mock.EXPECT(), awsManagedControlPlane.Spec.NetworkSpec.Subnets) + mockedCreateSGCalls(ec2Mock.EXPECT()) + mockedDescribeInstanceCall(ec2Mock.EXPECT()) + mockedEKSControlPlaneIAMRole(g, iamMock.EXPECT()) + mockedEKSCluster(g, eksMock.EXPECT(), iamMock.EXPECT(), ec2Mock.EXPECT(), stsMock.EXPECT(), awsNodeMock.EXPECT(), kubeProxyMock.EXPECT(), iamAuthenticatorMock.EXPECT()) + + g.Expect(testEnv.Create(ctx, &cluster)).To(Succeed()) + cluster.Status.InfrastructureReady = true + g.Expect(testEnv.Client.Status().Update(ctx, &cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, &awsManagedCluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, &awsManagedControlPlane)).To(Succeed()) + g.Eventually(func() bool { + controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} + key := client.ObjectKey{ + Name: awsManagedControlPlane.Name, + Namespace: ns.Name, + } + err := testEnv.Get(ctx, key, controlPlane) + return err == nil + }, 10*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created AWSManagedControlPlane %q", awsManagedControlPlane.Name)) + + defer t.Cleanup(func() { + g.Expect(testEnv.Cleanup(ctx, &cluster, &awsManagedCluster, &awsManagedControlPlane, controllerIdentity, ns)).To(Succeed()) + }) + + managedScope := getAWSManagedControlPlaneScope(&cluster, &awsManagedControlPlane) + + reconciler.awsNodeServiceFactory = func(scope scope.AWSNodeScope) services.AWSNodeInterface { + return awsNodeMock + } + + ec2Svc := ec2Service.NewService(managedScope) + ec2Svc.EC2Client = ec2Mock + reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface { + return ec2Svc + } + + eksSvc := eksService.NewService(managedScope) + eksSvc.EC2Client = ec2Mock + eksSvc.EKSClient = eksMock + eksSvc.IAMService.IAMClient = iamMock + eksSvc.STSClient = stsMock + reconciler.eksServiceFactory = func(scope *scope.ManagedControlPlaneScope) *eksService.Service { + return eksSvc + } + + reconciler.iamAuthenticatorServiceFactory = func(scope.IAMAuthScope, iamauth.BackendType, client.Client) services.IAMAuthenticatorInterface { + return iamAuthenticatorMock + } + reconciler.kubeProxyServiceFactory = func(scope scope.KubeProxyScope) services.KubeProxyInterface { + return kubeProxyMock + } + + networkSvc := network.NewService(managedScope) + networkSvc.EC2Client = ec2Mock + reconciler.networkServiceFactory = func(clusterScope scope.NetworkScope) services.NetworkInterface { + return networkSvc + } + + testSecurityGroupRoles := []infrav1.SecurityGroupRole{ + infrav1.SecurityGroupEKSNodeAdditional, + infrav1.SecurityGroupBastion, + } + sgSvc := securitygroup.NewService(managedScope, testSecurityGroupRoles) + sgSvc.EC2Client = ec2Mock + + reconciler.securityGroupServiceFactory = func(scope *scope.ManagedControlPlaneScope) services.SecurityGroupInterface { + return sgSvc + } + + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: awsManagedControlPlane.Namespace, + Name: awsManagedControlPlane.Name, + }, + }) + g.Expect(err).To(BeNil()) + + g.Expect(testEnv.Get(ctx, client.ObjectKeyFromObject(&awsManagedControlPlane), &awsManagedControlPlane)).To(Succeed()) + 
g.Expect(awsManagedControlPlane.Finalizers).To(ContainElement(ekscontrolplanev1.ManagedControlPlaneFinalizer)) + }) +} + +func createControllerIdentity(g *WithT) *infrav1.AWSClusterControllerIdentity { + controllerIdentity := &infrav1.AWSClusterControllerIdentity{ + TypeMeta: metav1.TypeMeta{ + Kind: string(infrav1.ControllerIdentityKind), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: infrav1.AWSClusterControllerIdentitySpec{ + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ + AllowedNamespaces: &infrav1.AllowedNamespaces{}, + }, + }, + } + g.Expect(testEnv.Create(ctx, controllerIdentity)).To(Succeed()) + return controllerIdentity +} + +// mockedCallsForMissingEverything mocks most of the AWSManagedControlPlane reconciliation calls to the AWS API, +// except for what other functions provide (see `mockedCreateSGCalls` and `mockedDescribeInstanceCall`). +func mockedCallsForMissingEverything(ec2Rec *mocks.MockEC2APIMockRecorder, subnets infrav1.Subnets) { + describeVPCByNameCall := ec2Rec.DescribeVpcsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("tag:Name"), + Values: aws.StringSlice([]string{"test-cluster-vpc"}), + }, + }, + })).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{}, + }, nil) + + ec2Rec.CreateVpcWithContext(context.TODO(), gomock.Eq(&ec2.CreateVpcInput{ + CidrBlock: aws.String("10.0.0.0/8"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("vpc"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-vpc"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + }, + }, + }, + })).After(describeVPCByNameCall).Return(&ec2.CreateVpcOutput{ + Vpc: &ec2.Vpc{ + State: aws.String("available"), + VpcId: aws.String("vpc-new"), + CidrBlock: aws.String("10.0.0.0/8"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-vpc"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + }, + }, + }, nil) + + ec2Rec.DescribeVpcAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcAttributeInput{ + VpcId: aws.String("vpc-new"), + Attribute: aws.String("enableDnsHostnames"), + })).Return(&ec2.DescribeVpcAttributeOutput{ + EnableDnsHostnames: &ec2.AttributeBooleanValue{Value: aws.Bool(true)}, + }, nil) + + ec2Rec.DescribeVpcAttributeWithContext(context.TODO(), gomock.Eq(&ec2.DescribeVpcAttributeInput{ + VpcId: aws.String("vpc-new"), + Attribute: aws.String("enableDnsSupport"), + })).Return(&ec2.DescribeVpcAttributeOutput{ + EnableDnsSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)}, + }, nil) + + ec2Rec.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}), + }, + { + Name: aws.String("vpc-id"), + Values: aws.StringSlice([]string{"vpc-new"}), + }, + }, + })).Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{}, + }, nil) + + zones := []*ec2.AvailabilityZone{} + for _, subnet := range subnets { + zones = append(zones, &ec2.AvailabilityZone{ + ZoneName: 
aws.String(subnet.AvailabilityZone), + ZoneType: aws.String("availability-zone"), + }) + } + ec2Rec.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: zones, + }, nil).MaxTimes(2) + + for subnetIndex, subnet := range subnets { + subnetID := fmt.Sprintf("subnet-%d", subnetIndex+1) + var kubernetesRoleTagKey string + var capaRoleTagValue string + if subnet.IsPublic { + kubernetesRoleTagKey = "kubernetes.io/role/elb" + capaRoleTagValue = "public" + } else { + kubernetesRoleTagKey = "kubernetes.io/role/internal-elb" + capaRoleTagValue = "private" + } + ec2Rec.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{subnet.AvailabilityZone}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String(subnet.AvailabilityZone), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).MaxTimes(1) + ec2Rec.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String("vpc-new"), + CidrBlock: aws.String(subnet.CidrBlock), + AvailabilityZone: aws.String(subnet.AvailabilityZone), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + // Assume that `ID` doesn't start with `subnet-` so that it becomes managed and `ID` denotes the desired name + Value: aws.String(subnet.ID), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String(kubernetesRoleTagKey), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String(capaRoleTagValue), + }, + }, + }, + }, + })).Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String("vpc-new"), + SubnetId: aws.String(subnetID), + CidrBlock: aws.String(subnet.CidrBlock), + AvailabilityZone: aws.String(subnet.AvailabilityZone), + MapPublicIpOnLaunch: aws.Bool(false), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + // Assume that `ID` doesn't start with `subnet-` so that it becomes managed and `ID` denotes the desired name + Value: aws.String(subnet.ID), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + }, + }, + }, nil) + + ec2Rec.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + SubnetIds: aws.StringSlice([]string{subnetID}), + })).Return(nil) + + if subnet.IsPublic { + ec2Rec.ModifySubnetAttributeWithContext(context.TODO(), gomock.Eq(&ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String(subnetID), + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + })).Return(&ec2.ModifySubnetAttributeOutput{}, nil) + } + } + + ec2Rec.DescribeRouteTablesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeRouteTablesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: aws.StringSlice([]string{"vpc-new"}), + }, + { + Name: 
aws.String("tag-key"), + Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}), + }, + }})).Return(&ec2.DescribeRouteTablesOutput{ + RouteTables: []*ec2.RouteTable{ + { + Routes: []*ec2.Route{ + { + GatewayId: aws.String("igw-12345"), + }, + }, + }, + }, + }, nil).MinTimes(1).MaxTimes(2) + + ec2Rec.DescribeInternetGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInternetGatewaysInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("attachment.vpc-id"), + Values: aws.StringSlice([]string{"vpc-new"}), + }, + }, + })).Return(&ec2.DescribeInternetGatewaysOutput{ + InternetGateways: []*ec2.InternetGateway{}, + }, nil) + + ec2Rec.CreateInternetGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateInternetGatewayInput{})). + Return(&ec2.CreateInternetGatewayOutput{ + InternetGateway: &ec2.InternetGateway{ + InternetGatewayId: aws.String("igw-1"), + Tags: []*ec2.Tag{ + { + Key: aws.String(infrav1.ClusterTagKey("test-cluster")), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-igw"), + }, + }, + }, + }, nil) + + ec2Rec.AttachInternetGatewayWithContext(context.TODO(), gomock.Eq(&ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String("igw-1"), + VpcId: aws.String("vpc-new"), + })). + Return(&ec2.AttachInternetGatewayOutput{}, nil) + + ec2Rec.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String("vpc-new")}, + }, + { + Name: aws.String("state"), + Values: aws.StringSlice([]string{ec2.VpcStatePending, ec2.VpcStateAvailable}), + }, + }}), gomock.Any()).Return(nil).MinTimes(1).MaxTimes(2) + + ec2Rec.DescribeAddressesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("tag-key"), + Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}), + }, + { + Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"), + Values: aws.StringSlice([]string{"common"}), + }, + }, + })).Return(&ec2.DescribeAddressesOutput{ + Addresses: []*ec2.Address{}, + }, nil) + + for subnetIndex, subnet := range subnets { + subnetID := fmt.Sprintf("subnet-%d", subnetIndex+1) + + // NAT gateways are attached to public subnets + if subnet.IsPublic { + eipAllocationID := strconv.Itoa(1234 + subnetIndex) + natGatewayID := fmt.Sprintf("nat-%d", subnetIndex+1) + + ec2Rec.AllocateAddressWithContext(context.TODO(), gomock.Eq(&ec2.AllocateAddressInput{ + Domain: aws.String("vpc"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("elastic-ip"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-eip-common"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + }, + }, + }, + })).Return(&ec2.AllocateAddressOutput{ + AllocationId: aws.String(eipAllocationID), + }, nil) + + ec2Rec.CreateNatGatewayWithContext(context.TODO(), gomock.Eq(&ec2.CreateNatGatewayInput{ + AllocationId: aws.String(eipAllocationID), + SubnetId: aws.String(subnetID), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("natgateway"), + Tags: []*ec2.Tag{ 
+ { + Key: aws.String("Name"), + Value: aws.String("test-cluster-nat"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + }, + }, + }, + })).Return(&ec2.CreateNatGatewayOutput{ + NatGateway: &ec2.NatGateway{ + NatGatewayId: aws.String(natGatewayID), + SubnetId: aws.String(subnetID), + }, + }, nil) + + ec2Rec.WaitUntilNatGatewayAvailableWithContext(context.TODO(), &ec2.DescribeNatGatewaysInput{ + NatGatewayIds: []*string{aws.String(natGatewayID)}, + }).Return(nil) + } + + routeTableID := fmt.Sprintf("rtb-%d", subnetIndex+1) + var routeTablePublicPrivate string + if subnet.IsPublic { + routeTablePublicPrivate = "public" + } else { + routeTablePublicPrivate = "private" + } + ec2Rec.CreateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteTableInput{ + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("route-table"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String(fmt.Sprintf("test-cluster-rt-%s-%s", routeTablePublicPrivate, subnet.AvailabilityZone)), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + }, + }, + }, + VpcId: aws.String("vpc-new"), + })).Return(&ec2.CreateRouteTableOutput{ + RouteTable: &ec2.RouteTable{ + RouteTableId: aws.String(routeTableID), + }, + }, nil) + + if subnet.IsPublic { + ec2Rec.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String("igw-1"), + RouteTableId: aws.String(routeTableID), + })).Return(&ec2.CreateRouteOutput{}, nil) + } else { + // Private subnet uses a NAT gateway attached to a public subnet in the same AZ + var natGatewayID string + for otherSubnetIndex, otherSubnet := range subnets { + if otherSubnet.IsPublic && subnet.AvailabilityZone == otherSubnet.AvailabilityZone { + natGatewayID = fmt.Sprintf("nat-%d", otherSubnetIndex+1) + break + } + } + if natGatewayID == "" { + panic("Could not find NAT gateway from public subnet of same AZ") + } + ec2Rec.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String("0.0.0.0/0"), + NatGatewayId: aws.String(natGatewayID), + RouteTableId: aws.String(routeTableID), + })).Return(&ec2.CreateRouteOutput{}, nil) + } + + ec2Rec.AssociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.AssociateRouteTableInput{ + RouteTableId: aws.String(routeTableID), + SubnetId: aws.String(subnetID), + })).Return(&ec2.AssociateRouteTableOutput{}, nil) + } +} + +func mockedCreateSGCalls(ec2Rec *mocks.MockEC2APIMockRecorder) { + ec2Rec.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: aws.StringSlice([]string{"vpc-new"}), + }, + { + Name: aws.String("tag-key"), + Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}), + }, + }, + })).Return( + &ec2.DescribeSecurityGroupsOutput{ + SecurityGroups: []*ec2.SecurityGroup{ + { + GroupId: aws.String("1"), + GroupName: aws.String("test-sg"), + }, + }, + }, nil) + securityGroupAdditionalCall := 
ec2Rec.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{ + VpcId: aws.String("vpc-new"), + GroupName: aws.String("test-cluster-node-eks-additional"), + Description: aws.String("Kubernetes cluster test-cluster: node-eks-additional"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("security-group"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-node-eks-additional"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node-eks-additional"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-node-eks-additional")}, nil) + ec2Rec.CreateSecurityGroupWithContext(context.TODO(), gomock.Eq(&ec2.CreateSecurityGroupInput{ + VpcId: aws.String("vpc-new"), + GroupName: aws.String("test-cluster-bastion"), + Description: aws.String("Kubernetes cluster test-cluster: bastion"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("security-group"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-bastion"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("bastion"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSecurityGroupOutput{GroupId: aws.String("sg-bastion")}, nil) + ec2Rec.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String("sg-node-eks-additional"), + })). + Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil). 
+ After(securityGroupAdditionalCall).Times(2) +} + +func mockedDescribeInstanceCall(ec2Rec *mocks.MockEC2APIMockRecorder) { + ec2Rec.DescribeInstancesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("tag:sigs.k8s.io/cluster-api-provider-aws/role"), + Values: aws.StringSlice([]string{"bastion"}), + }, + { + Name: aws.String("tag-key"), + Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}), + }, + { + Name: aws.String("instance-state-name"), + Values: aws.StringSlice([]string{"pending", "running", "stopping", "stopped"}), + }, + }, + })).Return(&ec2.DescribeInstancesOutput{ + Reservations: []*ec2.Reservation{ + { + Instances: []*ec2.Instance{ + { + InstanceId: aws.String("id-1"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("subnet-1"), + ImageId: aws.String("ami-1"), + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + State: &ec2.InstanceState{ + Code: aws.Int64(16), + Name: aws.String(ec2.StateAvailable), + }, + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: aws.String("us-east-1a"), + }, + }, + }, + }, + }, + }, nil) +} + +func mockedEKSControlPlaneIAMRole(g *WithT, iamRec *mock_iamauth.MockIAMAPIMockRecorder) { + getRoleCall := iamRec.GetRole(&iam.GetRoleInput{ + RoleName: aws.String("test-cluster-iam-service-role"), + }).Return(nil, awserr.New(iam.ErrCodeNoSuchEntityException, "", nil)) + + createRoleCall := iamRec.CreateRole(gomock.Any()).After(getRoleCall).DoAndReturn(func(input *iam.CreateRoleInput) (*iam.CreateRoleOutput, error) { + g.Expect(input.RoleName).To(BeComparableTo(aws.String("test-cluster-iam-service-role"))) + return &iam.CreateRoleOutput{ + Role: &iam.Role{ + RoleName: aws.String("test-cluster-iam-service-role"), + Arn: aws.String("arn:aws:iam::123456789012:role/test-cluster-iam-service-role"), + Tags: input.Tags, + }, + }, nil + }) + + iamRec.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String("test-cluster-iam-service-role"), + }).After(createRoleCall).Return(&iam.ListAttachedRolePoliciesOutput{ + AttachedPolicies: []*iam.AttachedPolicy{}, + }, nil) + + getPolicyCall := iamRec.GetPolicy(&iam.GetPolicyInput{ + PolicyArn: aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"), + }).Return(&iam.GetPolicyOutput{ + // This policy is predefined by AWS + Policy: &iam.Policy{ + // Fields are not used. Our code only checks for existence of the policy. 
+ }, + }, nil) + + iamRec.AttachRolePolicy(&iam.AttachRolePolicyInput{ + PolicyArn: aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"), + RoleName: aws.String("test-cluster-iam-service-role"), + }).After(getPolicyCall).Return(&iam.AttachRolePolicyOutput{}, nil) +} + +func mockedEKSCluster(g *WithT, eksRec *mock_eksiface.MockEKSAPIMockRecorder, iamRec *mock_iamauth.MockIAMAPIMockRecorder, ec2Rec *mocks.MockEC2APIMockRecorder, stsRec *mock_stsiface.MockSTSAPIMockRecorder, awsNodeRec *mock_services.MockAWSNodeInterfaceMockRecorder, kubeProxyRec *mock_services.MockKubeProxyInterfaceMockRecorder, iamAuthenticatorRec *mock_services.MockIAMAuthenticatorInterfaceMockRecorder) { + describeClusterCall := eksRec.DescribeCluster(&eks.DescribeClusterInput{ + Name: aws.String("test-cluster"), + }).Return(nil, awserr.New(eks.ErrCodeResourceNotFoundException, "", nil)) + + getRoleCall := iamRec.GetRole(&iam.GetRoleInput{ + RoleName: aws.String("test-cluster-iam-service-role"), + }).After(describeClusterCall).Return(&iam.GetRoleOutput{ + Role: &iam.Role{ + RoleName: aws.String("test-cluster-iam-service-role"), + Arn: aws.String("arn:aws:iam::123456789012:role/test-cluster-iam-service-role"), + }, + }, nil) + + resourcesVpcConfig := &eks.VpcConfigResponse{ + ClusterSecurityGroupId: aws.String("eks-cluster-sg-test-cluster-44556677"), + } + + clusterARN := aws.String("arn:aws:eks:us-east-1:1133557799:cluster/test-cluster") + clusterCreating := eks.Cluster{ + Arn: clusterARN, + Name: aws.String("test-cluster"), + Status: aws.String(eks.ClusterStatusCreating), + ResourcesVpcConfig: resourcesVpcConfig, + CertificateAuthority: &eks.Certificate{ + Data: aws.String(base64.StdEncoding.EncodeToString([]byte("foobar"))), + }, + Logging: &eks.Logging{ + ClusterLogging: []*eks.LogSetup{ + { + Enabled: aws.Bool(true), + Types: []*string{aws.String(eks.LogTypeApi)}, + }, + { + Enabled: aws.Bool(false), + Types: []*string{ + aws.String(eks.LogTypeAudit), + aws.String(eks.LogTypeAuthenticator), + aws.String(eks.LogTypeControllerManager), + aws.String(eks.LogTypeScheduler), + }, + }, + }, + }, + } + + createClusterCall := eksRec.CreateCluster(gomock.Any()).After(getRoleCall).DoAndReturn(func(input *eks.CreateClusterInput) (*eks.CreateClusterOutput, error) { + g.Expect(input.Name).To(BeComparableTo(aws.String("test-cluster"))) + return &eks.CreateClusterOutput{ + Cluster: &clusterCreating, + }, nil + }) + + waitUntilClusterActiveCall := eksRec.WaitUntilClusterActive(&eks.DescribeClusterInput{ + Name: aws.String("test-cluster"), + }).After(createClusterCall).Return(nil) + + clusterActive := clusterCreating // copy + clusterActive.Status = aws.String(eks.ClusterStatusActive) + clusterActive.Endpoint = aws.String("https://F00D133712341337.gr7.us-east-1.eks.amazonaws.com") + clusterActive.Version = aws.String("1.24") + + eksRec.DescribeCluster(&eks.DescribeClusterInput{ + Name: aws.String("test-cluster"), + }).After(waitUntilClusterActiveCall).Return(&eks.DescribeClusterOutput{ + Cluster: &clusterActive, + }, nil) + + // AWS precreates a default security group together with the cluster + // (https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) + clusterSgDesc := &ec2.DescribeSecurityGroupsOutput{ + SecurityGroups: []*ec2.SecurityGroup{ + { + GroupId: aws.String("sg-11223344"), + GroupName: aws.String("eks-cluster-sg-test-cluster-44556677"), + }, + }, + } + ec2Rec.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{ + { + Name: 
aws.String("tag:aws:eks:cluster-name"), + Values: aws.StringSlice([]string{"test-cluster"}), + }, + }, + })).Return( + clusterSgDesc, nil) + ec2Rec.DescribeSecurityGroupsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSecurityGroupsInput{ + GroupIds: aws.StringSlice([]string{"eks-cluster-sg-test-cluster-44556677"}), + })).Return( + clusterSgDesc, nil) + + req, err := http.NewRequest(http.MethodGet, "foobar", http.NoBody) + g.Expect(err).To(BeNil()) + stsRec.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}).Return(&stsrequest.Request{ + HTTPRequest: req, + Operation: &stsrequest.Operation{}, + }, &sts.GetCallerIdentityOutput{}) + + eksRec.TagResource(&eks.TagResourceInput{ + ResourceArn: clusterARN, + Tags: aws.StringMap(map[string]string{ + "Name": "test-cluster", + "sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster": "owned", + "sigs.k8s.io/cluster-api-provider-aws/role": "common", + }), + }).Return(&eks.TagResourceOutput{}, nil) + + eksRec.ListAddons(&eks.ListAddonsInput{ + ClusterName: aws.String("test-cluster"), + }).Return(&eks.ListAddonsOutput{}, nil) + + awsNodeRec.ReconcileCNI(gomock.Any()).Return(nil) + kubeProxyRec.ReconcileKubeProxy(gomock.Any()).Return(nil) + iamAuthenticatorRec.ReconcileIAMAuthenticator(gomock.Any()).Return(nil) +} diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go index efb7d5053d..f2f6b169e8 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_unit_test.go @@ -43,9 +43,9 @@ func TestSecurityGroupRolesForCluster(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - c := getAWSManagedControlPlane("test", "test") - c.Spec.Bastion.Enabled = tt.bastionEnabled - s, err := getManagedControlPlaneScope(c) + _, _, awsManagedControlPlane := getManagedClusterObjects("test", "test") + awsManagedControlPlane.Spec.Bastion.Enabled = tt.bastionEnabled + s, err := getManagedControlPlaneScope(awsManagedControlPlane) g.Expect(err).To(BeNil(), "failed to create cluster scope for test") got := securityGroupRolesForControlPlane(s) diff --git a/controlplane/eks/controllers/helpers_test.go b/controlplane/eks/controllers/helpers_test.go index 5970842a03..77f739014f 100644 --- a/controlplane/eks/controllers/helpers_test.go +++ b/controlplane/eks/controllers/helpers_test.go @@ -16,8 +16,10 @@ limitations under the License. 
package controllers import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -26,14 +28,63 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -func getAWSManagedControlPlane(name, namespace string) ekscontrolplanev1.AWSManagedControlPlane { - return ekscontrolplanev1.AWSManagedControlPlane{ +func getAWSManagedControlPlaneScope(cluster *clusterv1.Cluster, awsManagedControlPlane *ekscontrolplanev1.AWSManagedControlPlane) *scope.ManagedControlPlaneScope { + scope, err := scope.NewManagedControlPlaneScope( + scope.ManagedControlPlaneScopeParams{ + Client: testEnv.Client, + Cluster: cluster, + ControlPlane: awsManagedControlPlane, + EnableIAM: true, + }, + ) + utilruntime.Must(err) + return scope +} + +func getManagedClusterObjects(name, namespace string) (clusterv1.Cluster, infrav1.AWSManagedCluster, ekscontrolplanev1.AWSManagedControlPlane) { + cluster := clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, Name: name, + UID: "1", + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + APIVersion: ekscontrolplanev1.GroupVersion.String(), + Name: name, + Kind: "AWSManagedControlPlane", + Namespace: namespace, + }, + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: infrav1.GroupVersion.String(), + Name: name, + Kind: "AWSManagedCluster", + Namespace: namespace, + }, + }, + } + awsManagedCluster := infrav1.AWSManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, + Name: name, + }, + } + awsManagedControlPlane := ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: cluster.Name, + UID: "1", + }, + }, }, Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ - Region: "us-east-1", + EKSClusterName: name, + Region: "us-east-1", NetworkSpec: infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: "vpc-exists", @@ -48,16 +99,23 @@ func getAWSManagedControlPlane(name, namespace string) ekscontrolplanev1.AWSMana }, { ID: "subnet-2", - AvailabilityZone: "us-east-1c", + AvailabilityZone: "us-east-1b", CidrBlock: "10.0.11.0/24", IsPublic: true, }, + { + ID: "subnet-3", + AvailabilityZone: "us-east-1c", + CidrBlock: "10.0.12.0/24", + IsPublic: true, + }, }, SecurityGroupOverrides: map[infrav1.SecurityGroupRole]string{}, }, Bastion: infrav1.Bastion{Enabled: true}, }, } + return cluster, awsManagedCluster, awsManagedControlPlane } func getManagedControlPlaneScope(cp ekscontrolplanev1.AWSManagedControlPlane) (*scope.ManagedControlPlaneScope, error) { diff --git a/controlplane/eks/controllers/suite_test.go b/controlplane/eks/controllers/suite_test.go new file mode 100644 index 0000000000..c284f3dec2 --- /dev/null +++ b/controlplane/eks/controllers/suite_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + "path" + "testing" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + + // +kubebuilder:scaffold:imports + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +var ( + testEnv *helpers.TestEnvironment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + setup() + defer teardown() + m.Run() +} + +func setup() { + utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) + testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ + path.Join("config", "crd", "bases"), + }, + ).WithWebhookConfiguration("managed", path.Join("config", "webhook", "manifests.yaml")) + var err error + testEnv, err = testEnvConfig.Build() + if err != nil { + panic(err) + } + if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSManagedControlPlane webhook: %v", err)) + } + if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSClusterControllerIdentity webhook: %v", err)) + } + go func() { + fmt.Println("Starting the manager") + if err := testEnv.StartManager(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) + } + }() + testEnv.WaitForWebhooks() +} + +func teardown() { + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop envtest: %v", err)) + } +} diff --git a/controlplane/rosa/api/v1beta2/conditions_consts.go b/controlplane/rosa/api/v1beta2/conditions_consts.go index 797e04a0a5..8bb0f50427 100644 --- a/controlplane/rosa/api/v1beta2/conditions_consts.go +++ b/controlplane/rosa/api/v1beta2/conditions_consts.go @@ -22,6 +22,21 @@ const ( // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane. ROSAControlPlaneReadyCondition clusterv1.ConditionType = "ROSAControlPlaneReady" + // ROSAControlPlaneValidCondition condition reports whether ROSAControlPlane configuration is valid. + ROSAControlPlaneValidCondition clusterv1.ConditionType = "ROSAControlPlaneValid" + // ROSAControlPlaneUpgradingCondition condition reports whether ROSAControlPlane is upgrading or not. ROSAControlPlaneUpgradingCondition clusterv1.ConditionType = "ROSAControlPlaneUpgrading" + + // ExternalAuthConfiguredCondition condition reports whether external auth has beed correctly configured. + ExternalAuthConfiguredCondition clusterv1.ConditionType = "ExternalAuthConfigured" + + // ReconciliationFailedReason used to report reconciliation failures. + ReconciliationFailedReason = "ReconciliationFailed" + + // ROSAControlPlaneDeletionFailedReason used to report failures while deleting ROSAControlPlane. 
+ ROSAControlPlaneDeletionFailedReason = "DeletionFailed" + + // ROSAControlPlaneInvalidConfigurationReason used to report invalid user input. + ROSAControlPlaneInvalidConfigurationReason = "InvalidConfiguration" ) diff --git a/controlplane/rosa/api/v1beta2/doc.go b/controlplane/rosa/api/v1beta2/doc.go new file mode 100644 index 0000000000..9308d1fb62 --- /dev/null +++ b/controlplane/rosa/api/v1beta2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group +// +gencrdrefdocs:force +// +groupName=controlplane.cluster.x-k8s.io +// +k8s:defaulter-gen=TypeMeta +package v1beta2 diff --git a/controlplane/rosa/api/v1beta2/external_auth_types.go b/controlplane/rosa/api/v1beta2/external_auth_types.go new file mode 100644 index 0000000000..7bd16d4585 --- /dev/null +++ b/controlplane/rosa/api/v1beta2/external_auth_types.go @@ -0,0 +1,249 @@ +package v1beta2 + +// ExternalAuthProvider is an external OIDC identity provider that can issue tokens for this cluster +type ExternalAuthProvider struct { + // Name of the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + // Issuer describes attributes of the OIDC token issuer + // + // +kubebuilder:validation:Required + // +required + Issuer TokenIssuer `json:"issuer"` + + // OIDCClients contains configuration for the platform's clients that + // need to request tokens from the issuer + // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + // +optional + OIDCClients []OIDCClientConfig `json:"oidcClients,omitempty"` + + // ClaimMappings describes rules on how to transform information from an + // ID token into a cluster identity + // +optional + ClaimMappings *TokenClaimMappings `json:"claimMappings,omitempty"` + + // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + // + // +listType=atomic + ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` +} + +// TokenAudience is the audience that the token was issued for. +// +// +kubebuilder:validation:MinLength=1 +type TokenAudience string + +// TokenIssuer describes attributes of the OIDC token issuer +type TokenIssuer struct { + // URL is the serving URL of the token issuer. + // Must use the https:// scheme. + // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +kubebuilder:validation:Required + // +required + URL string `json:"issuerURL"` + + // Audiences is an array of audiences that the token was issued for. + // Valid tokens must include at least one of these values in their + // "aud" claim. + // Must be set to exactly one value. 
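+	// In practice this usually matches the OIDC client ID that the provider issues tokens for.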
+ // + // +listType=set + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + // +required + Audiences []TokenAudience `json:"audiences"` + + // CertificateAuthority is a reference to a config map in the + // configuration namespace. The .data of the configMap must contain + // the "ca-bundle.crt" key. + // If unset, system trust is used instead. + CertificateAuthority *LocalObjectReference `json:"issuerCertificateAuthority,omitempty"` +} + +// OIDCClientConfig contains configuration for the platform's client that +// need to request tokens from the issuer. +type OIDCClientConfig struct { + // ComponentName is the name of the component that is supposed to consume this + // client configuration + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + ComponentName string `json:"componentName"` + + // ComponentNamespace is the namespace of the component that is supposed to consume this + // client configuration + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + ComponentNamespace string `json:"componentNamespace"` + + // ClientID is the identifier of the OIDC client from the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + ClientID string `json:"clientID"` + + // ClientSecret refers to a secret that + // contains the client secret in the `clientSecret` key of the `.data` field + ClientSecret LocalObjectReference `json:"clientSecret"` + + // ExtraScopes is an optional set of scopes to request tokens with. + // + // +listType=set + // +optional + ExtraScopes []string `json:"extraScopes,omitempty"` +} + +// TokenClaimMappings describes rules on how to transform information from an +// ID token into a cluster identity. +type TokenClaimMappings struct { + // Username is a name of the claim that should be used to construct + // usernames for the cluster identity. + // + // Default value: "sub" + // +optional + Username *UsernameClaimMapping `json:"username,omitempty"` + + // Groups is a name of the claim that should be used to construct + // groups for the cluster identity. + // The referenced claim must use array of strings values. + // +optional + Groups *PrefixedClaimMapping `json:"groups,omitempty"` +} + +// PrefixedClaimMapping defines claims with a prefix. +type PrefixedClaimMapping struct { + // Claim is a JWT token claim to be used in the mapping + // + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` + + // Prefix is a string to prefix the value from the token in the result of the + // claim mapping. + // + // By default, no prefixing occurs. + // + // Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains + // an array of strings "a", "b" and "c", the mapping will result in an + // array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Prefix string `json:"prefix,omitempty"` +} + +// UsernameClaimMapping defines the claim that should be used to construct usernames for the cluster identity. +// +// +kubebuilder:validation:XValidation:rule="self.prefixPolicy == 'Prefix' ? 
has(self.prefix) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +type UsernameClaimMapping struct { + // Claim is a JWT token claim to be used in the mapping + // + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` + + // PrefixPolicy specifies how a prefix should apply. + // + // By default, claims other than `email` will be prefixed with the issuer URL to + // prevent naming clashes with other plugins. + // + // Set to "NoPrefix" to disable prefixing. + // + // Example: + // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". + // If the JWT claim `username` contains value `userA`, the resulting + // mapped value will be "myoidc:userA". + // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the + // JWT `email` claim contains value "userA@myoidc.tld", the resulting + // mapped value will be "myoidc:userA@myoidc.tld". + // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + // and `claim` is set to: + // (a) "username": the mapped value will be "https://myoidc.tld#userA" + // (b) "email": the mapped value will be "userA@myoidc.tld" + // + // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} + // +optional + PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy,omitempty"` + + // Prefix is prepended to claim to prevent clashes with existing names. + // + // +kubebuilder:validation:MinLength=1 + // +optional + Prefix *string `json:"prefix,omitempty"` +} + +// UsernamePrefixPolicy specifies how a prefix should apply. +type UsernamePrefixPolicy string + +const ( + // NoOpinion let's the cluster assign prefixes. If the username claim is email, there is no prefix + // If the username claim is anything else, it is prefixed by the issuerURL + NoOpinion UsernamePrefixPolicy = "" + + // NoPrefix means the username claim value will not have any prefix + NoPrefix UsernamePrefixPolicy = "NoPrefix" + + // Prefix means the prefix value must be specified. It cannot be empty + Prefix UsernamePrefixPolicy = "Prefix" +) + +// TokenValidationRuleType defines the type of the validation rule. +type TokenValidationRuleType string + +const ( + // TokenValidationRuleTypeRequiredClaim defines the type for RequiredClaim. + TokenValidationRuleTypeRequiredClaim TokenValidationRuleType = "RequiredClaim" +) + +// TokenClaimValidationRule validates token claims to authenticate users. +type TokenClaimValidationRule struct { + // Type sets the type of the validation rule + // + // +kubebuilder:validation:Enum={"RequiredClaim"} + // +kubebuilder:default="RequiredClaim" + Type TokenValidationRuleType `json:"type"` + + // RequiredClaim allows configuring a required claim name and its expected value + // +kubebuilder:validation:Required + RequiredClaim TokenRequiredClaim `json:"requiredClaim"` +} + +// TokenRequiredClaim allows configuring a required claim name and its expected value. +type TokenRequiredClaim struct { + // Claim is a name of a required claim. Only claims with string values are + // supported. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` + + // RequiredValue is the required value for the claim. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + RequiredValue string `json:"requiredValue"` +} + +// LocalObjectReference references an object in the same namespace. +type LocalObjectReference struct { + // Name is the metadata.name of the referenced object. + // + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} diff --git a/controlplane/rosa/api/v1beta2/groupversion_info.go b/controlplane/rosa/api/v1beta2/groupversion_info.go index 9eeee3d76c..ea4ec8f784 100644 --- a/controlplane/rosa/api/v1beta2/groupversion_info.go +++ b/controlplane/rosa/api/v1beta2/groupversion_info.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group +// Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group. // +kubebuilder:object:generate=true // +groupName=controlplane.cluster.x-k8s.io package v1beta2 diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go index 91728fc04b..0fad71f9bd 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go @@ -21,51 +21,148 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +// RosaEndpointAccessType specifies the publishing scope of cluster endpoints. +type RosaEndpointAccessType string + +const ( + // Public endpoint access allows public API server access and + // private node communication with the control plane. + Public RosaEndpointAccessType = "Public" + + // Private endpoint access allows only private API server access and private + // node communication with the control plane. + Private RosaEndpointAccessType = "Private" +) + +// RosaControlPlaneSpec defines the desired state of ROSAControlPlane. type RosaControlPlaneSpec struct { //nolint: maligned // Cluster name must be valid DNS-1035 label, so it must consist of lower case alphanumeric // characters or '-', start with an alphabetic character, end with an alphanumeric character - // and have a max length of 15 characters. + // and have a max length of 54 characters. // // +immutable // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="rosaClusterName is immutable" - // +kubebuilder:validation:MaxLength:=15 + // +kubebuilder:validation:MaxLength:=54 // +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$` RosaClusterName string `json:"rosaClusterName"` + // DomainPrefix is an optional prefix added to the cluster's domain name. It will be used + // when generating a sub-domain for the cluster on openshiftapps domain. It must be valid DNS-1035 label + // consisting of lower case alphanumeric characters or '-', start with an alphabetic character + // end with an alphanumeric character and have a max length of 15 characters. + // + // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="domainPrefix is immutable" + // +kubebuilder:validation:MaxLength:=15 + // +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$` + // +optional + DomainPrefix string `json:"domainPrefix,omitempty"` + // The Subnet IDs to use when installing the cluster. 
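Taken together, the external auth types above attach a single OIDC identity provider to a ROSA HCP cluster. A minimal illustrative sketch of how the fields compose (not part of the change; it assumes it sits in this package so the types resolve directly, and every name, URL and secret reference is hypothetical):

```go
// exampleExternalAuthProvider wires a hypothetical console OIDC client to an
// external issuer and maps usernames/groups from token claims.
var exampleExternalAuthProvider = ExternalAuthProvider{
	Name: "example-oidc",
	Issuer: TokenIssuer{
		URL:       "https://login.example.com", // must use the https:// scheme
		Audiences: []TokenAudience{"openshift"},
		// CertificateAuthority left nil, so system trust is used.
	},
	OIDCClients: []OIDCClientConfig{{
		ComponentName:      "console",
		ComponentNamespace: "openshift-console",
		ClientID:           "console-client",
		// The referenced secret must hold the client secret under the `clientSecret` data key.
		ClientSecret: LocalObjectReference{Name: "console-client-secret"},
	}},
	ClaimMappings: &TokenClaimMappings{
		// Per the CEL rule on UsernameClaimMapping, Prefix may only be set when
		// PrefixPolicy is "Prefix" and must stay unset otherwise.
		Username: &UsernameClaimMapping{Claim: "email", PrefixPolicy: NoPrefix},
		Groups:   &PrefixedClaimMapping{Claim: "groups", Prefix: "oidc:"},
	},
	ClaimValidationRules: []TokenClaimValidationRule{{
		Type:          TokenValidationRuleTypeRequiredClaim,
		RequiredClaim: TokenRequiredClaim{Claim: "aud", RequiredValue: "openshift"},
	}},
}
```

The CRD caps `externalAuthProviders` at one entry, and the webhook further below additionally rejects it unless `enableExternalAuthProviders` is true.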
// SubnetIDs should come in pairs; two per availability zone, one private and one public. Subnets []string `json:"subnets"` - // AWS AvailabilityZones of the worker nodes - // should match the AvailabilityZones of the Subnets. + // AvailabilityZones describe AWS AvailabilityZones of the worker nodes. + // should match the AvailabilityZones of the provided Subnets. + // a machinepool will be created for each availabilityZone. AvailabilityZones []string `json:"availabilityZones"` - // Block of IP addresses used by OpenShift while installing the cluster, for example "10.0.0.0/16". - MachineCIDR *string `json:"machineCIDR"` - // The AWS Region the cluster lives in. - Region *string `json:"region"` + Region string `json:"region"` - // Openshift version, for example "4.14.5". + // OpenShift semantic version, for example "4.14.5". Version string `json:"version"` - // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. - // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` - // AWS IAM roles used to perform credential requests by the openshift operators. RolesRef AWSRolesRef `json:"rolesRef"` - // The ID of the OpenID Connect Provider. - OIDCID *string `json:"oidcID"` + // The ID of the internal OpenID Connect Provider. + // + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="oidcID is immutable" + OIDCID string `json:"oidcID"` + + // EnableExternalAuthProviders enables external authentication configuration for the cluster. + // + // +kubebuilder:default=false + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="enableExternalAuthProviders is immutable" + // +optional + EnableExternalAuthProviders bool `json:"enableExternalAuthProviders,omitempty"` + + // ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster. + // Can only be set if "enableExternalAuthProviders" is set to "True". + // + // At most one provider can be configured. + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=1 + ExternalAuthProviders []ExternalAuthProvider `json:"externalAuthProviders,omitempty"` + + // InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.. + InstallerRoleARN string `json:"installerRoleARN"` + // SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable + // access to the cluster account in order to provide support. + SupportRoleARN string `json:"supportRoleARN"` + // WorkerRoleARN is an AWS IAM role that will be attached to worker instances. + WorkerRoleARN string `json:"workerRoleARN"` + + // BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters. + // The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster + // is running. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="billingAccount is immutable" + // +kubebuilder:validation:XValidation:rule="self.matches('^[0-9]{12}$')", message="billingAccount must be a valid AWS account ID" + // +immutable + // +optional + BillingAccount string `json:"billingAccount,omitempty"` + + // DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation. + // One MachinePool will be created with this configuration per AvailabilityZone. 
Those default machinepools are required for openshift cluster operators + // to work properly. + // As these machinepool not created using ROSAMachinePool CR, they will not be visible/managed by ROSA CAPI provider. + // `rosa list machinepools -c ` can be used to view those machinepools. + // + // This field will be removed in the future once the current limitation is resolved. + // + // +optional + DefaultMachinePoolSpec DefaultMachinePoolSpec `json:"defaultMachinePoolSpec,omitempty"` + + // Network config for the ROSA HCP cluster. + // +optional + Network *NetworkSpec `json:"network,omitempty"` + + // EndpointAccess specifies the publishing scope of cluster endpoints. The + // default is Public. + // + // +kubebuilder:validation:Enum=Public;Private + // +kubebuilder:default=Public + // +optional + EndpointAccess RosaEndpointAccessType `json:"endpointAccess,omitempty"` - // TODO: these are to satisfy ocm sdk. Explore how to drop them. - InstallerRoleARN *string `json:"installerRoleARN"` - SupportRoleARN *string `json:"supportRoleARN"` - WorkerRoleARN *string `json:"workerRoleARN"` + // AdditionalTags are user-defined tags to be added on the AWS resources associated with the control plane. + // +optional + AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"` + + // EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be + // created out-of-band by the user and tagged with `red-hat:true`. + // +optional + EtcdEncryptionKMSARN string `json:"etcdEncryptionKMSARN,omitempty"` + + // AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch. + // If not set, audit log forwarding is disabled. + // +optional + AuditLogRoleARN string `json:"auditLogRoleARN,omitempty"` + + // ProvisionShardID defines the shard where rosa control plane components will be hosted. + // + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="provisionShardID is immutable" + // +optional + ProvisionShardID string `json:"provisionShardID,omitempty"` // CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API. // The secret should contain the following data keys: @@ -74,11 +171,56 @@ type RosaControlPlaneSpec struct { //nolint: maligned // +optional CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"` - // +optional - // IdentityRef is a reference to an identity to be used when reconciling the managed control plane. // If no identity is specified, the default identity for this controller will be used. + // + // +optional IdentityRef *infrav1.AWSIdentityReference `json:"identityRef,omitempty"` + + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` +} + +// NetworkSpec for ROSA-HCP. +type NetworkSpec struct { + // IP addresses block used by OpenShift while installing the cluster, for example "10.0.0.0/16". + // +kubebuilder:validation:Format=cidr + // +optional + MachineCIDR string `json:"machineCIDR,omitempty"` + + // IP address block from which to assign pod IP addresses, for example `10.128.0.0/14`. + // +kubebuilder:validation:Format=cidr + // +optional + PodCIDR string `json:"podCIDR,omitempty"` + + // IP address block from which to assign service IP addresses, for example `172.30.0.0/16`. 
+ // +kubebuilder:validation:Format=cidr + // +optional + ServiceCIDR string `json:"serviceCIDR,omitempty"` + + // Network host prefix which is defaulted to `23` if not specified. + // +kubebuilder:default=23 + // +optional + HostPrefix int `json:"hostPrefix,omitempty"` + + // The CNI network type default is OVNKubernetes. + // +kubebuilder:validation:Enum=OVNKubernetes;Other + // +kubebuilder:default=OVNKubernetes + // +optional + NetworkType string `json:"networkType,omitempty"` +} + +// DefaultMachinePoolSpec defines the configuration for the required worker nodes provisioned as part of the cluster creation. +type DefaultMachinePoolSpec struct { + // The instance type to use, for example `r5.xlarge`. Instance type ref; https://aws.amazon.com/ec2/instance-types/ + // +optional + InstanceType string `json:"instanceType,omitempty"` + + // Autoscaling specifies auto scaling behaviour for the default MachinePool. Autoscaling min/max value + // must be equal or multiple of the availability zones count. + // +optional + Autoscaling *expinfrav1.RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"` } // AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API. @@ -459,6 +601,7 @@ type AWSRolesRef struct { KMSProviderARN string `json:"kmsProviderARN"` } +// RosaControlPlaneStatus defines the observed state of ROSAControlPlane. type RosaControlPlaneStatus struct { // ExternalManagedControlPlane indicates to cluster-api that the control plane // is managed by an external service such as AKS, EKS, GKE, etc. @@ -482,14 +625,14 @@ type RosaControlPlaneStatus struct { // // +optional FailureMessage *string `json:"failureMessage,omitempty"` - // Conditions specifies the cpnditions for the managed control plane + // Conditions specifies the conditions for the managed control plane Conditions clusterv1.Conditions `json:"conditions,omitempty"` // ID is the cluster ID given by ROSA. - ID *string `json:"id,omitempty"` + ID string `json:"id,omitempty"` // ConsoleURL is the url for the openshift console. ConsoleURL string `json:"consoleURL,omitempty"` - // OIDCEndpointURL is the endpoint url for the managed OIDC porvider. + // OIDCEndpointURL is the endpoint url for the managed OIDC provider. OIDCEndpointURL string `json:"oidcEndpointURL,omitempty"` } @@ -501,6 +644,7 @@ type RosaControlPlaneStatus struct { // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes" // +k8s:defaulter-gen=true +// ROSAControlPlane is the Schema for the ROSAControlPlanes API. type ROSAControlPlane struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -511,6 +655,7 @@ type ROSAControlPlane struct { // +kubebuilder:object:root=true +// ROSAControlPlaneList contains a list of ROSAControlPlane. 
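For orientation, a condensed sketch of how the expanded spec fields fit together (illustrative values only; required pieces such as RolesRef, the OCM credentials reference and the identity reference are omitted for brevity):

```go
// exampleSpec is a hypothetical ROSA HCP control plane spec exercising the new fields.
var exampleSpec = RosaControlPlaneSpec{
	RosaClusterName:   "capi-rosa-demo", // DNS-1035 label, now up to 54 characters
	DomainPrefix:      "capi-rosa",      // optional, still capped at 15 characters
	Region:            "us-west-2",
	Version:           "4.14.5",
	Subnets:           []string{"subnet-private-2a", "subnet-public-2a"},
	AvailabilityZones: []string{"us-west-2a"},
	OIDCID:            "example-oidc-config-id",
	InstallerRoleARN:  "arn:aws:iam::111122223333:role/demo-HCP-ROSA-Installer-Role",
	SupportRoleARN:    "arn:aws:iam::111122223333:role/demo-HCP-ROSA-Support-Role",
	WorkerRoleARN:     "arn:aws:iam::111122223333:role/demo-HCP-ROSA-Worker-Role",
	EndpointAccess:    Public,
	Network: &NetworkSpec{
		MachineCIDR: "10.0.0.0/16",
		PodCIDR:     "10.128.0.0/14",
		ServiceCIDR: "172.30.0.0/16",
		HostPrefix:  23,
		NetworkType: "OVNKubernetes",
	},
	DefaultMachinePoolSpec: DefaultMachinePoolSpec{InstanceType: "m5.xlarge"},
	AdditionalTags:         infrav1.Tags{"environment": "demo"},
}
```

One default machine pool per availability zone is created from DefaultMachinePoolSpec at install time; additional pools are managed through ROSAMachinePool resources.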
type ROSAControlPlaneList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go index 13562167e5..ae4ae66417 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go @@ -1,7 +1,10 @@ package v1beta2 import ( + "net" + "github.com/blang/semver" + kmsArnRegexpValidator "github.com/openshift-online/ocm-common/pkg/resource/validations" apierrors "k8s.io/apimachinery/pkg/api/errors" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" @@ -31,6 +34,17 @@ func (r *ROSAControlPlane) ValidateCreate() (warnings admission.Warnings, err er allErrs = append(allErrs, err) } + if err := r.validateEtcdEncryptionKMSArn(); err != nil { + allErrs = append(allErrs, err) + } + + if err := r.validateExternalAuthProviders(); err != nil { + allErrs = append(allErrs, err) + } + + allErrs = append(allErrs, r.validateNetwork()...) + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + if len(allErrs) == 0 { return nil, nil } @@ -50,6 +64,13 @@ func (r *ROSAControlPlane) ValidateUpdate(old runtime.Object) (warnings admissio allErrs = append(allErrs, err) } + if err := r.validateEtcdEncryptionKMSArn(); err != nil { + allErrs = append(allErrs, err) + } + + allErrs = append(allErrs, r.validateNetwork()...) + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + if len(allErrs) == 0 { return nil, nil } @@ -69,7 +90,57 @@ func (r *ROSAControlPlane) ValidateDelete() (warnings admission.Warnings, err er func (r *ROSAControlPlane) validateVersion() *field.Error { _, err := semver.Parse(r.Spec.Version) if err != nil { - return field.Invalid(field.NewPath("spec.version"), r.Spec.Version, "version must be a valid semantic version") + return field.Invalid(field.NewPath("spec.version"), r.Spec.Version, "must be a valid semantic version") + } + + return nil +} + +func (r *ROSAControlPlane) validateNetwork() field.ErrorList { + var allErrs field.ErrorList + if r.Spec.Network == nil { + return allErrs + } + + rootPath := field.NewPath("spec", "network") + + if r.Spec.Network.MachineCIDR != "" { + _, _, err := net.ParseCIDR(r.Spec.Network.MachineCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(rootPath.Child("machineCIDR"), r.Spec.Network.MachineCIDR, "must be valid CIDR block")) + } + } + + if r.Spec.Network.PodCIDR != "" { + _, _, err := net.ParseCIDR(r.Spec.Network.PodCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(rootPath.Child("podCIDR"), r.Spec.Network.PodCIDR, "must be valid CIDR block")) + } + } + + if r.Spec.Network.ServiceCIDR != "" { + _, _, err := net.ParseCIDR(r.Spec.Network.ServiceCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(rootPath.Child("serviceCIDR"), r.Spec.Network.ServiceCIDR, "must be valid CIDR block")) + } + } + + return allErrs +} + +func (r *ROSAControlPlane) validateEtcdEncryptionKMSArn() *field.Error { + err := kmsArnRegexpValidator.ValidateKMSKeyARN(&r.Spec.EtcdEncryptionKMSARN) + if err != nil { + return field.Invalid(field.NewPath("spec.etcdEncryptionKMSARN"), r.Spec.EtcdEncryptionKMSARN, err.Error()) + } + + return nil +} + +func (r *ROSAControlPlane) validateExternalAuthProviders() *field.Error { + if !r.Spec.EnableExternalAuthProviders && len(r.Spec.ExternalAuthProviders) > 0 { + return field.Invalid(field.NewPath("spec.ExternalAuthProviders"), 
r.Spec.ExternalAuthProviders, + "can only be set if spec.EnableExternalAuthProviders is set to 'True'") } return nil diff --git a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go index 68ab8bae6c..3994429d4b 100644 --- a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go @@ -24,6 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -42,6 +43,125 @@ func (in *AWSRolesRef) DeepCopy() *AWSRolesRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultMachinePoolSpec) DeepCopyInto(out *DefaultMachinePoolSpec) { + *out = *in + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = new(expapiv1beta2.RosaMachinePoolAutoScaling) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultMachinePoolSpec. +func (in *DefaultMachinePoolSpec) DeepCopy() *DefaultMachinePoolSpec { + if in == nil { + return nil + } + out := new(DefaultMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalAuthProvider) DeepCopyInto(out *ExternalAuthProvider) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClaimMappings != nil { + in, out := &in.ClaimMappings, &out.ClaimMappings + *out = new(TokenClaimMappings) + (*in).DeepCopyInto(*out) + } + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]TokenClaimValidationRule, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAuthProvider. +func (in *ExternalAuthProvider) DeepCopy() *ExternalAuthProvider { + if in == nil { + return nil + } + out := new(ExternalAuthProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
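The new webhook validators above are plain functions over the spec, so they lend themselves to small table-style unit tests. A minimal sketch (hypothetical test, assuming a `_test.go` file in this package importing the standard `testing` package):

```go
func TestValidateNetworkRejectsBadCIDR(t *testing.T) {
	cp := &ROSAControlPlane{
		Spec: RosaControlPlaneSpec{
			Network: &NetworkSpec{
				MachineCIDR: "10.0.0.0/16", // valid, contributes no error
				PodCIDR:     "not-a-cidr",  // invalid, should yield exactly one field error
			},
		},
	}
	if errs := cp.validateNetwork(); len(errs) != 1 {
		t.Fatalf("expected one field error for spec.network.podCIDR, got %d", len(errs))
	}
}
```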
+func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig. +func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig { + if in == nil { + return nil + } + out := new(OIDCClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping. +func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping { + if in == nil { + return nil + } + out := new(PrefixedClaimMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ROSAControlPlane) DeepCopyInto(out *ROSAControlPlane) { *out = *in @@ -114,37 +234,26 @@ func (in *RosaControlPlaneSpec) DeepCopyInto(out *RosaControlPlaneSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.MachineCIDR != nil { - in, out := &in.MachineCIDR, &out.MachineCIDR - *out = new(string) - **out = **in - } - if in.Region != nil { - in, out := &in.Region, &out.Region - *out = new(string) - **out = **in - } - out.ControlPlaneEndpoint = in.ControlPlaneEndpoint out.RolesRef = in.RolesRef - if in.OIDCID != nil { - in, out := &in.OIDCID, &out.OIDCID - *out = new(string) - **out = **in - } - if in.InstallerRoleARN != nil { - in, out := &in.InstallerRoleARN, &out.InstallerRoleARN - *out = new(string) - **out = **in + if in.ExternalAuthProviders != nil { + in, out := &in.ExternalAuthProviders, &out.ExternalAuthProviders + *out = make([]ExternalAuthProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.SupportRoleARN != nil { - in, out := &in.SupportRoleARN, &out.SupportRoleARN - *out = new(string) + in.DefaultMachinePoolSpec.DeepCopyInto(&out.DefaultMachinePoolSpec) + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkSpec) **out = **in } - if in.WorkerRoleARN != nil { - in, out := &in.WorkerRoleARN, &out.WorkerRoleARN - *out = new(string) - **out = **in + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(apiv1beta2.Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } if in.CredentialsSecretRef != nil { in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef @@ -156,6 +265,7 @@ func (in *RosaControlPlaneSpec) DeepCopyInto(out *RosaControlPlaneSpec) { *out = new(apiv1beta2.AWSIdentityReference) **out = **in } + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaControlPlaneSpec. @@ -188,11 +298,6 @@ func (in *RosaControlPlaneStatus) DeepCopyInto(out *RosaControlPlaneStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaControlPlaneStatus. 
@@ -204,3 +309,104 @@ func (in *RosaControlPlaneStatus) DeepCopy() *RosaControlPlaneStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(UsernameClaimMapping) + (*in).DeepCopyInto(*out) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = new(PrefixedClaimMapping) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings. +func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { + if in == nil { + return nil + } + out := new(TokenClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { + *out = *in + out.RequiredClaim = in.RequiredClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule. +func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]TokenAudience, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthority != nil { + in, out := &in.CertificateAuthority, &out.CertificateAuthority + *out = new(LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer. +func (in *TokenIssuer) DeepCopy() *TokenIssuer { + if in == nil { + return nil + } + out := new(TokenIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim. +func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { + if in == nil { + return nil + } + out := new(TokenRequiredClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping. 
+func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping { + if in == nil { + return nil + } + out := new(UsernameClaimMapping) + in.DeepCopyInto(out) + return out +} diff --git a/controlplane/rosa/api/v1beta2/zz_generated.defaults.go b/controlplane/rosa/api/v1beta2/zz_generated.defaults.go index 510687638d..60d82ff4d7 100644 --- a/controlplane/rosa/api/v1beta2/zz_generated.defaults.go +++ b/controlplane/rosa/api/v1beta2/zz_generated.defaults.go @@ -30,9 +30,17 @@ import ( // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&ROSAControlPlane{}, func(obj interface{}) { SetObjectDefaults_ROSAControlPlane(obj.(*ROSAControlPlane)) }) + scheme.AddTypeDefaultingFunc(&ROSAControlPlaneList{}, func(obj interface{}) { SetObjectDefaults_ROSAControlPlaneList(obj.(*ROSAControlPlaneList)) }) return nil } func SetObjectDefaults_ROSAControlPlane(in *ROSAControlPlane) { SetDefaults_RosaControlPlaneSpec(&in.Spec) } + +func SetObjectDefaults_ROSAControlPlaneList(in *ROSAControlPlaneList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ROSAControlPlane(a) + } +} diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index 34a28ae4c8..e846a7a718 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -14,10 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package controllers provides a way to reconcile ROSA resources. package controllers import ( "context" + "encoding/json" "errors" "fmt" "net" @@ -26,15 +28,23 @@ import ( "strings" "time" + "github.com/google/go-cmp/cmp" + idputils "github.com/openshift-online/ocm-common/pkg/idp/utils" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + rosaaws "github.com/openshift/rosa/pkg/aws" + "github.com/openshift/rosa/pkg/ocm" + "github.com/zgalor/weberr" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/storage/names" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -44,9 +54,11 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" capiannotations "sigs.k8s.io/cluster-api/util/annotations" @@ -57,13 +69,18 @@ import ( ) const ( - rosaCreatorArnProperty = "rosa_creator_arn" - rosaControlPlaneKind = "ROSAControlPlane" // ROSAControlPlaneFinalizer allows the controller to clean up resources on delete. 
ROSAControlPlaneFinalizer = "rosacontrolplane.controlplane.cluster.x-k8s.io" + + // ROSAControlPlaneForceDeleteAnnotation annotation can be set to force the deletion of ROSAControlPlane bypassing any deletion validations/errors. + ROSAControlPlaneForceDeleteAnnotation = "controlplane.cluster.x-k8s.io/rosacontrolplane-force-delete" + + // ExternalAuthProviderLastAppliedAnnotation annotation tracks the last applied external auth configuration to inform if an update is required. + ExternalAuthProviderLastAppliedAnnotation = "controlplane.cluster.x-k8s.io/rosacontrolplane-last-applied-external-auth-provider" ) +// ROSAControlPlaneReconciler reconciles a ROSAControlPlane object. type ROSAControlPlaneReconciler struct { client.Client WatchFilterValue string @@ -106,12 +123,14 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete;patch // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes,verbs=get;list;watch;update;patch;delete // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/finalizers,verbs=update // Reconcile will reconcile RosaControlPlane Resources. 
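For reference, the force-delete annotation is consumed in the delete path further below: any value other than "false" switches the OCM delete call to best-effort. A hypothetical helper for setting it (a sketch only, assuming it lives in this controllers package so the constant and the controller-runtime client import resolve):

```go
// forceDeleteROSAControlPlane marks a ROSAControlPlane so that its delete
// reconciliation bypasses blocking OCM deletion errors (best-effort delete).
func forceDeleteROSAControlPlane(ctx context.Context, c client.Client, key client.ObjectKey) error {
	cp := &rosacontrolplanev1.ROSAControlPlane{}
	if err := c.Get(ctx, key, cp); err != nil {
		return err
	}
	if cp.Annotations == nil {
		cp.Annotations = map[string]string{}
	}
	cp.Annotations[ROSAControlPlaneForceDeleteAnnotation] = "true"
	return c.Update(ctx, cp)
}
```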
func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) { @@ -175,41 +194,47 @@ func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Req func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (res ctrl.Result, reterr error) { rosaScope.Info("Reconciling ROSAControlPlane") - // if !rosaScope.Cluster.Status.InfrastructureReady { - // rosaScope.Info("Cluster infrastructure is not ready yet") - // return ctrl.Result{RequeueAfter: r.WaitInfraPeriod}, nil - //} if controllerutil.AddFinalizer(rosaScope.ControlPlane, ROSAControlPlaneFinalizer) { if err := rosaScope.PatchObject(); err != nil { return ctrl.Result{}, err } } - rosaClient, err := rosa.NewRosaClient(ctx, rosaScope) + ocmClient, err := rosa.NewOCMClient(ctx, rosaScope) + if err != nil { + // TODO: need to expose in status, as likely the credentials are invalid + return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err) + } + + creator, err := rosaaws.CreatorForCallerIdentity(rosaScope.Identity) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create a rosa client: %w", err) + return ctrl.Result{}, fmt.Errorf("failed to transform caller identity to creator: %w", err) } - defer rosaClient.Close() - failureMessage, err := validateControlPlaneSpec(rosaClient, rosaScope) + validationMessage, err := validateControlPlaneSpec(ocmClient, rosaScope) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to validate ROSAControlPlane.spec: %w", err) } - if failureMessage != nil { - rosaScope.ControlPlane.Status.FailureMessage = failureMessage + + conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition) + if validationMessage != "" { + conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSAControlPlaneValidCondition, + rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, + clusterv1.ConditionSeverityError, + validationMessage) // dont' requeue because input is invalid and manual intervention is needed. 
return ctrl.Result{}, nil - } else { - rosaScope.ControlPlane.Status.FailureMessage = nil } + rosaScope.ControlPlane.Status.FailureMessage = nil - cluster, err := rosaClient.GetCluster() - if err != nil { + cluster, err := ocmClient.GetCluster(rosaScope.ControlPlane.Spec.RosaClusterName, creator) + if err != nil && weberr.GetType(err) != weberr.NotFound { return ctrl.Result{}, err } - if clusterID := cluster.ID(); clusterID != "" { - rosaScope.ControlPlane.Status.ID = &clusterID + if cluster != nil { + rosaScope.ControlPlane.Status.ID = cluster.ID() rosaScope.ControlPlane.Status.ConsoleURL = cluster.Console().URL() rosaScope.ControlPlane.Status.OIDCEndpointURL = cluster.AWS().STS().OIDCEndpointURL() rosaScope.ControlPlane.Status.Ready = false @@ -225,12 +250,25 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc } rosaScope.ControlPlane.Spec.ControlPlaneEndpoint = *apiEndpoint - if err := r.reconcileKubeconfig(ctx, rosaScope, rosaClient, cluster); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to reconcile kubeconfig: %w", err) + if err := r.updateOCMCluster(rosaScope, ocmClient, cluster, creator); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update rosa control plane: %w", err) } - if err := r.reconcileClusterVersion(rosaScope, rosaClient, cluster); err != nil { + if err := r.reconcileClusterVersion(rosaScope, ocmClient, cluster); err != nil { return ctrl.Result{}, err } + + if rosaScope.ControlPlane.Spec.EnableExternalAuthProviders { + if err := r.reconcileExternalAuth(ctx, rosaScope, cluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile external auth: %w", err) + } + } else { + // only reconcile a kubeconfig when external auth is not enabled. + // The user is expected to provide the kubeconfig for CAPI. + if err := r.reconcileKubeconfig(ctx, rosaScope, ocmClient, cluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile kubeconfig: %w", err) + } + } + return ctrl.Result{}, nil case cmv1.ClusterStateError: errorMessage := cluster.Status().ProvisionErrorMessage() @@ -256,126 +294,23 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{RequeueAfter: time.Second * 60}, nil } - // Create the cluster: - clusterBuilder := cmv1.NewCluster(). - Name(rosaScope.RosaClusterName()). - MultiAZ(true). - Product( - cmv1.NewProduct(). - ID("rosa"), - ). - Region( - cmv1.NewCloudRegion(). - ID(*rosaScope.ControlPlane.Spec.Region), - ). - FIPS(false). - EtcdEncryption(false). - DisableUserWorkloadMonitoring(true). - Version( - cmv1.NewVersion(). - ID(rosa.VersionID(rosaScope.ControlPlane.Spec.Version)). - ChannelGroup("stable"), - ). - ExpirationTimestamp(time.Now().Add(1 * time.Hour)). 
- Hypershift(cmv1.NewHypershift().Enabled(true)) - - networkBuilder := cmv1.NewNetwork() - networkBuilder = networkBuilder.Type("OVNKubernetes") - networkBuilder = networkBuilder.MachineCIDR(*rosaScope.ControlPlane.Spec.MachineCIDR) - clusterBuilder = clusterBuilder.Network(networkBuilder) - - stsBuilder := cmv1.NewSTS().RoleARN(*rosaScope.ControlPlane.Spec.InstallerRoleARN) - // stsBuilder = stsBuilder.ExternalID(config.ExternalID) - stsBuilder = stsBuilder.SupportRoleARN(*rosaScope.ControlPlane.Spec.SupportRoleARN) - roles := []*cmv1.OperatorIAMRoleBuilder{} - for _, role := range []struct { - Name string - Namespace string - RoleARN string - Path string - }{ - { - Name: "cloud-credentials", - Namespace: "openshift-ingress-operator", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.IngressARN, - }, - { - Name: "installer-cloud-credentials", - Namespace: "openshift-image-registry", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.ImageRegistryARN, - }, - { - Name: "ebs-cloud-credentials", - Namespace: "openshift-cluster-csi-drivers", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.StorageARN, - }, - { - Name: "cloud-credentials", - Namespace: "openshift-cloud-network-config-controller", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.NetworkARN, - }, - { - Name: "kube-controller-manager", - Namespace: "kube-system", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.KubeCloudControllerARN, - }, - { - Name: "kms-provider", - Namespace: "kube-system", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.KMSProviderARN, - }, - { - Name: "control-plane-operator", - Namespace: "kube-system", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.ControlPlaneOperatorARN, - }, - { - Name: "capa-controller-manager", - Namespace: "kube-system", - RoleARN: rosaScope.ControlPlane.Spec.RolesRef.NodePoolManagementARN, - }, - } { - roles = append(roles, cmv1.NewOperatorIAMRole(). - Name(role.Name). - Namespace(role.Namespace). - RoleARN(role.RoleARN)) - } - stsBuilder = stsBuilder.OperatorIAMRoles(roles...) - - instanceIAMRolesBuilder := cmv1.NewInstanceIAMRoles() - instanceIAMRolesBuilder.WorkerRoleARN(*rosaScope.ControlPlane.Spec.WorkerRoleARN) - stsBuilder = stsBuilder.InstanceIAMRoles(instanceIAMRolesBuilder) - stsBuilder.OidcConfig(cmv1.NewOidcConfig().ID(*rosaScope.ControlPlane.Spec.OIDCID)) - stsBuilder.AutoMode(true) - - awsBuilder := cmv1.NewAWS(). - AccountID(*rosaScope.Identity.Account). - BillingAccountID(*rosaScope.Identity.Account). - SubnetIDs(rosaScope.ControlPlane.Spec.Subnets...). - STS(stsBuilder) - clusterBuilder = clusterBuilder.AWS(awsBuilder) - - clusterNodesBuilder := cmv1.NewClusterNodes() - clusterNodesBuilder = clusterNodesBuilder.AvailabilityZones(rosaScope.ControlPlane.Spec.AvailabilityZones...) 
- clusterBuilder = clusterBuilder.Nodes(clusterNodesBuilder) - - clusterProperties := map[string]string{} - clusterProperties[rosaCreatorArnProperty] = *rosaScope.Identity.Arn - - clusterBuilder = clusterBuilder.Properties(clusterProperties) - clusterSpec, err := clusterBuilder.Build() + ocmClusterSpec, err := buildOCMClusterSpec(rosaScope.ControlPlane.Spec, creator) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create description of cluster: %v", err) + return ctrl.Result{}, err } - newCluster, err := rosaClient.CreateCluster(clusterSpec) + cluster, err = ocmClient.CreateCluster(ocmClusterSpec) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create ROSA cluster: %w", err) + conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1.ReconciliationFailedReason, + clusterv1.ConditionSeverityError, + err.Error()) + return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err) } - rosaScope.Info("cluster created", "state", newCluster.Status().State()) - clusterID := newCluster.ID() - rosaScope.ControlPlane.Status.ID = &clusterID + rosaScope.Info("cluster created", "state", cluster.Status().State()) + rosaScope.ControlPlane.Status.ID = cluster.ID() return ctrl.Result{}, nil } @@ -383,49 +318,105 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (res ctrl.Result, reterr error) { rosaScope.Info("Reconciling ROSAControlPlane delete") - rosaClient, err := rosa.NewRosaClient(ctx, rosaScope) + // Deleting MachinePools first. + deleted, err := r.deleteMachinePools(ctx, rosaScope) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create a rosa client: %w", err) + return ctrl.Result{}, err + } + if !deleted { + // Reconcile after 1 min giving time for machinePools to be deleted. + return ctrl.Result{RequeueAfter: time.Minute}, nil } - defer rosaClient.Close() - cluster, err := rosaClient.GetCluster() + ocmClient, err := rosa.NewOCMClient(ctx, rosaScope) if err != nil { - return ctrl.Result{}, err + // TODO: need to expose in status, as likely the credentials are invalid + return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err) } + creator, err := rosaaws.CreatorForCallerIdentity(rosaScope.Identity) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to transform caller identity to creator: %w", err) + } + + cluster, err := ocmClient.GetCluster(rosaScope.ControlPlane.Spec.RosaClusterName, creator) + if err != nil && weberr.GetType(err) != weberr.NotFound { + return ctrl.Result{}, err + } if cluster == nil { - // cluster is fully deleted, remove finalizer. + // cluster and machinepools are deleted, removing finalizer. 
controllerutil.RemoveFinalizer(rosaScope.ControlPlane, ROSAControlPlaneFinalizer) + return ctrl.Result{}, nil } + bestEffort := false + if value, found := annotations.Get(rosaScope.ControlPlane, ROSAControlPlaneForceDeleteAnnotation); found && value != "false" { + bestEffort = true + } + if cluster.Status().State() != cmv1.ClusterStateUninstalling { - if err := rosaClient.DeleteCluster(cluster.ID()); err != nil { + if _, err := ocmClient.DeleteCluster(cluster.ID(), bestEffort, creator); err != nil { + conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1.ROSAControlPlaneDeletionFailedReason, + clusterv1.ConditionSeverityError, + "failed to delete ROSAControlPlane: %s; if the error can't be resolved, set '%s' annotation to force the deletion", + err.Error(), + ROSAControlPlaneForceDeleteAnnotation) return ctrl.Result{}, err } } + conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSAControlPlaneReadyCondition, + string(cluster.Status().State()), + clusterv1.ConditionSeverityInfo, + "deleting") rosaScope.ControlPlane.Status.Ready = false rosaScope.Info("waiting for cluster to be deleted") // Requeue to remove the finalizer when the cluster is fully deleted. return ctrl.Result{RequeueAfter: time.Second * 60}, nil } -func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, rosaClient *rosa.RosaClient, cluster *cmv1.Cluster) error { +// deleteMachinePools check if the controlplane has related machinePools and delete them. +func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (bool, error) { + machinePools, err := utils.GetMachinePools(ctx, rosaScope.Client, rosaScope.Cluster.Name, rosaScope.Cluster.Namespace) + if err != nil { + return false, err + } + + var errs []error + for id, mp := range machinePools { + if !mp.DeletionTimestamp.IsZero() { + continue + } + if err = rosaScope.Client.Delete(ctx, &machinePools[id]); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return false, kerrors.NewAggregate(errs) + } + + return len(machinePools) == 0, nil +} + +func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster) error { version := rosaScope.ControlPlane.Spec.Version if version == rosa.RawVersionID(cluster.Version()) { conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") return nil } - scheduledUpgrade, err := rosaClient.CheckExistingScheduledUpgrade(cluster) + scheduledUpgrade, err := rosa.CheckExistingScheduledUpgrade(ocmClient, cluster) if err != nil { return fmt.Errorf("failed to get existing scheduled upgrades: %w", err) } if scheduledUpgrade == nil { - scheduledUpgrade, err = rosaClient.ScheduleControlPlaneUpgrade(cluster, version, time.Now()) + scheduledUpgrade, err = rosa.ScheduleControlPlaneUpgrade(ocmClient, cluster, version, time.Now()) if err != nil { return fmt.Errorf("failed to schedule control plane upgrade to version %s: %w", version, err) } @@ -447,7 +438,242 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO return nil } -func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, rosaClient *rosa.RosaClient, cluster *cmv1.Cluster) error { +func (r *ROSAControlPlaneReconciler) 
updateOCMCluster(rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster, creator *rosaaws.Creator) error { + currentAuditLogRole := cluster.AWS().AuditLog().RoleArn() + if currentAuditLogRole == rosaScope.ControlPlane.Spec.AuditLogRoleARN { + return nil + } + + ocmClusterSpec := ocm.Spec{ + AuditLogRoleARN: ptr.To(rosaScope.ControlPlane.Spec.AuditLogRoleARN), + } + + // if this fails, the provided role is likely invalid or it doesn't have the required permissions. + if err := ocmClient.UpdateCluster(cluster.ID(), creator, ocmClusterSpec); err != nil { + conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSAControlPlaneValidCondition, + rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, + clusterv1.ConditionSeverityError, + err.Error()) + return err + } + + return nil +} + +func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, cluster *cmv1.Cluster) error { + externalAuthClient, err := rosa.NewExternalAuthClient(ctx, rosaScope) + if err != nil { + return fmt.Errorf("failed to create external auth client: %v", err) + } + defer externalAuthClient.Close() + + var errs []error + if err := r.reconcileExternalAuthProviders(ctx, externalAuthClient, rosaScope, cluster); err != nil { + errs = append(errs, err) + conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ExternalAuthConfiguredCondition, + rosacontrolplanev1.ReconciliationFailedReason, + clusterv1.ConditionSeverityError, + err.Error()) + } else { + conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition) + } + + if err := r.reconcileExternalAuthBootstrapKubeconfig(ctx, externalAuthClient, rosaScope, cluster); err != nil { + errs = append(errs, err) + } + + return kerrors.NewAggregate(errs) +} + +func (r *ROSAControlPlaneReconciler) reconcileExternalAuthProviders(ctx context.Context, externalAuthClient *rosa.ExternalAuthClient, rosaScope *scope.ROSAControlPlaneScope, cluster *cmv1.Cluster) error { + externalAuths, err := externalAuthClient.ListExternalAuths(cluster.ID()) + if err != nil { + return fmt.Errorf("failed to list external auths: %v", err) + } + + if len(rosaScope.ControlPlane.Spec.ExternalAuthProviders) == 0 { + if len(externalAuths) > 0 { + if err := externalAuthClient.DeleteExternalAuth(cluster.ID(), externalAuths[0].ID()); err != nil { + return fmt.Errorf("failed to delete external auth provider %s: %v", externalAuths[0].ID(), err) + } + } + + return nil + } + + authProvider := rosaScope.ControlPlane.Spec.ExternalAuthProviders[0] + shouldUpdate := false + if len(externalAuths) > 0 { + existingProvider := externalAuths[0] + // name/ID can't be patched, we need to delete the old provider and create a new one. + if existingProvider.ID() != authProvider.Name { + if err := externalAuthClient.DeleteExternalAuth(cluster.ID(), existingProvider.ID()); err != nil { + return fmt.Errorf("failed to delete external auth provider %s: %v", existingProvider.ID(), err) + } + } else { + jsonAnnotation := rosaScope.ControlPlane.Annotations[ExternalAuthProviderLastAppliedAnnotation] + if len(jsonAnnotation) != 0 { + var lastAppliedAuthProvider rosacontrolplanev1.ExternalAuthProvider + err := json.Unmarshal([]byte(jsonAnnotation), &lastAppliedAuthProvider) + if err != nil { + return fmt.Errorf("failed to unmarshal '%s' annotaion content: %v", ExternalAuthProviderLastAppliedAnnotation, err) + } + + // if there were no changes, return. 
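				// The annotation holds the JSON-serialized provider from the previous successful
				// reconcile (written back near the end of this function), so the cmp.Equal check
				// below can skip no-op update calls to OCM when nothing changed.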
+ if cmp.Equal(authProvider, lastAppliedAuthProvider) { + return nil + } + } + + shouldUpdate = true + } + } + + externalAuthBuilder := cmv1.NewExternalAuth().ID(authProvider.Name) + + // issuer builder + audiences := make([]string, 0, len(authProvider.Issuer.Audiences)) + for _, a := range authProvider.Issuer.Audiences { + audiences = append(audiences, string(a)) + } + tokenIssuerBuilder := cmv1.NewTokenIssuer().URL(authProvider.Issuer.URL). + Audiences(audiences...) + + if authProvider.Issuer.CertificateAuthority != nil { + CertificateAuthorityConfigMap := &corev1.ConfigMap{} + err := rosaScope.Client.Get(ctx, types.NamespacedName{Namespace: rosaScope.Namespace(), Name: authProvider.Issuer.CertificateAuthority.Name}, CertificateAuthorityConfigMap) + if err != nil { + return fmt.Errorf("failed to get issuer CertificateAuthority configMap %s: %v", authProvider.Issuer.CertificateAuthority.Name, err) + } + CertificateAuthorityValue := CertificateAuthorityConfigMap.Data["ca-bundle.crt"] + + tokenIssuerBuilder.CA(CertificateAuthorityValue) + } + externalAuthBuilder.Issuer(tokenIssuerBuilder) + + // oidc-clients builder + clientsBuilders := make([]*cmv1.ExternalAuthClientConfigBuilder, 0, len(authProvider.OIDCClients)) + for _, client := range authProvider.OIDCClients { + secretObj := &corev1.Secret{} + err := rosaScope.Client.Get(ctx, types.NamespacedName{Namespace: rosaScope.Namespace(), Name: client.ClientSecret.Name}, secretObj) + if err != nil { + return fmt.Errorf("failed to get client secret %s: %v", client.ClientSecret.Name, err) + } + clientSecretValue := string(secretObj.Data["clientSecret"]) + + clientsBuilders = append(clientsBuilders, cmv1.NewExternalAuthClientConfig(). + ID(client.ClientID).Secret(clientSecretValue). + Component(cmv1.NewClientComponent().Name(client.ComponentName).Namespace(client.ComponentNamespace))) + } + externalAuthBuilder.Clients(clientsBuilders...) + + // claims builder + if authProvider.ClaimMappings != nil { + clainMappingsBuilder := cmv1.NewTokenClaimMappings() + if authProvider.ClaimMappings.Groups != nil { + clainMappingsBuilder.Groups(cmv1.NewGroupsClaim().Claim(authProvider.ClaimMappings.Groups.Claim). + Prefix(authProvider.ClaimMappings.Groups.Prefix)) + } + + if authProvider.ClaimMappings.Username != nil { + usernameClaimBuilder := cmv1.NewUsernameClaim().Claim(authProvider.ClaimMappings.Username.Claim). + PrefixPolicy(string(authProvider.ClaimMappings.Username.PrefixPolicy)) + if authProvider.ClaimMappings.Username.Prefix != nil { + usernameClaimBuilder.Prefix(*authProvider.ClaimMappings.Username.Prefix) + } + + clainMappingsBuilder.UserName(usernameClaimBuilder) + } + + claimBuilder := cmv1.NewExternalAuthClaim().Mappings(clainMappingsBuilder) + + validationRulesbuilders := make([]*cmv1.TokenClaimValidationRuleBuilder, 0, len(authProvider.ClaimValidationRules)) + for _, rule := range authProvider.ClaimValidationRules { + validationRulesbuilders = append(validationRulesbuilders, cmv1.NewTokenClaimValidationRule(). + Claim(rule.RequiredClaim.Claim).RequiredValue(rule.RequiredClaim.RequiredValue)) + } + claimBuilder.ValidationRules(validationRulesbuilders...) 
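		// Attach the assembled claim mappings and validation rules to the external auth spec.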
+
+		externalAuthBuilder.Claim(claimBuilder)
+	}
+
+	externalAuthConfig, err := externalAuthBuilder.Build()
+	if err != nil {
+		return fmt.Errorf("failed to build external auth config: %v", err)
+	}
+
+	if shouldUpdate {
+		_, err = externalAuthClient.UpdateExternalAuth(cluster.ID(), externalAuthConfig)
+		if err != nil {
+			return fmt.Errorf("failed to update external authentication provider '%s' for cluster '%s': %v",
+				externalAuthConfig.ID(), rosaScope.InfraClusterName(), err)
+		}
+	} else {
+		_, err = externalAuthClient.CreateExternalAuth(cluster.ID(), externalAuthConfig)
+		if err != nil {
+			return fmt.Errorf("failed to create external authentication provider '%s' for cluster '%s': %v",
+				externalAuthConfig.ID(), rosaScope.InfraClusterName(), err)
+		}
+	}
+
+	lastAppliedAnnotation, err := json.Marshal(authProvider)
+	if err != nil {
+		return err
+	}
+
+	if rosaScope.ControlPlane.Annotations == nil {
+		rosaScope.ControlPlane.Annotations = make(map[string]string)
+	}
+	rosaScope.ControlPlane.Annotations[ExternalAuthProviderLastAppliedAnnotation] = string(lastAppliedAnnotation)
+
+	return nil
+}
+
+// Generates a temporary admin kubeconfig using break-glass credentials for the user to bootstrap their environment, like setting up RBAC for OIDC users/groups.
+// This kubeconfig is created only once initially and is valid for only 24h.
+// The kubeconfig secret will not be automatically rotated and will be invalid after 24h. However, users can opt to manually delete the secret to trigger the generation of a new one, which will be valid for another 24h.
+func (r *ROSAControlPlaneReconciler) reconcileExternalAuthBootstrapKubeconfig(ctx context.Context, externalAuthClient *rosa.ExternalAuthClient, rosaScope *scope.ROSAControlPlaneScope, cluster *cmv1.Cluster) error {
+	kubeconfigSecret := rosaScope.ExternalAuthBootstrapKubeconfigSecret()
+	err := r.Client.Get(ctx, client.ObjectKeyFromObject(kubeconfigSecret), kubeconfigSecret)
+	if err == nil {
+		// already exists.
+		return nil
+	} else if !apierrors.IsNotFound(err) {
+		return fmt.Errorf("failed to get bootstrap kubeconfig secret: %w", err)
+	}
+
+	// kubeconfig doesn't exist, generate a new one.
+	breakGlassConfig, err := cmv1.NewBreakGlassCredential().
+		Username(names.SimpleNameGenerator.GenerateName("capi-admin-")). // OCM requires unique usernames
+		ExpirationTimestamp(time.Now().Add(time.Hour * 24)).
+ Build() + if err != nil { + return fmt.Errorf("failed to build break glass config: %v", err) + } + + breakGlassCredential, err := externalAuthClient.CreateBreakGlassCredential(cluster.ID(), breakGlassConfig) + if err != nil { + return fmt.Errorf("failed to create break glass credential: %v", err) + } + + kubeconfigData, err := externalAuthClient.PollKubeconfig(ctx, cluster.ID(), breakGlassCredential.ID()) + if err != nil { + return fmt.Errorf("failed to poll break glass kubeconfig: %v", err) + } + + kubeconfigSecret.Data = map[string][]byte{ + "value": []byte(kubeconfigData), + } + if err := r.Client.Create(ctx, kubeconfigSecret); err != nil { + return fmt.Errorf("failed to create external auth bootstrap kubeconfig: %v", err) + } + + return nil +} + +func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster) error { rosaScope.Debug("Reconciling ROSA kubeconfig for cluster", "cluster-name", rosaScope.RosaClusterName()) clusterRef := client.ObjectKeyFromObject(rosaScope.Cluster) @@ -469,7 +695,7 @@ func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, ro apiServerURL := cluster.API().URL() // create new user with admin privileges in the ROSA cluster if 'userName' doesn't already exist. - err = rosaClient.CreateAdminUserIfNotExist(cluster.ID(), userName, password) + err = rosa.CreateAdminUserIfNotExist(ocmClient, cluster.ID(), userName, password) if err != nil { return err } @@ -543,19 +769,15 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterAdminPassword(ctx context.C } else if !apierrors.IsNotFound(err) { return "", fmt.Errorf("failed to get cluster admin password secret: %w", err) } + password, err := idputils.GenerateRandomPassword() // Generate a new password and create the secret - password, err := rosa.GenerateRandomPassword() if err != nil { return "", err } - controllerOwnerRef := *metav1.NewControllerRef(rosaScope.ControlPlane, rosacontrolplanev1.GroupVersion.WithKind("ROSAControlPlane")) passwordSecret.Data = map[string][]byte{ "value": []byte(password), } - passwordSecret.OwnerReferences = []metav1.OwnerReference{ - controllerOwnerRef, - } if err := r.Client.Create(ctx, passwordSecret); err != nil { return "", err } @@ -563,20 +785,155 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterAdminPassword(ctx context.C return password, nil } -func validateControlPlaneSpec(rosaClient *rosa.RosaClient, rosaScope *scope.ROSAControlPlaneScope) (*string, error) { +func validateControlPlaneSpec(ocmClient *ocm.Client, rosaScope *scope.ROSAControlPlaneScope) (string, error) { version := rosaScope.ControlPlane.Spec.Version - isSupported, err := rosaClient.IsVersionSupported(version) + valid, err := ocmClient.ValidateHypershiftVersion(version, ocm.DefaultChannelGroup) if err != nil { - return nil, fmt.Errorf("failed to verify if version is supported: %w", err) + return "", fmt.Errorf("failed to check if version is valid: %w", err) } - - if !isSupported { - message := fmt.Sprintf("version %s is not supported", version) - return &message, nil + if !valid { + return fmt.Sprintf("version %s is not supported", version), nil } // TODO: add more input validations - return nil, nil + return "", nil +} + +func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpec, creator *rosaaws.Creator) (ocm.Spec, error) { + billingAccount := controlPlaneSpec.BillingAccount + if billingAccount == "" { + billingAccount = creator.AccountID + } + + 
ocmClusterSpec := ocm.Spec{ + DryRun: ptr.To(false), + Name: controlPlaneSpec.RosaClusterName, + DomainPrefix: controlPlaneSpec.DomainPrefix, + Region: controlPlaneSpec.Region, + MultiAZ: true, + Version: ocm.CreateVersionID(controlPlaneSpec.Version, ocm.DefaultChannelGroup), + ChannelGroup: ocm.DefaultChannelGroup, + DisableWorkloadMonitoring: ptr.To(true), + DefaultIngress: ocm.NewDefaultIngressSpec(), // n.b. this is a no-op when it's set to the default value + ComputeMachineType: controlPlaneSpec.DefaultMachinePoolSpec.InstanceType, + AvailabilityZones: controlPlaneSpec.AvailabilityZones, + Tags: controlPlaneSpec.AdditionalTags, + EtcdEncryption: controlPlaneSpec.EtcdEncryptionKMSARN != "", + EtcdEncryptionKMSArn: controlPlaneSpec.EtcdEncryptionKMSARN, + + SubnetIds: controlPlaneSpec.Subnets, + IsSTS: true, + RoleARN: controlPlaneSpec.InstallerRoleARN, + SupportRoleARN: controlPlaneSpec.SupportRoleARN, + WorkerRoleARN: controlPlaneSpec.WorkerRoleARN, + OperatorIAMRoles: operatorIAMRoles(controlPlaneSpec.RolesRef), + OidcConfigId: controlPlaneSpec.OIDCID, + Mode: "auto", + Hypershift: ocm.Hypershift{ + Enabled: true, + }, + BillingAccount: billingAccount, + AWSCreator: creator, + AuditLogRoleARN: ptr.To(controlPlaneSpec.AuditLogRoleARN), + ExternalAuthProvidersEnabled: controlPlaneSpec.EnableExternalAuthProviders, + } + + if controlPlaneSpec.EndpointAccess == rosacontrolplanev1.Private { + ocmClusterSpec.Private = ptr.To(true) + ocmClusterSpec.PrivateLink = ptr.To(true) + } + + if networkSpec := controlPlaneSpec.Network; networkSpec != nil { + if networkSpec.MachineCIDR != "" { + _, machineCIDR, err := net.ParseCIDR(networkSpec.MachineCIDR) + if err != nil { + return ocmClusterSpec, err + } + ocmClusterSpec.MachineCIDR = *machineCIDR + } + + if networkSpec.PodCIDR != "" { + _, podCIDR, err := net.ParseCIDR(networkSpec.PodCIDR) + if err != nil { + return ocmClusterSpec, err + } + ocmClusterSpec.PodCIDR = *podCIDR + } + + if networkSpec.ServiceCIDR != "" { + _, serviceCIDR, err := net.ParseCIDR(networkSpec.ServiceCIDR) + if err != nil { + return ocmClusterSpec, err + } + ocmClusterSpec.ServiceCIDR = *serviceCIDR + } + + ocmClusterSpec.HostPrefix = networkSpec.HostPrefix + ocmClusterSpec.NetworkType = networkSpec.NetworkType + } + + // Set cluster compute autoscaling replicas + // In case autoscaling is not defined and multiple zones defined, set the compute nodes equal to the zones count. 
+ if computeAutoscaling := controlPlaneSpec.DefaultMachinePoolSpec.Autoscaling; computeAutoscaling != nil { + ocmClusterSpec.Autoscaling = true + ocmClusterSpec.MaxReplicas = computeAutoscaling.MaxReplicas + ocmClusterSpec.MinReplicas = computeAutoscaling.MinReplicas + } else if len(controlPlaneSpec.AvailabilityZones) > 1 { + ocmClusterSpec.ComputeNodes = len(controlPlaneSpec.AvailabilityZones) + } + + if controlPlaneSpec.ProvisionShardID != "" { + ocmClusterSpec.CustomProperties = map[string]string{ + "provision_shard_id": controlPlaneSpec.ProvisionShardID, + } + } + + return ocmClusterSpec, nil +} + +func operatorIAMRoles(rolesRef rosacontrolplanev1.AWSRolesRef) []ocm.OperatorIAMRole { + return []ocm.OperatorIAMRole{ + { + Name: "cloud-credentials", + Namespace: "openshift-ingress-operator", + RoleARN: rolesRef.IngressARN, + }, + { + Name: "installer-cloud-credentials", + Namespace: "openshift-image-registry", + RoleARN: rolesRef.ImageRegistryARN, + }, + { + Name: "ebs-cloud-credentials", + Namespace: "openshift-cluster-csi-drivers", + RoleARN: rolesRef.StorageARN, + }, + { + Name: "cloud-credentials", + Namespace: "openshift-cloud-network-config-controller", + RoleARN: rolesRef.NetworkARN, + }, + { + Name: "kube-controller-manager", + Namespace: "kube-system", + RoleARN: rolesRef.KubeCloudControllerARN, + }, + { + Name: "kms-provider", + Namespace: "kube-system", + RoleARN: rolesRef.KMSProviderARN, + }, + { + Name: "control-plane-operator", + Namespace: "kube-system", + RoleARN: rolesRef.ControlPlaneOperatorARN, + }, + { + Name: "capa-controller-manager", + Namespace: "kube-system", + RoleARN: rolesRef.NodePoolManagementARN, + }, + } } func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.Logger) handler.MapFunc { diff --git a/docs/book/cmd/amilist/main.go b/docs/book/cmd/amilist/main.go index 33cc0113e8..a6e5513bbe 100644 --- a/docs/book/cmd/amilist/main.go +++ b/docs/book/cmd/amilist/main.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package main provides a Lambda function to list AMIs and upload them to an S3 bucket. package main import ( diff --git a/docs/book/cmd/clusterawsadmdocs/main.go b/docs/book/cmd/clusterawsadmdocs/main.go index 05c6de2866..69c7c1d42d 100644 --- a/docs/book/cmd/clusterawsadmdocs/main.go +++ b/docs/book/cmd/clusterawsadmdocs/main.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package main provides a way to generate a command reference for clusterawsadm. 
package main import ( diff --git a/docs/book/src/SUMMARY_PREFIX.md b/docs/book/src/SUMMARY_PREFIX.md index 55a3083769..3bea32cfa8 100644 --- a/docs/book/src/SUMMARY_PREFIX.md +++ b/docs/book/src/SUMMARY_PREFIX.md @@ -24,6 +24,10 @@ - [ROSA Support](./topics/rosa/index.md) - [Enabling ROSA Support](./topics/rosa/enabling.md) - [Creating a cluster](./topics/rosa/creating-a-cluster.md) + - [Creating MachinePools](./topics/rosa/creating-rosa-machinepools.md) + - [Upgrades](./topics/rosa/upgrades.md) + - [External Auth Providers](./topics/rosa/external-auth.md) + - [Support](./topics/rosa/support.md) - [Bring Your Own AWS Infrastructure](./topics/bring-your-own-aws-infrastructure.md) - [Specifying the IAM Role to use for Management Components](./topics/specify-management-iam-role.md) - [Using external cloud provider with EBS CSI driver](./topics/external-cloud-provider-with-ebs-csi-driver.md) @@ -40,3 +44,4 @@ - [Instance Metadata](./topics/instance-metadata.md) - [Network Load Balancers](./topics/network-load-balancer-with-awscluster.md) - [Secondary Control Plane Load Balancer](./topics/secondary-load-balancer.md) + - [Provision AWS Local Zone subnets](./topics/provision-edge-zones.md) diff --git a/docs/book/src/SUMMARY_SUFFIX.md b/docs/book/src/SUMMARY_SUFFIX.md index 6a45ce811d..4470b87f47 100644 --- a/docs/book/src/SUMMARY_SUFFIX.md +++ b/docs/book/src/SUMMARY_SUFFIX.md @@ -2,6 +2,7 @@ - [Development with Tilt](./development/tilt-setup.md) - [Developing E2E tests](./development/e2e.md) - [Coding Conventions](./development/conventions.md) + - [Try unreleased changes with Nightly Builds](./development/nightlies.md) - [CRD Reference](./crd/index.md) - [Reference](./topics/reference/reference.md) - [Glossary](./topics/reference/glossary.md) diff --git a/docs/book/src/crd/index.md b/docs/book/src/crd/index.md index 9ee3ff5135..4c456b22ce 100644 --- a/docs/book/src/crd/index.md +++ b/docs/book/src/crd/index.md @@ -2636,6 +2636,9 @@ string

bootstrap.cluster.x-k8s.io/v1beta2

+

+

Package v1beta2 contains API Schema definitions for the Amazon EKS Bootstrap v1beta2 API group.

+

Resource Types:

    DiskSetup @@ -4351,8 +4354,8 @@ AWSIdentityReference -(Optional) -

    IdentityRef is a reference to a identity to be used when reconciling the managed control plane.

    +

    IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

    @@ -4757,8 +4760,8 @@ AWSIdentityReference -(Optional) -

    IdentityRef is a reference to a identity to be used when reconciling the managed control plane.

    +

    IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

    @@ -5752,6 +5755,7 @@ bool (Appears on:AWSManagedControlPlaneStatus)

    +

    IdentityProviderStatus holds the status for associated identity provider

    @@ -5864,6 +5868,7 @@ string (Appears on:AWSManagedControlPlaneSpec)

    +

    OIDCIdentityProviderConfig defines the configuration for an OIDC identity provider.

    @@ -6168,7 +6173,7 @@ KubernetesMapping

    controlplane.cluster.x-k8s.io/v1beta2

    -

    package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group

    +

    Package v1beta2 contains API Schema definitions for the controlplane v1beta2 API group

    Resource Types:
      @@ -6236,8 +6241,8 @@ AWSIdentityReference @@ -6639,8 +6644,8 @@ AWSIdentityReference @@ -7631,6 +7636,7 @@ bool (Appears on:AWSManagedControlPlaneStatus)

      +

      IdentityProviderStatus holds the status for associated identity provider.

      -(Optional) -

      IdentityRef is a reference to a identity to be used when reconciling the managed control plane.

      +

      IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

      -(Optional) -

      IdentityRef is a reference to a identity to be used when reconciling the managed control plane.

      +

      IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

      @@ -7743,6 +7749,7 @@ string (Appears on:AWSManagedControlPlaneSpec)

      +

      OIDCIdentityProviderConfig represents the configuration for an OIDC identity provider.

      @@ -8059,22 +8066,452 @@ Amazon VPC CNI addon.

      -
      -

      infrastructure.cluster.x-k8s.io/v1beta1

      +

      AWSRolesRef +

      -

      Package v1beta1 contains the v1beta1 API implementation.

      +(Appears on:RosaControlPlaneSpec)

      -Resource Types: -
        -

        AMIReference +

        +

        AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.

        +

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        FieldDescription
        +ingressARN
        + +string + +
        +

        The referenced role must have a trust relationship that allows it to be assumed via web identity. +https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. +Example: +{ +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Effect”: “Allow”, +“Principal”: { +“Federated”: “{{ .ProviderARN }}” +}, +“Action”: “sts:AssumeRoleWithWebIdentity”, +“Condition”: { +“StringEquals”: { +“{{ .ProviderName }}:sub”: {{ .ServiceAccounts }} +} +} +} +] +}

        +

        IngressARN is an ARN value referencing a role appropriate for the Ingress Operator.

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Effect”: “Allow”, +“Action”: [ +“elasticloadbalancing:DescribeLoadBalancers”, +“tag:GetResources”, +“route53:ListHostedZones” +], +“Resource”: “*” +}, +{ +“Effect”: “Allow”, +“Action”: [ +“route53:ChangeResourceRecordSets” +], +“Resource”: [ +“arn:aws:route53:::PUBLIC_ZONE_ID”, +“arn:aws:route53:::PRIVATE_ZONE_ID” +] +} +] +}

        +
        +imageRegistryARN
        + +string + +
        +

        ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator.

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Effect”: “Allow”, +“Action”: [ +“s3:CreateBucket”, +“s3:DeleteBucket”, +“s3:PutBucketTagging”, +“s3:GetBucketTagging”, +“s3:PutBucketPublicAccessBlock”, +“s3:GetBucketPublicAccessBlock”, +“s3:PutEncryptionConfiguration”, +“s3:GetEncryptionConfiguration”, +“s3:PutLifecycleConfiguration”, +“s3:GetLifecycleConfiguration”, +“s3:GetBucketLocation”, +“s3:ListBucket”, +“s3:GetObject”, +“s3:PutObject”, +“s3:DeleteObject”, +“s3:ListBucketMultipartUploads”, +“s3:AbortMultipartUpload”, +“s3:ListMultipartUploadParts” +], +“Resource”: “*” +} +] +}

        +
        +storageARN
        + +string + +
        +

        StorageARN is an ARN value referencing a role appropriate for the Storage Operator.

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Effect”: “Allow”, +“Action”: [ +“ec2:AttachVolume”, +“ec2:CreateSnapshot”, +“ec2:CreateTags”, +“ec2:CreateVolume”, +“ec2:DeleteSnapshot”, +“ec2:DeleteTags”, +“ec2:DeleteVolume”, +“ec2:DescribeInstances”, +“ec2:DescribeSnapshots”, +“ec2:DescribeTags”, +“ec2:DescribeVolumes”, +“ec2:DescribeVolumesModifications”, +“ec2:DetachVolume”, +“ec2:ModifyVolume” +], +“Resource”: “*” +} +] +}

        +
        +networkARN
        + +string + +
        +

        NetworkARN is an ARN value referencing a role appropriate for the Network Operator.

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Effect”: “Allow”, +“Action”: [ +“ec2:DescribeInstances”, +“ec2:DescribeInstanceStatus”, +“ec2:DescribeInstanceTypes”, +“ec2:UnassignPrivateIpAddresses”, +“ec2:AssignPrivateIpAddresses”, +“ec2:UnassignIpv6Addresses”, +“ec2:AssignIpv6Addresses”, +“ec2:DescribeSubnets”, +“ec2:DescribeNetworkInterfaces” +], +“Resource”: “*” +} +] +}

        +
        +kubeCloudControllerARN
        + +string + +
        +

        KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. +Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Action”: [ +“autoscaling:DescribeAutoScalingGroups”, +“autoscaling:DescribeLaunchConfigurations”, +“autoscaling:DescribeTags”, +“ec2:DescribeAvailabilityZones”, +“ec2:DescribeInstances”, +“ec2:DescribeImages”, +“ec2:DescribeRegions”, +“ec2:DescribeRouteTables”, +“ec2:DescribeSecurityGroups”, +“ec2:DescribeSubnets”, +“ec2:DescribeVolumes”, +“ec2:CreateSecurityGroup”, +“ec2:CreateTags”, +“ec2:CreateVolume”, +“ec2:ModifyInstanceAttribute”, +“ec2:ModifyVolume”, +“ec2:AttachVolume”, +“ec2:AuthorizeSecurityGroupIngress”, +“ec2:CreateRoute”, +“ec2:DeleteRoute”, +“ec2:DeleteSecurityGroup”, +“ec2:DeleteVolume”, +“ec2:DetachVolume”, +“ec2:RevokeSecurityGroupIngress”, +“ec2:DescribeVpcs”, +“elasticloadbalancing:AddTags”, +“elasticloadbalancing:AttachLoadBalancerToSubnets”, +“elasticloadbalancing:ApplySecurityGroupsToLoadBalancer”, +“elasticloadbalancing:CreateLoadBalancer”, +“elasticloadbalancing:CreateLoadBalancerPolicy”, +“elasticloadbalancing:CreateLoadBalancerListeners”, +“elasticloadbalancing:ConfigureHealthCheck”, +“elasticloadbalancing:DeleteLoadBalancer”, +“elasticloadbalancing:DeleteLoadBalancerListeners”, +“elasticloadbalancing:DescribeLoadBalancers”, +“elasticloadbalancing:DescribeLoadBalancerAttributes”, +“elasticloadbalancing:DetachLoadBalancerFromSubnets”, +“elasticloadbalancing:DeregisterInstancesFromLoadBalancer”, +“elasticloadbalancing:ModifyLoadBalancerAttributes”, +“elasticloadbalancing:RegisterInstancesWithLoadBalancer”, +“elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer”, +“elasticloadbalancing:AddTags”, +“elasticloadbalancing:CreateListener”, +“elasticloadbalancing:CreateTargetGroup”, +“elasticloadbalancing:DeleteListener”, +“elasticloadbalancing:DeleteTargetGroup”, +“elasticloadbalancing:DeregisterTargets”, +“elasticloadbalancing:DescribeListeners”, +“elasticloadbalancing:DescribeLoadBalancerPolicies”, +“elasticloadbalancing:DescribeTargetGroups”, +“elasticloadbalancing:DescribeTargetHealth”, +“elasticloadbalancing:ModifyListener”, +“elasticloadbalancing:ModifyTargetGroup”, +“elasticloadbalancing:RegisterTargets”, +“elasticloadbalancing:SetLoadBalancerPoliciesOfListener”, +“iam:CreateServiceLinkedRole”, +“kms:DescribeKey” +], +“Resource”: [ +“*” +], +“Effect”: “Allow” +} +] +}

        +
        +nodePoolManagementARN
        + +string + +
        +

        NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller.

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Action”: [ +“ec2:AssociateRouteTable”, +“ec2:AttachInternetGateway”, +“ec2:AuthorizeSecurityGroupIngress”, +“ec2:CreateInternetGateway”, +“ec2:CreateNatGateway”, +“ec2:CreateRoute”, +“ec2:CreateRouteTable”, +“ec2:CreateSecurityGroup”, +“ec2:CreateSubnet”, +“ec2:CreateTags”, +“ec2:DeleteInternetGateway”, +“ec2:DeleteNatGateway”, +“ec2:DeleteRouteTable”, +“ec2:DeleteSecurityGroup”, +“ec2:DeleteSubnet”, +“ec2:DeleteTags”, +“ec2:DescribeAccountAttributes”, +“ec2:DescribeAddresses”, +“ec2:DescribeAvailabilityZones”, +“ec2:DescribeImages”, +“ec2:DescribeInstances”, +“ec2:DescribeInternetGateways”, +“ec2:DescribeNatGateways”, +“ec2:DescribeNetworkInterfaces”, +“ec2:DescribeNetworkInterfaceAttribute”, +“ec2:DescribeRouteTables”, +“ec2:DescribeSecurityGroups”, +“ec2:DescribeSubnets”, +“ec2:DescribeVpcs”, +“ec2:DescribeVpcAttribute”, +“ec2:DescribeVolumes”, +“ec2:DetachInternetGateway”, +“ec2:DisassociateRouteTable”, +“ec2:DisassociateAddress”, +“ec2:ModifyInstanceAttribute”, +“ec2:ModifyNetworkInterfaceAttribute”, +“ec2:ModifySubnetAttribute”, +“ec2:RevokeSecurityGroupIngress”, +“ec2:RunInstances”, +“ec2:TerminateInstances”, +“tag:GetResources”, +“ec2:CreateLaunchTemplate”, +“ec2:CreateLaunchTemplateVersion”, +“ec2:DescribeLaunchTemplates”, +“ec2:DescribeLaunchTemplateVersions”, +“ec2:DeleteLaunchTemplate”, +“ec2:DeleteLaunchTemplateVersions” +], +“Resource”: [ +“” +], +“Effect”: “Allow” +}, +{ +“Condition”: { +“StringLike”: { +“iam:AWSServiceName”: “elasticloadbalancing.amazonaws.com” +} +}, +“Action”: [ +“iam:CreateServiceLinkedRole” +], +“Resource”: [ +“arn::iam:::role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing” +], +“Effect”: “Allow” +}, +{ +“Action”: [ +“iam:PassRole” +], +“Resource”: [ +“arn::iam:::role/-worker-role” +], +“Effect”: “Allow” +}, +{ +“Effect”: “Allow”, +“Action”: [ +“kms:Decrypt”, +“kms:ReEncrypt”, +“kms:GenerateDataKeyWithoutPlainText”, +“kms:DescribeKey” +], +“Resource”: “” +}, +{ +“Effect”: “Allow”, +“Action”: [ +“kms:CreateGrant” +], +“Resource”: “”, +“Condition”: { +“Bool”: { +“kms:GrantIsForAWSResource”: true +} +} +} +] +}

        +
        +controlPlaneOperatorARN
        + +string + +
        +

        ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator.

        +

        The following is an example of a valid policy document:

        +

        { +“Version”: “2012-10-17”, +“Statement”: [ +{ +“Effect”: “Allow”, +“Action”: [ +“ec2:CreateVpcEndpoint”, +“ec2:DescribeVpcEndpoints”, +“ec2:ModifyVpcEndpoint”, +“ec2:DeleteVpcEndpoints”, +“ec2:CreateTags”, +“route53:ListHostedZones”, +“ec2:CreateSecurityGroup”, +“ec2:AuthorizeSecurityGroupIngress”, +“ec2:AuthorizeSecurityGroupEgress”, +“ec2:DeleteSecurityGroup”, +“ec2:RevokeSecurityGroupIngress”, +“ec2:RevokeSecurityGroupEgress”, +“ec2:DescribeSecurityGroups”, +“ec2:DescribeVpcs”, +], +“Resource”: “*” +}, +{ +“Effect”: “Allow”, +“Action”: [ +“route53:ChangeResourceRecordSets”, +“route53:ListResourceRecordSets” +], +“Resource”: “arn:aws:route53:::%s” +} +] +}

        +
        +kmsProviderARN
        + +string + +
        +
        +
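To tie the role descriptions above together, here is a minimal, hypothetical `rolesRef` snippet for a `ROSAControlPlane` spec; the account ID and role names are placeholders and should point at the operator roles actually created for the cluster.

```yaml
# Hypothetical rolesRef for a ROSAControlPlane spec; all ARNs are placeholders.
rolesRef:
  ingressARN: "arn:aws:iam::123456789012:role/my-cluster-openshift-ingress-operator"
  imageRegistryARN: "arn:aws:iam::123456789012:role/my-cluster-openshift-image-registry"
  storageARN: "arn:aws:iam::123456789012:role/my-cluster-openshift-cluster-csi-drivers"
  networkARN: "arn:aws:iam::123456789012:role/my-cluster-cloud-network-config-controller"
  kubeCloudControllerARN: "arn:aws:iam::123456789012:role/my-cluster-kube-controller-manager"
  nodePoolManagementARN: "arn:aws:iam::123456789012:role/my-cluster-capa-controller-manager"
  controlPlaneOperatorARN: "arn:aws:iam::123456789012:role/my-cluster-control-plane-operator"
  kmsProviderARN: "arn:aws:iam::123456789012:role/my-cluster-kms-provider"
```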

        DefaultMachinePoolSpec

        -(Appears on:AWSMachineSpec) +(Appears on:RosaControlPlaneSpec)

        -

        AMIReference is a reference to a specific AWS resource by ID, ARN, or filters. -Only one of ID, ARN or Filters may be specified. Specifying more than one will result in -a validation error.

        +

        DefaultMachinePoolSpec defines the configuration for the required worker nodes provisioned as part of the cluster creation.

        @@ -8086,36 +8523,40 @@ a validation error.

        -id
        +instanceType
        string
        (Optional) -

        ID of resource

        +

        The instance type to use, for example r5.xlarge. Instance type ref; https://aws.amazon.com/ec2/instance-types/

        -eksLookupType
        +autoscaling
        - -EKSAMILookupType + +RosaMachinePoolAutoScaling
        (Optional) -

        EKSOptimizedLookupType If specified, will look up an EKS Optimized image in SSM Parameter store

        +

Autoscaling specifies auto scaling behaviour for the default MachinePool. The autoscaling min/max values +must be equal to, or a multiple of, the availability zone count.
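As a quick illustration, here is a hypothetical `defaultMachinePoolSpec` snippet; the `minReplicas`/`maxReplicas` field names are an assumption mirroring the MinReplicas/MaxReplicas fields read by the controller code earlier in this change, and the instance type is only an example.

```yaml
# Hypothetical defaultMachinePoolSpec; with three availability zones the
# autoscaling min/max values must be a multiple of three.
defaultMachinePoolSpec:
  instanceType: "m5.xlarge"
  autoscaling:
    minReplicas: 3   # assumed field name (MinReplicas in the controller code)
    maxReplicas: 6   # assumed field name (MaxReplicas in the controller code)
```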

        -

        AWSCluster +

        ExternalAuthProvider

        -

        AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster API.

        +(Appears on:RosaControlPlaneSpec) +

        +

        +

        ExternalAuthProvider is an external OIDC identity provider that can issue tokens for this cluster

        @@ -8127,228 +8568,309 @@ EKSAMILookupType + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        -metadata
        +name
        - -Kubernetes meta/v1.ObjectMeta +string + +
        +

        Name of the OIDC provider

        +
        +issuer
        + + +TokenIssuer
        -Refer to the Kubernetes API documentation for the fields of the -metadata field. +

        Issuer describes attributes of the OIDC token issuer

        -spec
        +oidcClients
        - -AWSClusterSpec + +[]OIDCClientConfig
        -
        -
        - +(Optional) +

        OIDCClients contains configuration for the platform’s clients that +need to request tokens from the issuer

        + + + + + +
        -network
        +claimMappings
        - -NetworkSpec + +TokenClaimMappings
        -

        NetworkSpec encapsulates all things related to AWS network.

        +(Optional) +

        ClaimMappings describes rules on how to transform information from an +ID token into a cluster identity

        -region
        +claimValidationRules
        + + +[]TokenClaimValidationRule + + +
        +

        ClaimValidationRules are rules that are applied to validate token claims to authenticate users.

        +
        +
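Pulling the fields in this section together, an illustrative `externalAuthProviders` entry; the issuer URL, audience, client identifiers, and the referenced ConfigMap/Secret names are placeholders.

```yaml
# Illustrative external auth configuration for a ROSAControlPlane spec.
enableExternalAuthProviders: true
externalAuthProviders:
  - name: my-oidc-provider                        # at most one provider can be configured
    issuer:
      issuerURL: "https://login.example.com/oidc" # must use the https:// scheme
      audiences:
        - openshift-console                       # must match the token "aud" claim
      issuerCertificateAuthority:
        name: oidc-ca-bundle                      # ConfigMap holding "ca-bundle.crt"
    oidcClients:
      - componentName: console
        componentNamespace: openshift-console
        clientID: openshift-console-client
        clientSecret:
          name: console-client-secret             # Secret holding the "clientSecret" key
    claimMappings:
      username:
        claim: email
        prefixPolicy: NoPrefix
      groups:
        claim: groups
        prefix: "oidc:"
    claimValidationRules:
      - type: RequiredClaim
        requiredClaim:
          claim: organization                     # placeholder claim name
          requiredValue: "engineering"            # placeholder expected value
```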

        LocalObjectReference +

        +

        +(Appears on:OIDCClientConfig, TokenIssuer) +

        +

        +

        LocalObjectReference references an object in the same namespace.

        +

        + + + + + + + + + + + +
        FieldDescription
        +name
        string
        -

        The AWS Region the cluster lives in.

        +

        Name is the metadata.name of the referenced object.

        +

        NetworkSpec +

        +

        +(Appears on:RosaControlPlaneSpec) +

        +

        +

        NetworkSpec for ROSA-HCP.

        +

        + + + + + + + + + +
        FieldDescription
        -sshKeyName
        +machineCIDR
        string
        (Optional) -

        SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)

        +

IP address block used by OpenShift while installing the cluster, for example “10.0.0.0/16”.

        -controlPlaneEndpoint
        +podCIDR
        - -Cluster API api/v1beta1.APIEndpoint - +string
        (Optional) -

        ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.

        +

        IP address block from which to assign pod IP addresses, for example 10.128.0.0/14.

        -additionalTags
        +serviceCIDR
        - -Tags - +string
        (Optional) -

        AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the -ones added by default.

        +

        IP address block from which to assign service IP addresses, for example 172.30.0.0/16.

        -controlPlaneLoadBalancer
        +hostPrefix
        - -AWSLoadBalancerSpec - +int
        (Optional) -

        ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.

        +

        Network host prefix which is defaulted to 23 if not specified.

        -imageLookupFormat
        +networkType
        string
        (Optional) -

        ImageLookupFormat is the AMI naming format to look up machine images when -a machine does not specify an AMI. When set, this will be used for all -cluster machines unless a machine specifies a different ImageLookupOrg. -Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base -OS and kubernetes version, respectively. The BaseOS will be the value in -ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as -defined by the packages produced by kubernetes/release without v as a -prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default -image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up -searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a -Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See -also: https://golang.org/pkg/text/template/

        +

        The CNI network type default is OVNKubernetes.

        +
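A minimal `network` block built from the example values and defaults documented above:

```yaml
network:
  machineCIDR: "10.0.0.0/16"
  podCIDR: "10.128.0.0/14"
  serviceCIDR: "172.30.0.0/16"
  hostPrefix: 23              # default when not specified
  networkType: OVNKubernetes  # default CNI network type
```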

        OIDCClientConfig +

        +

        +(Appears on:ExternalAuthProvider) +

        +

        +

OIDCClientConfig contains configuration for the platform’s clients that +need to request tokens from the issuer.

        +

        + + + + + + + + +
        FieldDescription
        -imageLookupOrg
        +componentName
        string
        -(Optional) -

        ImageLookupOrg is the AWS Organization ID to look up machine images when a -machine does not specify an AMI. When set, this will be used for all -cluster machines unless a machine specifies a different ImageLookupOrg.

        +

        ComponentName is the name of the component that is supposed to consume this +client configuration

        -imageLookupBaseOS
        +componentNamespace
        string
        -

        ImageLookupBaseOS is the name of the base operating system used to look -up machine images when a machine does not specify an AMI. When set, this -will be used for all cluster machines unless a machine specifies a -different ImageLookupBaseOS.

        +

        ComponentNamespace is the namespace of the component that is supposed to consume this +client configuration

        -bastion
        +clientID
        - -Bastion - +string
        -(Optional) -

        Bastion contains options to configure the bastion host.

        +

        ClientID is the identifier of the OIDC client from the OIDC provider

        -identityRef
        +clientSecret
        - -AWSIdentityReference + +LocalObjectReference
        -(Optional) -

        IdentityRef is a reference to a identity to be used when reconciling this cluster

        +

        ClientSecret refers to a secret that +contains the client secret in the clientSecret key of the .data field

        -s3Bucket
        +extraScopes
        - -S3Bucket - +[]string
        (Optional) -

        S3Bucket contains options to configure a supporting S3 bucket for this -cluster - currently used for nodes requiring Ignition -(https://coreos.github.io/ignition/) for bootstrapping (requires -BootstrapFormatIgnition feature flag to be enabled).

        +

        ExtraScopes is an optional set of scopes to request tokens with.

        +
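A sketch of the Secret that `clientSecret` is expected to reference, assuming it lives in the same namespace as the `ROSAControlPlane` (per the `LocalObjectReference` description above); the name, namespace, and value are placeholders.

```yaml
# Hypothetical Secret referenced by oidcClients[].clientSecret; the client
# secret must be stored under the "clientSecret" key (stringData shown for readability).
apiVersion: v1
kind: Secret
metadata:
  name: console-client-secret
  namespace: default   # same namespace as the ROSAControlPlane (placeholder)
stringData:
  clientSecret: "<oidc-client-secret>"
```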

        PrefixedClaimMapping +

        +

        +(Appears on:TokenClaimMappings) +

        +

        +

        PrefixedClaimMapping defines claims with a prefix.

        +

        + + + + + + + + + + +
        FieldDescription
        +claim
        + +string + +
        +

        Claim is a JWT token claim to be used in the mapping

        -status
        +prefix
        - -AWSClusterStatus - +string
        +

        Prefix is a string to prefix the value from the token in the result of the +claim mapping.

        +

        By default, no prefixing occurs.

        +

        Example: if prefix is set to “myoidc:”” and the claim in JWT contains +an array of strings “a”, “b” and “c”, the mapping will result in an +array of string “myoidc:a”, “myoidc:b” and “myoidc:c”.

        -

        AWSClusterControllerIdentity +

        ROSAControlPlane

        -

        AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API -It is used to grant access to use Cluster API Provider AWS Controller credentials.

        +

        ROSAControlPlane is the Schema for the ROSAControlPlanes API.

        @@ -8376,39 +8898,1506 @@ Refer to the Kubernetes API documentation for the fields of the + + - -
        spec
        - -AWSClusterControllerIdentitySpec + +RosaControlPlaneSpec
        -

        Spec for this AWSClusterControllerIdentity.



        -
        -AWSClusterIdentitySpec
        +rosaClusterName
        - -AWSClusterIdentitySpec - +string
        -

        -(Members of AWSClusterIdentitySpec are embedded into this type.) -

        +

        Cluster name must be valid DNS-1035 label, so it must consist of lower case alphanumeric +characters or ‘-’, start with an alphabetic character, end with an alphanumeric character +and have a max length of 54 characters.

        +
        +domainPrefix
        + +string + +
        +(Optional) +

        DomainPrefix is an optional prefix added to the cluster’s domain name. It will be used +when generating a sub-domain for the cluster on openshiftapps domain. It must be valid DNS-1035 label +consisting of lower case alphanumeric characters or ‘-’, start with an alphabetic character +end with an alphanumeric character and have a max length of 15 characters.

        -

        AWSClusterControllerIdentitySpec -

        -

        +

        +subnets
        + +[]string + +
        +

        The Subnet IDs to use when installing the cluster. +SubnetIDs should come in pairs; two per availability zone, one private and one public.

        +
        +availabilityZones
        + +[]string + +
        +

AvailabilityZones describe AWS AvailabilityZones of the worker nodes. +They should match the AvailabilityZones of the provided Subnets. +A machinepool will be created for each availabilityZone.

        +
        +region
        + +string + +
        +

        The AWS Region the cluster lives in.

        +
        +version
        + +string + +
        +

        OpenShift semantic version, for example “4.14.5”.

        +
        +rolesRef
        + + +AWSRolesRef + + +
        +

        AWS IAM roles used to perform credential requests by the openshift operators.

        +
        +oidcID
        + +string + +
        +

        The ID of the internal OpenID Connect Provider.

        +
        +enableExternalAuthProviders
        + +bool + +
        +(Optional) +

        EnableExternalAuthProviders enables external authentication configuration for the cluster.

        +
        +externalAuthProviders
        + + +[]ExternalAuthProvider + + +
        +

        ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster. +Can only be set if “enableExternalAuthProviders” is set to “True”.

        +

        At most one provider can be configured.

        +
        +installerRoleARN
        + +string + +
        +

InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.

        +
        +supportRoleARN
        + +string + +
        +

        SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable +access to the cluster account in order to provide support.

        +
        +workerRoleARN
        + +string + +
        +

        WorkerRoleARN is an AWS IAM role that will be attached to worker instances.

        +
        +billingAccount
        + +string + +
        +(Optional) +

        BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters. +The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster +is running.

        +
        +defaultMachinePoolSpec
        + + +DefaultMachinePoolSpec + + +
        +(Optional) +

DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation. +One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for openshift cluster operators +to work properly. +As these machinepools are not created using a ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider. +rosa list machinepools -c <rosaClusterName> can be used to view those machinepools.

        +

        This field will be removed in the future once the current limitation is resolved.

        +
        +network
        + + +NetworkSpec + + +
        +(Optional) +

        Network config for the ROSA HCP cluster.

        +
        +endpointAccess
        + + +RosaEndpointAccessType + + +
        +(Optional) +

        EndpointAccess specifies the publishing scope of cluster endpoints. The +default is Public.

        +
        +additionalTags
        + + +Tags + + +
        +(Optional) +

        AdditionalTags are user-defined tags to be added on the AWS resources associated with the control plane.

        +
        +etcdEncryptionKMSARN
        + +string + +
        +(Optional) +

        EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be +created out-of-band by the user and tagged with red-hat:true.

        +
        +auditLogRoleARN
        + +string + +
        +(Optional) +

        AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch. +If not set, audit log forwarding is disabled.

        +
        +provisionShardID
        + +string + +
        +(Optional) +

        ProvisionShardID defines the shard where rosa control plane components will be hosted.

        +
        +credentialsSecretRef
        + + +Kubernetes core/v1.LocalObjectReference + + +
        +(Optional) +

        CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API. +The secret should contain the following data keys: +- ocmToken: eyJhbGciOiJIUzI1NiIsI…. +- ocmApiUrl: Optional, defaults to ‘https://api.openshift.com’

        +
        +identityRef
        + + +AWSIdentityReference + + +
        +(Optional) +

        IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

        +
        +controlPlaneEndpoint
        + + +Cluster API api/v1beta1.APIEndpoint + + +
        +(Optional) +

        ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.

        +
        + + + + +status
        + + +RosaControlPlaneStatus + + + + + + + + +

        RosaControlPlaneSpec +

        +

        +(Appears on:ROSAControlPlane) +

        +

        +

        RosaControlPlaneSpec defines the desired state of ROSAControlPlane.

        +

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        FieldDescription
        +rosaClusterName
        + +string + +
        +

        Cluster name must be valid DNS-1035 label, so it must consist of lower case alphanumeric +characters or ‘-’, start with an alphabetic character, end with an alphanumeric character +and have a max length of 54 characters.

        +
        +domainPrefix
        + +string + +
        +(Optional) +

        DomainPrefix is an optional prefix added to the cluster’s domain name. It will be used +when generating a sub-domain for the cluster on openshiftapps domain. It must be valid DNS-1035 label +consisting of lower case alphanumeric characters or ‘-’, start with an alphabetic character +end with an alphanumeric character and have a max length of 15 characters.

        +
        +subnets
        + +[]string + +
        +

        The Subnet IDs to use when installing the cluster. +SubnetIDs should come in pairs; two per availability zone, one private and one public.

        +
        +availabilityZones
        + +[]string + +
        +

AvailabilityZones describe AWS AvailabilityZones of the worker nodes. +They should match the AvailabilityZones of the provided Subnets. +A machinepool will be created for each availabilityZone.

        +
        +region
        + +string + +
        +

        The AWS Region the cluster lives in.

        +
        +version
        + +string + +
        +

        OpenShift semantic version, for example “4.14.5”.

        +
        +rolesRef
        + + +AWSRolesRef + + +
        +

        AWS IAM roles used to perform credential requests by the openshift operators.

        +
        +oidcID
        + +string + +
        +

        The ID of the internal OpenID Connect Provider.

        +
        +enableExternalAuthProviders
        + +bool + +
        +(Optional) +

        EnableExternalAuthProviders enables external authentication configuration for the cluster.

        +
        +externalAuthProviders
        + + +[]ExternalAuthProvider + + +
        +

        ExternalAuthProviders are external OIDC identity providers that can issue tokens for this cluster. +Can only be set if “enableExternalAuthProviders” is set to “True”.

        +

        At most one provider can be configured.

        +
        +installerRoleARN
        + +string + +
        +

InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.

        +
        +supportRoleARN
        + +string + +
        +

        SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable +access to the cluster account in order to provide support.

        +
        +workerRoleARN
        + +string + +
        +

        WorkerRoleARN is an AWS IAM role that will be attached to worker instances.

        +
        +billingAccount
        + +string + +
        +(Optional) +

        BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA clusters. +The cost of running each ROSA cluster will be billed to the infrastructure account in which the cluster +is running.

        +
        +defaultMachinePoolSpec
        + + +DefaultMachinePoolSpec + + +
        +(Optional) +

DefaultMachinePoolSpec defines the configuration for the default machinepool(s) provisioned as part of the cluster creation. +One MachinePool will be created with this configuration per AvailabilityZone. Those default machinepools are required for openshift cluster operators +to work properly. +As these machinepools are not created using a ROSAMachinePool CR, they will not be visible/managed by the ROSA CAPI provider. +rosa list machinepools -c <rosaClusterName> can be used to view those machinepools.

        +

        This field will be removed in the future once the current limitation is resolved.

        +
        +network
        + + +NetworkSpec + + +
        +(Optional) +

        Network config for the ROSA HCP cluster.

        +
        +endpointAccess
        + + +RosaEndpointAccessType + + +
        +(Optional) +

        EndpointAccess specifies the publishing scope of cluster endpoints. The +default is Public.

        +
        +additionalTags
        + + +Tags + + +
        +(Optional) +

        AdditionalTags are user-defined tags to be added on the AWS resources associated with the control plane.

        +
        +etcdEncryptionKMSARN
        + +string + +
        +(Optional) +

        EtcdEncryptionKMSARN is the ARN of the KMS key used to encrypt etcd. The key itself needs to be +created out-of-band by the user and tagged with red-hat:true.

        +
        +auditLogRoleARN
        + +string + +
        +(Optional) +

        AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch. +If not set, audit log forwarding is disabled.

        +
        +provisionShardID
        + +string + +
        +(Optional) +

        ProvisionShardID defines the shard where rosa control plane components will be hosted.

        +
        +credentialsSecretRef
        + + +Kubernetes core/v1.LocalObjectReference + + +
        +(Optional) +

        CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API. +The secret should contain the following data keys: +- ocmToken: eyJhbGciOiJIUzI1NiIsI…. +- ocmApiUrl: Optional, defaults to ‘https://api.openshift.com’

        +
        +identityRef
        + + +AWSIdentityReference + + +
        +(Optional) +

        IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

        +
        +controlPlaneEndpoint
        + + +Cluster API api/v1beta1.APIEndpoint + + +
        +(Optional) +

        ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.

        +
        +
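For illustration, a hypothetical Secret matching the `credentialsSecretRef` description above; the token is a truncated placeholder and `ocmApiUrl` can be omitted to use the default.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: rosa-creds-secret        # placeholder, referenced via credentialsSecretRef.name
  namespace: default             # same namespace as the ROSAControlPlane (placeholder)
stringData:
  ocmToken: "eyJhbGciOiJIUzI1NiIsI..."    # truncated placeholder OCM token
  ocmApiUrl: "https://api.openshift.com"  # optional, this is the default
```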

        RosaControlPlaneStatus +

        +

        +(Appears on:ROSAControlPlane) +

        +

        +

        RosaControlPlaneStatus defines the observed state of ROSAControlPlane.

        +

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        FieldDescription
        +externalManagedControlPlane
        + +bool + +
        +

        ExternalManagedControlPlane indicates to cluster-api that the control plane +is managed by an external service such as AKS, EKS, GKE, etc.

        +
        +initialized
        + +bool + +
        +(Optional) +

        Initialized denotes whether or not the control plane has the +uploaded kubernetes config-map.

        +
        +ready
        + +bool + +
        +

        Ready denotes that the ROSAControlPlane API Server is ready to receive requests.

        +
        +failureMessage
        + +string + +
        +(Optional) +

        FailureMessage will be set in the event that there is a terminal problem +reconciling the state and will be set to a descriptive error message.

        +

        This field should not be set for transitive errors that a controller +faces that are expected to be fixed automatically over +time (like service outages), but instead indicate that something is +fundamentally wrong with the spec or the configuration of +the controller, and that manual intervention is required.

        +
        +conditions
        + + +Cluster API api/v1beta1.Conditions + + +
        +

        Conditions specifies the conditions for the managed control plane

        +
        +id
        + +string + +
        +

        ID is the cluster ID given by ROSA.

        +
        +consoleURL
        + +string + +
        +

        ConsoleURL is the url for the openshift console.

        +
        +oidcEndpointURL
        + +string + +
        +

        OIDCEndpointURL is the endpoint url for the managed OIDC provider.

        +
        +

        RosaEndpointAccessType +(string alias)

        +

        +(Appears on:RosaControlPlaneSpec) +

        +

        +

        RosaEndpointAccessType specifies the publishing scope of cluster endpoints.

        +

        + + + + + + + + + + + + +
        ValueDescription

        "Private"

        Private endpoint access allows only private API server access and private +node communication with the control plane.

        +

        "Public"

        Public endpoint access allows public API server access and +private node communication with the control plane.

        +
        +

        TokenAudience +(string alias)

        +

        +(Appears on:TokenIssuer) +

        +

        +

        TokenAudience is the audience that the token was issued for.

        +

        +

        TokenClaimMappings +

        +

        +(Appears on:ExternalAuthProvider) +

        +

        +

        TokenClaimMappings describes rules on how to transform information from an +ID token into a cluster identity.

        +

        + + + + + + + + + + + + + + + + + +
        FieldDescription
        +username
        + + +UsernameClaimMapping + + +
        +(Optional) +

        Username is a name of the claim that should be used to construct +usernames for the cluster identity.

        +

        Default value: “sub”

        +
        +groups
        + + +PrefixedClaimMapping + + +
        +(Optional) +

        Groups is a name of the claim that should be used to construct +groups for the cluster identity. +The referenced claim must use array of strings values.

        +
        +

        TokenClaimValidationRule +

        +

        +(Appears on:ExternalAuthProvider) +

        +

        +

        TokenClaimValidationRule validates token claims to authenticate users.

        +

        + + + + + + + + + + + + + + + + + +
        FieldDescription
        +type
        + + +TokenValidationRuleType + + +
        +

        Type sets the type of the validation rule

        +
        +requiredClaim
        + + +TokenRequiredClaim + + +
        +

        RequiredClaim allows configuring a required claim name and its expected value

        +
        +

        TokenIssuer +

        +

        +(Appears on:ExternalAuthProvider) +

        +

        +

        TokenIssuer describes attributes of the OIDC token issuer

        +

        + + + + + + + + + + + + + + + + + + + + + +
        FieldDescription
        +issuerURL
        + +string + +
        +

        URL is the serving URL of the token issuer. +Must use the https:// scheme.

        +
        +audiences
        + + +[]TokenAudience + + +
        +

        Audiences is an array of audiences that the token was issued for. +Valid tokens must include at least one of these values in their +“aud” claim. +Must be set to exactly one value.

        +
        +issuerCertificateAuthority
        + + +LocalObjectReference + + +
        +

        CertificateAuthority is a reference to a config map in the +configuration namespace. The .data of the configMap must contain +the “ca-bundle.crt” key. +If unset, system trust is used instead.

        +
        +
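A sketch of the ConfigMap that `issuerCertificateAuthority` can reference; as stated above, the PEM bundle must be stored under the `ca-bundle.crt` key (name, namespace, and certificate contents are placeholders).

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: oidc-ca-bundle
  namespace: default
data:
  ca-bundle.crt: |
    -----BEGIN CERTIFICATE-----
    ...PEM-encoded issuer CA certificate...
    -----END CERTIFICATE-----
```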

        TokenRequiredClaim +

        +

        +(Appears on:TokenClaimValidationRule) +

        +

        +

        TokenRequiredClaim allows configuring a required claim name and its expected value.

        +

        + + + + + + + + + + + + + + + + + +
        FieldDescription
        +claim
        + +string + +
        +

        Claim is a name of a required claim. Only claims with string values are +supported.

        +
        +requiredValue
        + +string + +
        +

        RequiredValue is the required value for the claim.

        +
        +

        TokenValidationRuleType +(string alias)

        +

        +(Appears on:TokenClaimValidationRule) +

        +

        +

        TokenValidationRuleType defines the type of the validation rule.

        +

        + + + + + + + + + + +
        ValueDescription

        "RequiredClaim"

        TokenValidationRuleTypeRequiredClaim defines the type for RequiredClaim.

        +
        +

        UsernameClaimMapping +

        +

        +(Appears on:TokenClaimMappings) +

        +

        +

        UsernameClaimMapping defines the claim that should be used to construct usernames for the cluster identity.

        +

        + + + + + + + + + + + + + + + + + + + + + +
        FieldDescription
        +claim
        + +string + +
        +

        Claim is a JWT token claim to be used in the mapping

        +
        +prefixPolicy
        + + +UsernamePrefixPolicy + + +
        +(Optional) +

        PrefixPolicy specifies how a prefix should apply.

        +

        By default, claims other than email will be prefixed with the issuer URL to +prevent naming clashes with other plugins.

        +

        Set to “NoPrefix” to disable prefixing.

        +

        Example: +(1) prefix is set to “myoidc:” and claim is set to “username”. +If the JWT claim username contains value userA, the resulting +mapped value will be “myoidc:userA”. +(2) prefix is set to “myoidc:” and claim is set to “email”. If the +JWT email claim contains value “userA@myoidc.tld”, the resulting +mapped value will be “myoidc:userA@myoidc.tld”. +(3) prefix is unset, issuerURL is set to https://myoidc.tld, +the JWT claims include “username”:“userA” and “email”:“userA@myoidc.tld”, +and claim is set to: +(a) “username”: the mapped value will be “https://myoidc.tld#userA” +(b) “email”: the mapped value will be “userA@myoidc.tld”

        +
        +prefix
        + +string + +
        +(Optional) +

        Prefix is prepended to claim to prevent clashes with existing names.

        +
        +

        UsernamePrefixPolicy +(string alias)

        +

        +(Appears on:UsernameClaimMapping) +

        +

        +

        UsernamePrefixPolicy specifies how a prefix should apply.

        +

        + + + + + + + + + + + + + + +
        ValueDescription

        ""

NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix. +If the username claim is anything else, it is prefixed by the issuerURL.

        +

        "NoPrefix"

        NoPrefix means the username claim value will not have any prefix

        +

        "Prefix"

        Prefix means the prefix value must be specified. It cannot be empty

        +
        +
        +
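Putting example (1) above into manifest form, a hypothetical `claimMappings.username` snippet that applies an explicit prefix:

```yaml
claimMappings:
  username:
    claim: username
    prefixPolicy: Prefix   # the prefix value must be specified and non-empty
    prefix: "myoidc:"      # token value "userA" maps to "myoidc:userA"
```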

        infrastructure.cluster.x-k8s.io/v1beta1

        +

        +

        Package v1beta1 contains the v1beta1 API implementation.

        +

        +Resource Types: +
          +

          AMIReference +

          +

          +(Appears on:AWSMachineSpec) +

          +

          +

          AMIReference is a reference to a specific AWS resource by ID, ARN, or filters. +Only one of ID, ARN or Filters may be specified. Specifying more than one will result in +a validation error.

          +

          + + + + + + + + + + + + + + + + + +
          FieldDescription
          +id
          + +string + +
          +(Optional) +

          ID of resource

          +
          +eksLookupType
          + + +EKSAMILookupType + + +
          +(Optional) +

          EKSOptimizedLookupType If specified, will look up an EKS Optimized image in SSM Parameter store

          +
          +

          AWSCluster +

          +

          +

          AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster API.

          +

          + + + + + + + + + + + + + + + + + + + + + +
          FieldDescription
          +metadata
          + + +Kubernetes meta/v1.ObjectMeta + + +
          +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
          +spec
          + + +AWSClusterSpec + + +
          +
          +
          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          +network
          + + +NetworkSpec + + +
          +

          NetworkSpec encapsulates all things related to AWS network.

          +
          +region
          + +string + +
          +

          The AWS Region the cluster lives in.

          +
          +sshKeyName
          + +string + +
          +(Optional) +

          SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)

          +
          +controlPlaneEndpoint
          + + +Cluster API api/v1beta1.APIEndpoint + + +
          +(Optional) +

          ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.

          +
          +additionalTags
          + + +Tags + + +
          +(Optional) +

          AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the +ones added by default.

          +
          +controlPlaneLoadBalancer
          + + +AWSLoadBalancerSpec + + +
          +(Optional) +

          ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior.

          +
          +imageLookupFormat
          + +string + +
          +(Optional) +

          ImageLookupFormat is the AMI naming format to look up machine images when +a machine does not specify an AMI. When set, this will be used for all +cluster machines unless a machine specifies a different ImageLookupOrg. +Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base +OS and kubernetes version, respectively. The BaseOS will be the value in +ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as +defined by the packages produced by kubernetes/release without v as a +prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default +image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up +searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a +Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See +also: https://golang.org/pkg/text/template/

          +
          +imageLookupOrg
          + +string + +
          +(Optional) +

          ImageLookupOrg is the AWS Organization ID to look up machine images when a +machine does not specify an AMI. When set, this will be used for all +cluster machines unless a machine specifies a different ImageLookupOrg.

          +
          +imageLookupBaseOS
          + +string + +
          +

          ImageLookupBaseOS is the name of the base operating system used to look +up machine images when a machine does not specify an AMI. When set, this +will be used for all cluster machines unless a machine specifies a +different ImageLookupBaseOS.

          +
          +bastion
          + + +Bastion + + +
          +(Optional) +

          Bastion contains options to configure the bastion host.

          +
          +identityRef
          + + +AWSIdentityReference + + +
          +

          IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

          +
          +s3Bucket
          + + +S3Bucket + + +
          +(Optional) +

          S3Bucket contains options to configure a supporting S3 bucket for this +cluster - currently used for nodes requiring Ignition +(https://coreos.github.io/ignition/) for bootstrapping (requires +BootstrapFormatIgnition feature flag to be enabled).

          +
          +
          +status
          + + +AWSClusterStatus + + +
          +
          +
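As an illustration of the image lookup fields described above, a hedged sketch of an AWSCluster that narrows the AMI search; the organization ID, key name and region are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSCluster
metadata:
  name: example-cluster
spec:
  region: us-west-2
  sshKeyName: example-key
  # {{.BaseOS}} and {{.K8sVersion}} are substituted when searching for an AMI.
  imageLookupFormat: "capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*"
  imageLookupOrg: "123456789012"
  imageLookupBaseOS: ubuntu
```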

          AWSClusterControllerIdentity +

          +

          +

          AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API +It is used to grant access to use Cluster API Provider AWS Controller credentials.

          +

Field | Description
          +metadata
          + + +Kubernetes meta/v1.ObjectMeta + + +
          +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
          +spec
          + + +AWSClusterControllerIdentitySpec + + +
          +

          Spec for this AWSClusterControllerIdentity.

          +
          +
          +AWSClusterIdentitySpec
          + + +AWSClusterIdentitySpec + + +
          +

          +(Members of AWSClusterIdentitySpec are embedded into this type.) +

          +
          +
          +

          AWSClusterControllerIdentitySpec +

          +

          (Appears on:AWSClusterControllerIdentity)

          @@ -8838,8 +10827,8 @@ AWSIdentityReference -(Optional) -

          IdentityRef is a reference to a identity to be used when reconciling this cluster

          +

          IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

          @@ -9125,6 +11114,7 @@ AWSClusterTemplateResource (Appears on:AWSClusterTemplateSpec)

          +

          AWSClusterTemplateResource defines the desired state of AWSClusterTemplate.

          @@ -9318,8 +11308,8 @@ AWSIdentityReference @@ -12373,6 +14363,7 @@ string (Appears on:AWSClusterSpec)

          +

          S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition.

          -(Optional) -

          IdentityRef is a reference to a identity to be used when reconciling this cluster

          +

          IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

          @@ -15090,6 +17081,12 @@ int64 + + + +

          "AL2_x86_64_GPU"

          Al2x86_64GPU is the x86-64 GPU AMI type.

          "AL2023_ARM_64_STANDARD"

          Al2023Arm64 is the AL2023 Arm AMI type.

          +

          "AL2023_x86_64_STANDARD"

          Al2023x86_64 is the AL2023 x86 AMI type.

          +

          ManagedMachinePoolCapacityType @@ -15470,7 +17467,7 @@ percentage of nodes will be updated in parallel, up to 100 nodes at once.


          infrastructure.cluster.x-k8s.io/v1beta2

          -

          package v1beta2 contains the v1beta2 API implementation.

          +

          Package v1beta2 contains the v1beta2 API implementation.

          Resource Types:
            @@ -15742,8 +17739,8 @@ AWSIdentityReference -(Optional) -

            IdentityRef is a reference to a identity to be used when reconciling this cluster

            +

            IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

            @@ -16302,8 +18299,8 @@ AWSIdentityReference -(Optional) -

            IdentityRef is a reference to a identity to be used when reconciling this cluster

            +

            IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

            @@ -16589,6 +18586,7 @@ AWSClusterTemplateResource (Appears on:AWSClusterTemplateSpec)

            +

            AWSClusterTemplateResource defines the desired state of AWSClusterTemplateResource.

            @@ -16810,8 +18808,8 @@ AWSIdentityReference @@ -16877,7 +18875,7 @@ AWSClusterTemplateResource

            AWSIdentityReference

            -(Appears on:AWSClusterRoleIdentitySpec, AWSClusterSpec, AWSManagedControlPlaneSpec, AWSManagedControlPlaneSpec) +(Appears on:AWSClusterRoleIdentitySpec, AWSClusterSpec, AWSManagedControlPlaneSpec, AWSManagedControlPlaneSpec, RosaControlPlaneSpec)

            AWSIdentityReference specifies a identity.

            @@ -17309,6 +19307,19 @@ the cluster subnet will be used.

            -(Optional) -

            IdentityRef is a reference to a identity to be used when reconciling this cluster

            +

            IdentityRef is a reference to an identity to be used when reconciling the managed control plane. +If no identity is specified, the default identity for this controller will be used.

            +securityGroupOverrides
            + +map[sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2.SecurityGroupRole]string + +
            +(Optional) +

            SecurityGroupOverrides is an optional set of security groups to use for the node. +This is optional - if not provided security groups from the cluster will be used.

            +
            sshKeyName
            string @@ -17431,6 +19442,20 @@ string
            +placementGroupPartition
            + +int64 + +
            +(Optional) +

            PlacementGroupPartition is the partition number within the placement group in which to launch the instance. +This value is only valid if the placement group, referred in PlacementGroupName, was created with +strategy set to partition.

            +
            tenancy
            string @@ -17441,6 +19466,20 @@ string

            Tenancy indicates if instance should run on shared or single-tenant hardware.

            +privateDnsName
            + + +PrivateDNSName + + +
            +(Optional) +

            PrivateDNSName is the options for the instance hostname.

            +
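To tie the new machine-level fields together, a sketch (with placeholder IDs) of an AWSMachine that overrides security groups and targets a partition placement group; the values shown are illustrative only:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: example-machine
spec:
  instanceType: m5.large
  # Per-role overrides; security groups from the cluster are used when omitted.
  securityGroupOverrides:
    node: sg-0123456789abcdef0
  placementGroupName: example-partition-group
  # Only valid when the placement group above uses the "partition" strategy.
  placementGroupPartition: 2
  tenancy: default
```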
            @@ -17662,6 +19701,19 @@ the cluster subnet will be used.

            +securityGroupOverrides
            + +map[sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2.SecurityGroupRole]string + + + +(Optional) +

            SecurityGroupOverrides is an optional set of security groups to use for the node. +This is optional - if not provided security groups from the cluster will be used.

            + + + + sshKeyName
            string @@ -17753,45 +19805,73 @@ Ignition (Optional) -

            Ignition defined options related to the bootstrapping systems where Ignition is used.

            +

            Ignition defined options related to the bootstrapping systems where Ignition is used.

            + + + + +spotMarketOptions
            + + +SpotMarketOptions + + + + +(Optional) +

            SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.

            + + + + +placementGroupName
            + +string + + + +(Optional) +

            PlacementGroupName specifies the name of the placement group in which to launch the instance.

            -spotMarketOptions
            +placementGroupPartition
            - -SpotMarketOptions - +int64 (Optional) -

            SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.

            +

            PlacementGroupPartition is the partition number within the placement group in which to launch the instance. +This value is only valid if the placement group, referred in PlacementGroupName, was created with +strategy set to partition.

            -placementGroupName
            +tenancy
            string (Optional) -

            PlacementGroupName specifies the name of the placement group in which to launch the instance.

            +

            Tenancy indicates if instance should run on shared or single-tenant hardware.

            -tenancy
            +privateDnsName
            -string + +PrivateDNSName + (Optional) -

            Tenancy indicates if instance should run on shared or single-tenant hardware.

            +

            PrivateDNSName is the options for the instance hostname.

            @@ -18231,6 +20311,19 @@ the cluster subnet will be used.

            +securityGroupOverrides
            + +map[sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2.SecurityGroupRole]string + + + +(Optional) +

            SecurityGroupOverrides is an optional set of security groups to use for the node. +This is optional - if not provided security groups from the cluster will be used.

            + + + + sshKeyName
            string @@ -18353,6 +20446,20 @@ string +placementGroupPartition
            + +int64 + + + +(Optional) +

            PlacementGroupPartition is the partition number within the placement group in which to launch the instance. +This value is only valid if the placement group, referred in PlacementGroupName, was created with +strategy set to partition.

            + + + + tenancy
            string @@ -18363,6 +20470,20 @@ string

            Tenancy indicates if instance should run on shared or single-tenant hardware.

            + + +privateDnsName
            + + +PrivateDNSName + + + + +(Optional) +

            PrivateDNSName is the options for the instance hostname.

            + + @@ -19396,6 +21517,7 @@ string

            GCTask (string alias)

            +

            GCTask defines a task to be executed by the garbage collector.

            HTTPTokensState (string alias)

            @@ -19536,7 +21658,8 @@ Mutually exclusive with CidrBlock.

            (Appears on:AWSMachineSpec)

            -

            Ignition defines options related to the bootstrapping systems where Ignition is used.

            +

            Ignition defines options related to the bootstrapping systems where Ignition is used. +For more information on Ignition configuration, see https://coreos.github.io/butane/specs/

            @@ -19558,6 +21681,187 @@ string

            Version defines which version of Ignition will be used to generate bootstrap data.

            +storageType
            + + +IgnitionStorageTypeOption + + +
            +(Optional) +

StorageType defines how to store the bootstrap user data for Ignition. +This can be used to instruct Ignition where to fetch the user data to bootstrap an instance.

            +

            When omitted, the storage option will default to ClusterObjectStore.

            +

            When set to “ClusterObjectStore”, if the capability is available and a Cluster ObjectStore configuration +is correctly provided in the Cluster object (under .spec.s3Bucket), +an object store will be used to store bootstrap user data.

            +

When set to “UnencryptedUserData”, EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted. +This option is considered less secure than others, as user data may contain sensitive information (keys, certificates, etc.) +and users with ec2:DescribeInstances permission or users running pods +that can access the EC2 metadata service have access to this sensitive information. +So this is only to be used at one's own risk, and only when other more secure options are not viable.

            +
            +proxy
            + + +IgnitionProxy + + +
            +(Optional) +

            Proxy defines proxy settings for Ignition. +Only valid for Ignition versions 3.1 and above.

            +
            +tls
            + + +IgnitionTLS + + +
            +(Optional) +

            TLS defines TLS settings for Ignition. +Only valid for Ignition versions 3.1 and above.

            +
            +

            IgnitionCASource +(string alias)

            +

            +(Appears on:IgnitionTLS) +

            +

            +

            IgnitionCASource defines the source of the certificate authority to use for Ignition.

            +

            +

            IgnitionNoProxy +(string alias)

            +

            +(Appears on:IgnitionProxy) +

            +

            +

            IgnitionNoProxy defines the list of domains to not proxy for Ignition.

            +

            +

            IgnitionProxy +

            +

            +(Appears on:Ignition) +

            +

            +

            IgnitionProxy defines proxy settings for Ignition.

            +

Field | Description
            +httpProxy
            + +string + +
            +(Optional) +

            HTTPProxy is the HTTP proxy to use for Ignition. +A single URL that specifies the proxy server to use for HTTP and HTTPS requests, +unless overridden by the HTTPSProxy or NoProxy options.

            +
            +httpsProxy
            + +string + +
            +(Optional) +

            HTTPSProxy is the HTTPS proxy to use for Ignition. +A single URL that specifies the proxy server to use for HTTPS requests, +unless overridden by the NoProxy option.

            +
            +noProxy
            + + +[]IgnitionNoProxy + + +
            +(Optional) +

NoProxy is the list of domains to not proxy for Ignition. +Specifies a list of hosts that should be excluded from proxying.

            +

Each value is represented by: +- An IP address prefix (1.2.3.4) +- An IP address prefix in CIDR notation (1.2.3.4/8) +- A domain name +- A domain name matches that name and all subdomains +- A domain name with a leading . matches subdomains only +- A special DNS label (*), which indicates that no proxying should be done

            +

            An IP address prefix and domain name can also include a literal port number (1.2.3.4:80).

            +
            +

            IgnitionStorageTypeOption +(string alias)

            +

            +(Appears on:Ignition) +

            +

            +

            IgnitionStorageTypeOption defines the different storage types for Ignition.

            +

            +

            IgnitionTLS +

            +

            +(Appears on:Ignition) +

            +

            +

            IgnitionTLS defines TLS settings for Ignition.

            +

Field | Description
            +certificateAuthorities
            + + +[]IgnitionCASource + + +
            +(Optional) +

            CASources defines the list of certificate authorities to use for Ignition. +The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates. +Supported schemes are http, https, tftp, s3, arn, gs, and data (RFC 2397) URL scheme.

            +
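Pulling the Ignition options above together, a hedged sketch of an AWSMachine ignition stanza; the proxy URLs, bucket and CA location are placeholders, and the proxy/tls settings require Ignition 3.1 or newer:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: example-machine
spec:
  instanceType: m5.large
  ignition:
    version: "3.4"
    # ClusterObjectStore expects .spec.s3Bucket to be configured on the AWSCluster.
    storageType: ClusterObjectStore
    proxy:
      httpProxy: http://proxy.example.com:3128
      httpsProxy: http://proxy.example.com:3128
      noProxy:
        - .example.com
        - 10.0.0.0/8
    tls:
      certificateAuthorities:
        - s3://example-bucket/ca.pem
```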

            IngressRule @@ -19945,6 +22249,20 @@ string +placementGroupPartition
            + +int64 + + + +(Optional) +

            PlacementGroupPartition is the partition number within the placement group in which to launch the instance. +This value is only valid if the placement group, referred in PlacementGroupName, was created with +strategy set to partition.

            + + + + tenancy
            string @@ -19981,6 +22299,32 @@ InstanceMetadataOptions

            InstanceMetadataOptions is the metadata options for the EC2 instance.

            + + +privateDnsName
            + + +PrivateDNSName + + + + +(Optional) +

            PrivateDNSName is the options for the instance hostname.

            + + + + +publicIPOnLaunch
            + +bool + + + +(Optional) +

            PublicIPOnLaunch is the option to associate a public IP on instance launch

            + +

            InstanceMetadataOptions @@ -20335,6 +22679,7 @@ LoadBalancerType (Appears on:AWSLoadBalancerSpec, LoadBalancer)

            +

            LoadBalancerType defines the type of load balancer to use.

            NetworkSpec

            @@ -20491,6 +22836,60 @@ LoadBalancer +

            PrivateDNSName +

            +

            +(Appears on:AWSMachineSpec, Instance, AWSLaunchTemplate) +

            +

            +

            PrivateDNSName is the options for the instance hostname.

            +

Field | Description
            +enableResourceNameDnsAAAARecord
            + +bool + +
            +(Optional) +

            EnableResourceNameDNSAAAARecord indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

            +
            +enableResourceNameDnsARecord
            + +bool + +
            +(Optional) +

            EnableResourceNameDNSARecord indicates whether to respond to DNS queries for instance hostnames with DNS A records.

            +
            +hostnameType
            + +string + +
            +(Optional) +

            The type of hostname to assign to an instance.

            +
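For example, the privateDnsName options can be set on an AWSMachine as follows; the hostname type values are the EC2 ones referenced elsewhere in this document (ip-name or resource-name):

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachine
metadata:
  name: example-machine
spec:
  instanceType: m5.large
  privateDnsName:
    # Answer DNS queries for the instance hostname with an A record.
    enableResourceNameDnsARecord: true
    enableResourceNameDnsAAAARecord: false
    hostnameType: resource-name
```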

            ResourceLifecycle (string alias)

            @@ -20530,6 +22929,7 @@ string (Appears on:AWSClusterSpec)

            +

            S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition.

            @@ -20873,7 +23273,7 @@ Tags

            Tags (map[string]string alias)

            -(Appears on:AWSClusterSpec, AWSMachineSpec, BuildParams, SecurityGroup, SubnetSpec, VPCSpec, AWSIAMRoleSpec, BootstrapUser, AWSIAMRoleSpec, BootstrapUser, AWSManagedControlPlaneSpec, OIDCIdentityProviderConfig, AWSManagedControlPlaneSpec, OIDCIdentityProviderConfig, AWSMachinePoolSpec, AWSManagedMachinePoolSpec, AutoScalingGroup, FargateProfileSpec, AWSMachinePoolSpec, AWSManagedMachinePoolSpec, AutoScalingGroup, FargateProfileSpec) +(Appears on:AWSClusterSpec, AWSMachineSpec, BuildParams, SecurityGroup, SubnetSpec, VPCSpec, AWSIAMRoleSpec, BootstrapUser, AWSIAMRoleSpec, BootstrapUser, AWSManagedControlPlaneSpec, OIDCIdentityProviderConfig, AWSManagedControlPlaneSpec, OIDCIdentityProviderConfig, RosaControlPlaneSpec, AWSMachinePoolSpec, AWSManagedMachinePoolSpec, AutoScalingGroup, FargateProfileSpec, AWSMachinePoolSpec, AWSManagedMachinePoolSpec, AutoScalingGroup, FargateProfileSpec, RosaMachinePoolSpec)

            Tags defines a map of tags.

            @@ -21166,17 +23566,31 @@ Defaults to Ordered

            + + + + @@ -21634,6 +24048,20 @@ InstanceMetadataOptions

            InstanceMetadataOptions defines the behavior for applying metadata to instances.

            + + + +
            emptyRoutesDefaultVPCSecurityGroup
            -bool +bool + +
            +(Optional) +

            EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress +and egress rules should be removed.

            +

By default, when creating a VPC, AWS creates a security group called default with ingress and egress +rules that allow traffic from anywhere. The group could be used as a potential attack surface, and +it’s generally suggested that the group rules are removed or modified appropriately.

            +

            NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.

            +
            +privateDnsHostnameTypeOnLaunch
            + +string
            (Optional) -

            EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress -and egress rules should be removed.

            -

            By default, when creating a VPC, AWS creates a security group called default with ingress and egress -rules that allow traffic from anywhere. The group could be used as a potential surface attack and -it’s generally suggested that the group rules are removed or modified appropriately.

            -

            NOTE: This only applies when the VPC is managed by the Cluster API AWS controller.

            +

            PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch. +For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) +or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name).

            +privateDnsName
            + + +PrivateDNSName + + +
            +(Optional) +

            PrivateDNSName is the options for the instance hostname.

            +
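Assuming both fields above belong to the managed VPC specification, as the surrounding descriptions suggest, a sketch of how they could be set on an AWSCluster; the CIDR block is a placeholder:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSCluster
metadata:
  name: example-cluster
spec:
  region: us-east-1
  network:
    vpc:
      cidrBlock: 10.0.0.0/16
      # Remove the permissive rules AWS adds to the default security group of a managed VPC.
      emptyRoutesDefaultVPCSecurityGroup: true
      # Base instance DNS names on the instance ID rather than the IPv4 address.
      privateDnsHostnameTypeOnLaunch: resource-name
```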

            AWSMachinePool @@ -21820,6 +24248,23 @@ If no value is supplied by user a default value of 300 seconds is set

            +defaultInstanceWarmup
            + + +Kubernetes meta/v1.Duration + + + + +(Optional) +

The amount of time, in seconds, until a new instance is considered to +have finished initializing and resource consumption to become stable +after it enters the InService state. +If no value is supplied by the user, a default value of 300 seconds is set.

            + + + + refreshPreferences
            @@ -22077,6 +24522,23 @@ If no value is supplied by user a default value of 300 seconds is set

            +defaultInstanceWarmup
            + +
            +Kubernetes meta/v1.Duration + + + + +(Optional) +

The amount of time, in seconds, until a new instance is considered to +have finished initializing and resource consumption to become stable +after it enters the InService state. +If no value is supplied by the user, a default value of 300 seconds is set.

            + + + + refreshPreferences
            @@ -23141,6 +25603,18 @@ Kubernetes meta/v1.Duration +defaultInstanceWarmup
            + +
            +Kubernetes meta/v1.Duration + + + + + + + + capacityRebalance
            bool @@ -23918,6 +26392,7 @@ bool
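A sketch of the new defaultInstanceWarmup knob on an AWSMachinePool; the sizing and launch template values are placeholders for a complete spec:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSMachinePool
metadata:
  name: example-pool
spec:
  minSize: 1
  maxSize: 3
  # Time until a new instance is considered initialized; defaults to 300 seconds when unset.
  defaultInstanceWarmup: 300s
  awsLaunchTemplate:
    instanceType: m5.large
```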

            ROSACluster

            +

            ROSACluster is the Schema for the ROSAClusters API.

            @@ -23991,6 +26466,7 @@ ROSAClusterStatus (Appears on:ROSACluster)

            +

            ROSAClusterSpec defines the desired state of ROSACluster.

            @@ -24022,7 +26498,7 @@ Cluster API api/v1beta1.APIEndpoint (Appears on:ROSACluster)

            -

            ROSAClusterStatus defines the observed state of ROSACluster

            +

            ROSAClusterStatus defines the observed state of ROSACluster.

            @@ -24114,6 +26590,19 @@ must be a valid DNS-1035 label, so it must consist of lower case alphanumeric an + + + + + + + + + + + + + + + + + + + + + + + +
            +version
            + +string + +
            +(Optional) +

            Version specifies the OpenShift version of the nodes associated with this machinepool. +ROSAControlPlane version is used if not set.

            +
            availabilityZone
            string @@ -24150,6 +26639,34 @@ map[string]string
            +taints
            + + +[]RosaTaint + + +
            +(Optional) +

            Taints specifies the taints to apply to the nodes of the machine pool

            +
            +additionalTags
            + + +Tags + + +
            +(Optional) +

            AdditionalTags are user-defined tags to be added on the underlying EC2 instances associated with this machine pool.

            +
            autoRepair
            bool @@ -24189,6 +26706,32 @@ required if Replicas is not configured

            +tuningConfigs
            + +[]string + +
            +(Optional) +

            TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool. +Tuning configs must already exist.

            +
            +additionalSecurityGroups
            + +[]string + +
            +(Optional) +

            AdditionalSecurityGroups is an optional set of security groups to associate +with all node instances of the machine pool.

            +
            providerIDList
            []string @@ -24199,6 +26742,24 @@ required if Replicas is not configured

ProviderIDList contains a ProviderID for each machine instance that’s currently managed by this machine pool.

            +nodeDrainGracePeriod
            + + +Kubernetes meta/v1.Duration + + +
            +(Optional) +

NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be +respected during upgrades. After this grace period, any workloads protected by Pod Disruption +Budgets that have not been successfully drained from a node will be forcibly evicted.

            +

Valid values are from 0 to 1 week (10080m|168h). +A value of 0 or an empty value means that the MachinePool can be drained without any time limitation.

            +
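Putting the new ROSAMachinePool fields together, a hedged sketch; the names, subnet, version and tag values are placeholders:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: ROSAMachinePool
metadata:
  name: example-pool
spec:
  nodePoolName: example-pool
  instanceType: m5.xlarge
  subnet: subnet-0123456789abcdef0
  # Defaults to the ROSAControlPlane version when omitted.
  version: "4.14.5"
  additionalTags:
    team: platform
  tuningConfigs:
    - example-tuning-config
  additionalSecurityGroups:
    - sg-0123456789abcdef0
  # PDB-protected workloads get up to this long to drain during upgrades (0 to 1 week).
  nodeDrainGracePeriod: 30m
```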
            @@ -24291,7 +26852,7 @@ during an instance refresh. The default is 90.

            RosaMachinePoolAutoScaling

            -(Appears on:RosaMachinePoolSpec) +(Appears on:DefaultMachinePoolSpec, RosaMachinePoolSpec)

            RosaMachinePoolAutoScaling specifies scaling options.

            @@ -24356,6 +26917,19 @@ must be a valid DNS-1035 label, so it must consist of lower case alphanumeric an +version
            + +string + + + +(Optional) +

            Version specifies the OpenShift version of the nodes associated with this machinepool. +ROSAControlPlane version is used if not set.

            + + + + availabilityZone
            string @@ -24392,6 +26966,34 @@ map[string]string +taints
            + + +[]RosaTaint + + + + +(Optional) +

            Taints specifies the taints to apply to the nodes of the machine pool

            + + + + +additionalTags
            + + +Tags + + + + +(Optional) +

            AdditionalTags are user-defined tags to be added on the underlying EC2 instances associated with this machine pool.

            + + + + autoRepair
            bool @@ -24431,6 +27033,32 @@ required if Replicas is not configured

            +tuningConfigs
            + +[]string + + + +(Optional) +

            TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool. +Tuning configs must already exist.

            + + + + +additionalSecurityGroups
            + +[]string + + + +(Optional) +

            AdditionalSecurityGroups is an optional set of security groups to associate +with all node instances of the machine pool.

            + + + + providerIDList
            []string @@ -24441,6 +27069,24 @@ required if Replicas is not configured

ProviderIDList contains a ProviderID for each machine instance that’s currently managed by this machine pool.

            + + +nodeDrainGracePeriod
            + + +Kubernetes meta/v1.Duration + + + + +(Optional) +

NodeDrainGracePeriod is the grace period for how long Pod Disruption Budget-protected workloads will be +respected during upgrades. After this grace period, any workloads protected by Pod Disruption +Budgets that have not been successfully drained from a node will be forcibly evicted.

            +

Valid values are from 0 to 1 week (10080m|168h). +A value of 0 or an empty value means that the MachinePool can be drained without any time limitation.

            + +

            RosaMachinePoolStatus @@ -24499,6 +27145,24 @@ Cluster API api/v1beta1.Conditions +failureMessage
            + +string + + + +(Optional) +

            FailureMessage will be set in the event that there is a terminal problem +reconciling the state and will be set to a descriptive error message.

            +

            This field should not be set for transitive errors that a controller +faces that are expected to be fixed automatically over +time (like service outages), but instead indicate that something is +fundamentally wrong with the spec or the configuration of +the controller, and that manual intervention is required.

            + + + + id
            string @@ -24510,6 +27174,61 @@ string +

            RosaTaint +

            +

            +(Appears on:RosaMachinePoolSpec) +

            +

            +

            RosaTaint represents a taint to be applied to a node.

            +

Field | Description
            +key
            + +string + +
            +

            The taint key to be applied to a node.

            +
            +value
            + +string + +
            +(Optional) +

            The taint value corresponding to the taint key.

            +
            +effect
            + + +Kubernetes core/v1.TaintEffect + + +
            +

            The effect of the taint on pods that do not tolerate the taint. +Valid effects are NoSchedule, PreferNoSchedule and NoExecute.

            +
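For example, a single taint entry on a ROSAMachinePool spec might look like the following fragment; the key and value are placeholders and the effect must be one of the core/v1 values listed above:

```yaml
spec:
  taints:
    - key: dedicated
      value: gpu
      effect: NoSchedule
```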

            SpotAllocationStrategy (string alias)

            diff --git a/docs/book/src/development/development.md b/docs/book/src/development/development.md index cea251e3ba..8b1f09eb90 100644 --- a/docs/book/src/development/development.md +++ b/docs/book/src/development/development.md @@ -5,7 +5,7 @@ ### Install prerequisites 1. Install [go][go] - - Get the latest patch version for go v1.21. + - Get the latest patch version for go v1.22. 2. Install [jq][jq] - `brew install jq` on macOS. - `chocolatey install jq` on Windows. diff --git a/docs/book/src/development/nightlies.md b/docs/book/src/development/nightlies.md new file mode 100644 index 0000000000..fc93cdd5fe --- /dev/null +++ b/docs/book/src/development/nightlies.md @@ -0,0 +1,33 @@ +# Nightly Builds + +Nightly builds are regular automated builds of the CAPA source code that occur every night. + +These builds are generated directly from the latest commit of source code on the main branch. + +Nightly builds serve several purposes: + +- **Early Testing**: They provide an opportunity for developers and testers to access the most recent changes in the codebase and identify any issues or bugs that may have been introduced. +- **Feedback Loop**: They facilitate a rapid feedback loop, enabling developers to receive feedback on their changes quickly, allowing them to iterate and improve the code more efficiently. +- **Preview of New Features**: Users and can get a preview of upcoming features or changes by testing nightly builds, although these builds may not always be stable enough for production use. + +Overall, nightly builds play a crucial role in software development by promoting user testing, early bug detection, and rapid iteration. + +CAPA Nightly build jobs run in Prow. For details on how this is configured you can check the [Periodics Jobs section](../topics/reference/jobs.md#periodics). + +## Usage + +To try a nightly build, you can download the latest built nightly CAPA manifests, you can find the available ones by executing the following command: +```bash +curl -sL -H 'Accept: application/json' "https://storage.googleapis.com/storage/v1/b/k8s-staging-cluster-api-aws/o" | jq -r '.items | map(select(.name | startswith("components/nightly_main"))) | .[] | [.timeCreated,.mediaLink] | @tsv' +``` +The output should look something like this: +``` +2024-05-03T08:03:09.087Z https://storage.googleapis.com/download/storage/v1/b/k8s-staging-cluster-api-aws/o/components%2Fnightly_main_2024050x?generation=1714723389033961&alt=media +2024-05-04T08:02:52.517Z https://storage.googleapis.com/download/storage/v1/b/k8s-staging-cluster-api-aws/o/components%2Fnightly_main_2024050y?generation=1714809772486582&alt=media +2024-05-05T08:02:45.840Z https://storage.googleapis.com/download/storage/v1/b/k8s-staging-cluster-api-aws/o/components%2Fnightly_main_2024050z?generation=1714896165803510&alt=media +``` + +Now visit the link for the manifest you want to download. This will automatically download the manifest for you. + +Once downloaded you can apply the manifest directly to your testing CAPI management cluster/namespace (e.g. with kubectl), as the downloaded CAPA manifest +will already contain the correct, corresponding CAPA nightly image reference. diff --git a/docs/book/src/development/releasing.md b/docs/book/src/development/releasing.md index 7a5c2d3761..ae94344ab1 100644 --- a/docs/book/src/development/releasing.md +++ b/docs/book/src/development/releasing.md @@ -7,16 +7,19 @@ ## Create tag, and build staging container images -1. Create a new local repository of (e.g. using `git clone`). +1. 
Please fork and clone your own repository with e.g. `git clone git@github.com:YourGitHubUsername/cluster-api-provider-aws.git`. `kpromo` uses the fork to build images from. +1. Add a git remote to the upstream project. `git remote add upstream git@github.com:kubernetes-sigs/cluster-api-provider-aws.git` 1. If this is a major or minor release, create a new release branch and push to GitHub, otherwise switch to it, e.g. `git checkout release-1.5`. 1. If this is a major or minor release, update `metadata.yaml` by adding a new section with the version, and make a commit. -1. Update the release branch on the repository, e.g. `git push origin HEAD:release-1.5`. +1. Update the release branch on the repository, e.g. `git push origin HEAD:release-1.5`. `origin` refers to the remote git reference to your fork. +1. Update the release branch on the repository, e.g. `git push upstream HEAD:release-1.5`. `upstream` refers to the upstream git reference. 1. Make sure your repo is clean by git standards. -1. Set environment variable `GITHUB_TOKEN` to a GitHub personal access token. The token must have write access to the `kubernetes-sigs/cluster-api-provider-aws` repository. -1. Set environment variables `PREVIOUS_VERSION` which is the last release tag and `VERSION` which is the current release version, e.g. `export PREVIOUS_VERSION=v1.4.0 VERSION=v1.5.0`, or `export PREVIOUS_VERSION=v1.5.0 VERSION=v1.5.1`). +1. Set environment variables which is the last release tag and `VERSION` which is the current release version, e.g. `export VERSION=v1.5.0`, or `export VERSION=v1.5.1`). _**Note**_: the version MUST contain a `v` in front. + _**Note**_: you must have a gpg signing configured with git and registered with GitHub. + 1. Create a tag `git tag -s -m $VERSION $VERSION`. `-s` flag is for GNU Privacy Guard (GPG) signing. -1. Make sure you have push permissions to the upstream CAPA repo. Push tag you've just created (`git push $VERSION`). +1. Make sure you have push permissions to the upstream CAPA repo. Push tag you've just created (`git push $VERSION`). Pushing this tag will kick off a GitHub Action that will create the release and attach the binaries and YAML templates to it. 1. A prow job will start running to push images to the staging repo, can be seen [here](https://testgrid.k8s.io/sig-cluster-lifecycle-image-pushes#post-cluster-api-provider-aws-push-images). The job is called "post-cluster-api-provider-aws-push-images," and is defined in . 1. When the job is finished, wait for the images to be created: `docker pull gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller:$VERSION`. You can also wrap this with a command to retry periodically, until the job is complete, e.g. `watch --interval 30 --chgexit docker pull <...>`. @@ -49,30 +52,34 @@ Promote the container images from the staging registry to the production registr docker pull registry.k8s.io/cluster-api-aws/cluster-api-aws-controller:${VERSION} ``` -## Create release artifacts, and a GitHub draft release -1. Again, make sure your repo is clean by git standards. -1. Export the current branch `export BRANCH=release-1.5` (`export BRANCH=main`)and run `make release`. -1. Run `make create-gh-release` to create a draft release on Github, copying the generated release notes from `out/CHANGELOG.md` into the draft. -1. Run `make upload-gh-artifacts` to upload artifacts from .out/ directory. You may run into API limit errors, so verify artifacts at next step. +## Verify and Publish the draft release + 1. 
Verify that all the files below are attached to the drafted release: 1. `clusterawsadm-darwin-amd64` + 1. `clusterawsadm-darwin-arm64` 1. `clusterawsadm-linux-amd64` + 1. `clusterawsadm-linux-arm64` + 1. `clusterawsadm-windows-amd64.exe` + 1. `clusterawsadm-windows-arm64.exe` 1. `infrastructure-components.yaml` 1. `cluster-template.yaml` 1. `cluster-template-machinepool.yaml` 1. `cluster-template-eks.yaml` + 1. `cluster-template-eks-ipv6.yaml` + 1. `cluster-template-eks-fargate.yaml` 1. `cluster-template-eks-managedmachinepool.yaml` 1. `cluster-template-eks-managedmachinepool-vpccni.yaml` 1. `cluster-template-eks-managedmachinepool-gpu.yaml` - 1. `eks-controlplane-components.yaml` - 1. `eks-bootstrap-components.yaml` + 1. `cluster-template-external-cloud-provider.yaml` + 1. `cluster-template-flatcar.yaml` + 1. `cluster-template-machinepool.yaml` + 1. `cluster-template-multitenancy-clusterclass.yaml` + 1. `cluster-template-rosa-machinepool.yaml` + 1. `cluster-template-rosa.yaml` + 1. `cluster-template-simple-clusterclass.yaml` 1. `metadata.yaml` -1. Finalise the release notes by editing the draft release. - _**Note**_: ONLY do this _after_ you verified that the promotion succeeded [here](https://testgrid.k8s.io/sig-k8s-infra-k8sio#post-k8sio-image-promo). - -## Publish the draft release - +1. Update the release description to link to the promotion image. 1. Publish release. Use the pre-release option for release candidate versions of Cluster API Provider AWS. 1. Email `kubernetes-sig-cluster-lifecycle@googlegroups.com` to announce the release. You can use this template for the email: diff --git a/docs/book/src/topics/bring-your-own-aws-infrastructure.md b/docs/book/src/topics/bring-your-own-aws-infrastructure.md index bd157fa28f..a5cd878a9f 100644 --- a/docs/book/src/topics/bring-your-own-aws-infrastructure.md +++ b/docs/book/src/topics/bring-your-own-aws-infrastructure.md @@ -274,3 +274,69 @@ The external system must provide all required fields within the spec of the AWSC Once the user has created externally managed AWSCluster, it is not allowed to convert it to CAPA managed cluster. However, converting from managed to externally managed is allowed. User should only use this feature if their cluster infrastructure lifecycle management has constraints that the reference implementation does not support. See [user stories](https://github.com/kubernetes-sigs/cluster-api/blob/10d89ceca938e4d3d94a1d1c2b60515bcdf39829/docs/proposals/20210203-externally-managed-cluster-infrastructure.md#user-stories) for more details. + + +## Bring your own (BYO) Public IPv4 addresses + +Cluster API also provides a mechanism to allocate Elastic IP from the existing Public IPv4 Pool that you brought to AWS[1]. + +Bringing your own Public IPv4 Pool (BYOIPv4) can be used as an alternative to buying Public IPs from AWS, also considering the changes in charging for this since February 2024[2]. + +Supported resources to BYO Public IPv4 Pool (`BYO Public IPv4`): +- NAT Gateways +- Network Load Balancer for API server +- Machines + +Use `BYO Public IPv4` when you have brought to AWS custom IPv4 CIDR blocks and want the cluster to automatically use IPs from the custom pool instead of Amazon-provided pools. + +### Prerequisites and limitations for BYO Public IPv4 Pool + +- BYOIPv4 is limited to AWS to selected regions. 
See more in [AWS Documentation for Regional availability](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-reg-avail) +- The IPv4 address must be provisioned and advertised to the AWS account before the cluster is installed +- The public IPv4 addresses is limited to the network border group that the CIDR block have been advertised[3][4], and the `NetworkSpec.ElasticIpPool.PublicIpv4Pool` must be the same of the cluster will be installed. +- Only NAT Gateways and the Network Load Balancer for API server will consume from the IPv4 pool defined in the network scope. +- The public IPv4 pool must be assigned to each machine to consume public IPv4 from a custom IPv4 pool. + +### Steps to set BYO Public IPv4 Pool to core infrastructure + +Currently, CAPA supports BYO Public IPv4 to core components NAT Gateways and Network Load Balancer for the internet-facing API server. + +To specify a Public IPv4 Pool for core components you must set the `spec.elasticIpPool` as follows: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSCluster +metadata: + name: aws-cluster-localzone +spec: + region: us-east-1 + networkSpec: + vpc: + elasticIpPool: + publicIpv4Pool: ipv4pool-ec2-0123456789abcdef0 + publicIpv4PoolFallbackOrder: amazon-pool +``` + +Then all the Elastic IPs will be created by consuming from the pool `ipv4pool-ec2-0123456789abcdef0`. + +### Steps to BYO Public IPv4 Pool to machines + +To create a machine consuming from a custom Public IPv4 Pool you must set the pool ID to the AWSMachine spec, then set the `PublicIP` to `true`: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSMachine +metadata: + name: byoip-s55p4-bootstrap +spec: + # placeholder for AWSMachine spec + elasticIpPool: + publicIpv4Pool: ipv4pool-ec2-0123456789abcdef0 + publicIpv4PoolFallbackOrder: amazon-pool + publicIP: true +``` + +[1] https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html +[2] https://aws.amazon.com/blogs/aws/new-aws-public-ipv4-address-charge-public-ip-insights/ +[3] https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-onboard +[4] https://docs.aws.amazon.com/cli/latest/reference/ec2/advertise-byoip-cidr.html diff --git a/docs/book/src/topics/eks/creating-a-cluster.md b/docs/book/src/topics/eks/creating-a-cluster.md index fcb85db130..0ef75009c6 100644 --- a/docs/book/src/topics/eks/creating-a-cluster.md +++ b/docs/book/src/topics/eks/creating-a-cluster.md @@ -34,4 +34,12 @@ kubectl --namespace=default get secret managed-test-user-kubeconfig \ This kubeconfig is used internally by CAPI and shouldn't be used outside of the management server. It is used by CAPI to perform operations, such as draining a node. The name of the secret that contains the kubeconfig will be `[cluster-name]-kubeconfig` where you need to replace **[cluster-name]** with the name of your cluster. Note that there is NO `-user` in the name. -The kubeconfig is regenerated every `sync-period` as the token that is embedded in the kubeconfig is only valid for a short period of time. When EKS support is enabled the maximum sync period is 10 minutes. If you try to set `--sync-period` to greater than 10 minutes then an error will be raised. 
+There are three keys in the CAPI kubeconfig for eks clusters: + +| keys | purpose | +|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| value | contains a complete kubeconfig with the cluster admin user and token embedded | +| relative | contains a kubeconfig with the cluster admin user, referencing the token file in a relative path - assumes you are mounting all the secret keys in the same dir | +| single-file | contains the same token embedded in the complete kubeconfig, it is separated into a single file so that existing APIMachinery can reload the token file when the secret is updated | + +The secret contents are regenerated every `sync-period` as the token that is embedded in the kubeconfig and token file is only valid for a short period of time. When EKS support is enabled the maximum sync period is 10 minutes. If you try to set `--sync-period` to greater than 10 minutes then an error will be raised. diff --git a/docs/book/src/topics/eks/pod-networking.md b/docs/book/src/topics/eks/pod-networking.md index 2eb0a266b9..1a9723c7a9 100644 --- a/docs/book/src/topics/eks/pod-networking.md +++ b/docs/book/src/topics/eks/pod-networking.md @@ -97,6 +97,26 @@ spec: disableVPCCNI: true ``` +If you are replacing Amazon VPC CNI with your own helm managed instance, you will need to set `AWSManagedControlPlane.spec.disableVPCCNI` to `true` and add `"aws.cluster.x-k8s.io/prevent-deletion": "true"` label on the Daemonset. This label is needed so `aws-node` daemonset is not reaped during CNI reconciliation. + +The following example shows how to label your aws-node Daemonset. + +```yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + ... + generation: 1 + labels: + app.kubernetes.io/instance: aws-vpc-cni + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: aws-node + app.kubernetes.io/version: v1.15.1 + helm.sh/chart: aws-vpc-cni-1.15.1 + aws.cluster.x-k8s.io/prevent-deletion: true +``` + > You cannot set **disableVPCCNI** to true if you are using the VPC CNI addon. Some alternative CNIs provide for the replacement of kube-proxy, such as in [Calico](https://projectcalico.docs.tigera.io/maintenance/ebpf/enabling-ebpf#configure-kube-proxy) and [Cilium](https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free/). When enabling the kube-proxy alternative, the kube-proxy installed by EKS must be deleted. This can be done via the **disable** property of **kubeProxy** in **AWSManagedControlPlane**: diff --git a/docs/book/src/topics/eks/prerequisites.md b/docs/book/src/topics/eks/prerequisites.md index c00520210f..ccaf34c0e2 100644 --- a/docs/book/src/topics/eks/prerequisites.md +++ b/docs/book/src/topics/eks/prerequisites.md @@ -1,6 +1,6 @@ # Prerequisites -To use EKS you must give the controller the required permissions. The easiest way to do this is by using `clusterawasadm`. For instructions on how to do this see the [prerequisites](../using-clusterawsadm-to-fulfill-prerequisites.md). +To use EKS you must give the controller the required permissions. The easiest way to do this is by using `clusterawsadm`. For instructions on how to do this see the [prerequisites](../using-clusterawsadm-to-fulfill-prerequisites.md). When using `clusterawsadm` and enabling EKS support a new IAM role will be created for you called **eks-controlplane.cluster-api-provider-aws.sigs.k8s.io**. 
This role is the IAM role that will be used for the EKS control plane if you don't specify your own role and if **EKSEnableIAM** isn't enabled (see the [enabling docs](enabling.md) for further information). diff --git a/docs/book/src/topics/iam-permissions.md b/docs/book/src/topics/iam-permissions.md index 217b9702fc..5b15dd060b 100644 --- a/docs/book/src/topics/iam-permissions.md +++ b/docs/book/src/topics/iam-permissions.md @@ -1,6 +1,6 @@ # IAM Permissions -## Required to use clusterawasadm to provision IAM roles via CloudFormation +## Required to use clusterawsadm to provision IAM roles via CloudFormation If using `clusterawsadm` to automate deployment of IAM roles via CloudFormation, you must have IAM administrative access as `clusterawsadm` will provision IAM diff --git a/docs/book/src/topics/provision-edge-zones.md b/docs/book/src/topics/provision-edge-zones.md new file mode 100644 index 0000000000..b8176aec87 --- /dev/null +++ b/docs/book/src/topics/provision-edge-zones.md @@ -0,0 +1,174 @@ +# Manage Local Zone subnets + +## Overview + +CAPA provides the option to manage network resources required to provision compute nodes +to Local Zone and Wavelength Zone locations. + +[AWS Local Zones](https://aws.amazon.com/about-aws/global-infrastructure/localzones/) +extends the cloud infrastructure to metropolitan regions, +allowing to deliver applications closer to the end-users, decreasing the +network latency. + +[AWS Wavelength Zones](https://aws.amazon.com/wavelength/) +extends the AWS infrastructure deployments infrastructure to carrier infrastructure, +allowing to deploy within communications service providers’ (CSP) 5G networks. + +When "edge zones" is mentioned in this document, it is referencing to AWS Local Zones and AWS Wavelength Zones. + +## Requirements and defaults + +For both Local Zones and Wavelength Zones ('edge zones'): + +- Subnets in edge zones are _not_ created by default. +- When you choose to CAPA manage edge zone's subnets, you also must specify the + regular zones (Availability Zones) you will create the cluster. +- IPv6 is not globally supported by AWS across Local Zones, + and is not supported in Wavelength zones, CAPA support is limited to IPv4 + subnets in edge zones. +- The subnets in edge zones will not be used by CAPA to create NAT Gateways, + Network Load Balancers, or provision Control Plane or Compute nodes by default. +- NAT Gateways are not globally available to edge zone's locations, the CAPA uses + the Parent Zone for the edge zone to create the NAT Gateway to allow the instances on + private subnets to egress traffic to the internet. +- The CAPA subnet controllers discovers the zone attributes `ZoneType` and + `ParentZoneName` for each subnet on creation, those fields are used to ensure subnets for + it's role. For example: only subnets with `ZoneType` with value `availability-zone` + can be used to create a load balancer for API. +- It is required to manually opt-in to each zone group for edge zones you are planning to create subnets. + +The following steps are example to describe the zones and opt-into an zone group for an Local Zone: + + - To check the zone group name for a Local Zone, you can use the [EC2 API `DescribeAvailabilityZones`][describe-availability-zones]. 
For example: +```sh +aws --region "" ec2 describe-availability-zones \ + --query 'AvailabilityZones[].[{ZoneName: ZoneName, GroupName: GroupName, Status: OptInStatus}]' \ + --filters Name=zone-type,Values=local-zone \ + --all-availability-zones +``` + + - To opt-int the zone group, you can use the [EC2 API `ModifyZoneAttributes`](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyAvailabilityZoneGroup.html): +```sh +aws ec2 modify-availability-zone-group \ + --group-name "" \ + --opt-in-status opted-in +``` + +## Installing managed clusters extending subnets to Local Zones + +To create a cluster with support of subnets on AWS Local Zones, add the `Subnets` stanza to your `AWSCluster.NetworkSpec`. Example: + +```yaml +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSCluster +metadata: + name: aws-cluster-localzone +spec: + region: us-east-1 + networkSpec: + vpc: + cidrBlock: "10.0.0.0/20" + subnets: + # regular zones (availability zones) + - availabilityZone: us-east-1a + cidrBlock: "10.0.0.0/24" + id: "cluster-subnet-private-us-east-1a" + isPublic: false + - availabilityZone: us-east-1a + cidrBlock: "10.0.1.0/24" + id: "cluster-subnet-public-us-east-1a" + isPublic: true + - availabilityZone: us-east-1b + cidrBlock: "10.0.3.0/24" + id: "cluster-subnet-private-us-east-1b" + isPublic: false + - availabilityZone: us-east-1b + cidrBlock: "10.0.4.0/24" + id: "cluster-subnet-public-us-east-1b" + isPublic: true + - availabilityZone: us-east-1c + cidrBlock: "10.0.5.0/24" + id: "cluster-subnet-private-us-east-1c" + isPublic: false + - availabilityZone: us-east-1c + cidrBlock: "10.0.6.0/24" + id: "cluster-subnet-public-us-east-1c" + isPublic: true + # Subnets in Local Zones of New York location (public and private) + - availabilityZone: us-east-1-nyc-1a + cidrBlock: "10.0.128.0/25" + id: "cluster-subnet-private-us-east-1-nyc-1a" + isPublic: false + - availabilityZone: us-east-1-nyc-1a + cidrBlock: "10.0.128.128/25" + id: "cluster-subnet-public-us-east-1-nyc-1a" + isPublic: true +``` + +## Installing managed clusters extending subnets to Wavelength Zones + +To create a cluster with support of subnets on AWS Wavelength Zones, add the `Subnets` stanza to your `AWSCluster.NetworkSpec`. Example: + +```yaml +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSCluster +metadata: + name: aws-cluster-wavelengthzone +spec: + region: us-east-1 + networkSpec: + vpc: + cidrBlock: "10.0.0.0/20" + subnets: + # + - availabilityZone: us-east-1-wl1-was-wlz-1 + cidrBlock: "10.0.128.0/25" + id: "cluster-subnet-private-us-east-1-wl1-was-wlz-1" + isPublic: false + - availabilityZone: us-east-1-wl1-was-wlz-1 + cidrBlock: "10.0.128.128/25" + id: "cluster-subnet-public-us-east-1-wl1-was-wlz-1" + isPublic: true +``` + +## Installing managed clusters extending subnets to Local and Wavelength Zones + +It is also possible to mix the creation across both Local and Wavelength zones. + +To create a cluster with support of edge zones, add the `Subnets` stanza to your `AWSCluster.NetworkSpec`. 
Example: + +```yaml +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSCluster +metadata: + name: aws-cluster-edge +spec: + region: us-east-1 + networkSpec: + vpc: + cidrBlock: "10.0.0.0/20" + subnets: + # + - availabilityZone: us-east-1-nyc-1a + cidrBlock: "10.0.128.0/25" + id: "cluster-subnet-private-us-east-1-nyc-1a" + isPublic: false + - availabilityZone: us-east-1-nyc-1a + cidrBlock: "10.0.128.128/25" + id: "cluster-subnet-public-us-east-1-nyc-1a" + isPublic: true + - availabilityZone: us-east-1-wl1-was-wlz-1 + cidrBlock: "10.0.129.0/25" + id: "cluster-subnet-private-us-east-1-wl1-was-wlz-1" + isPublic: false + - availabilityZone: us-east-1-wl1-was-wlz-1 + cidrBlock: "10.0.129.128/25" + id: "cluster-subnet-public-us-east-1-wl1-was-wlz-1" + isPublic: true +``` + + +[describe-availability-zones]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html diff --git a/docs/book/src/topics/rosa/creating-a-cluster.md b/docs/book/src/topics/rosa/creating-a-cluster.md index b88ab20d91..b28c19cbad 100644 --- a/docs/book/src/topics/rosa/creating-a-cluster.md +++ b/docs/book/src/topics/rosa/creating-a-cluster.md @@ -5,7 +5,7 @@ CAPA controller requires an API token in order to be able to provision ROSA clus 1. Visit [https://console.redhat.com/openshift/token](https://console.redhat.com/openshift/token) to retrieve your API authentication token -1. Create a credentials secret with the token to be referenced later by `ROSAControlePlane` +1. Create a credentials secret within the target namespace with the token to be referenced later by `ROSAControlePlane` ```shell kubectl create secret generic rosa-creds-secret \ --from-literal=ocmToken='eyJhbGciOiJIUzI1NiIsI....' \ @@ -37,10 +37,9 @@ Once Step 3 is done, you will be ready to proceed with creating a ROSA cluster u 1. Prepare the environment: ```bash export OPENSHIFT_VERSION="4.14.5" - export CLUSTER_NAME="capi-rosa-quickstart" export AWS_REGION="us-west-2" export AWS_AVAILABILITY_ZONE="us-west-2a" - export AWS_ACCOUNT_ID=" rosa-capi-cluster.yaml + clusterctl generate cluster --from templates/cluster-template-rosa.yaml > rosa-capi-cluster.yaml ``` + Note: The AWS role name must be no more than 64 characters in length. Otherwise an error will be returned. Truncate values exceeding 64 characters. -1. If a credentials secret was created earlier, edit `ROSAControlPlane` to refernce it: - +1. If a credentials secret was created earlier, edit `ROSAControlPlane` to reference it: ```yaml apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: ROSAControlPlane @@ -70,7 +69,34 @@ Once Step 3 is done, you will be ready to proceed with creating a ROSA cluster u ... ``` +1. Provide an AWS identity reference + ```yaml + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: ROSAControlPlane + metadata: + name: "capi-rosa-quickstart-control-plane" + spec: + identityRef: + kind: + name: + ... + ``` + + Otherwise, make sure the following `AWSClusterControllerIdentity` singleton exists in your management cluster: + ```yaml + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSClusterControllerIdentity + metadata: + name: "default" + spec: + allowedNamespaces: {} # matches all namespaces + ``` + + see [Multi-tenancy](../multitenancy.md) for more details + 1. 
Finally apply the manifest to create your Rosa cluster: ```shell kubectl apply -f rosa-capi-cluster.yaml ``` + +see [ROSAControlPlane CRD Reference](https://cluster-api-aws.sigs.k8s.io/crd/#controlplane.cluster.x-k8s.io/v1beta2.ROSAControlPlane) for all possible configurations. diff --git a/docs/book/src/topics/rosa/creating-rosa-machinepools.md b/docs/book/src/topics/rosa/creating-rosa-machinepools.md new file mode 100644 index 0000000000..8d78260a99 --- /dev/null +++ b/docs/book/src/topics/rosa/creating-rosa-machinepools.md @@ -0,0 +1,49 @@ +# Creating MachinePools + +Cluster API Provider AWS (CAPA) has experimental support for managed ROSA MachinePools through the infrastructure type `ROSAMachinePool`. A `ROSAMachinePool` is responsible for orchestrating and bootstraping a group of EC2 machines into kubernetes nodes. + +### Using `clusterctl` to deploy + +To deploy a MachinePool / ROSAMachinePool via `clusterctl generate` use the template located [here](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/templates/cluster-template-rosa-machinepool.yaml). + +Make sure to set up your environment as described [here](./creating-a-cluster.md#creating-the-cluster). + +```shell +clusterctl generate cluster my-cluster --from templates/cluster-template-rosa-machinepool > my-cluster.yaml +``` + +## Example + +Below is an example of the resources needed to create a ROSA MachinePool. + +```yaml +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachinePool +metadata: + name: "${CLUSTER_NAME}-pool-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: 1 + template: + spec: + clusterName: "${CLUSTER_NAME}" + bootstrap: + dataSecretName: "" + infrastructureRef: + name: "${CLUSTER_NAME}-pool-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: ROSAMachinePool +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: ROSAMachinePool +metadata: + name: "${CLUSTER_NAME}-pool-0" +spec: + nodePoolName: "nodepool-0" + instanceType: "m5.xlarge" + subnet: "${PRIVATE_SUBNET_ID}" + version: "${OPENSHIFT_VERSION}" +``` + +see [ROSAMachinePool CRD Reference](https://cluster-api-aws.sigs.k8s.io/crd/#infrastructure.cluster.x-k8s.io/v1beta2.ROSAMachinePool) for all possible configurations. diff --git a/docs/book/src/topics/rosa/enabling.md b/docs/book/src/topics/rosa/enabling.md index 86fdd96756..aeae9ab5e7 100644 --- a/docs/book/src/topics/rosa/enabling.md +++ b/docs/book/src/topics/rosa/enabling.md @@ -1,7 +1,8 @@ # Enabling ROSA Support -To enable support for ROSA clusters, the ROSA feature flag must be set to true. This can be done using the **EXP_ROSA** environment variable: +To enable support for ROSA clusters, the ROSA feature flag must be set to true. This can be done using the **EXP_ROSA** environment variable. +Make sure to set up your AWS environment first as described [here](https://cluster-api.sigs.k8s.io/user/quick-start.html). ```shell export EXP_ROSA="true" export EXP_MACHINE_POOL="true" diff --git a/docs/book/src/topics/rosa/external-auth.md b/docs/book/src/topics/rosa/external-auth.md new file mode 100644 index 0000000000..fb2702397c --- /dev/null +++ b/docs/book/src/topics/rosa/external-auth.md @@ -0,0 +1,113 @@ +# External Auth Providers (BYOI) + +ROSA allows you to Bring Your Own Identity (BYOI) to manage and authenticate cluster users. + +## Enabling + +To enable this feature, `enableExternalAuthProviders` field should be set to `true` on cluster creation. 
Changing this field afterwards will have no effect:
+```yaml
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+  name: "capi-rosa-quickstart-control-plane"
+spec:
+  enableExternalAuthProviders: true
+  ....
+```
+
+Note: This feature requires OpenShift version `4.15.5` or newer.
+
+## Usage
+
+After creating and configuring your OIDC provider of choice, the next step is to configure the ROSAControlPlane `externalAuthProviders` as follows:
+```yaml
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+kind: ROSAControlPlane
+metadata:
+  name: "capi-rosa-quickstart-control-plane"
+spec:
+  enableExternalAuthProviders: true
+  externalAuthProviders:
+  - name: my-oidc-provider
+    issuer:
+      issuerURL: https://login.microsoftonline.com//v2.0 # e.g. if using Microsoft Entra ID
+      audiences: # audiences that will be trusted by the kube-apiserver
+      - "audience1" # usually the client ID
+    claimMappings:
+      username:
+        claim: email
+        prefixPolicy: ""
+      groups:
+        claim: groups
+  ....
+```
+
+Note: `oidcProviders` only accepts one entry at the moment.
+
+## Accessing the cluster
+
+### Setting up RBAC
+
+When `enableExternalAuthProviders` is set to `true`, the ROSA provider will generate a temporary admin kubeconfig secret in the same namespace named `-bootstrap-kubeconfig`. This kubeconfig can be used to access the cluster and set up RBAC for OIDC users/groups.
+
+The following example binds the `cluster-admin` role to an OIDC group, giving all users in that group admin permissions.
+```shell
+kubectl get secret -bootstrap-kubeconfig -o jsonpath='{.data.value}' | base64 -d > /tmp/capi-admin-kubeconfig
+export KUBECONFIG=/tmp/capi-admin-kubeconfig
+
+kubectl create clusterrolebinding oidc-cluster-admins --clusterrole cluster-admin --group 
+```
+
+Note: The generated bootstrap kubeconfig is only valid for 24h and will not be usable afterwards. However, users can manually delete the secret object to trigger the generation of a new one, which will be valid for another 24h.
+
+### Login using the CLI
+
+The [kubelogin kubectl plugin](https://github.com/int128/kubelogin/tree/master) can be used to log in with OIDC credentials using the CLI.
+
+### Configuring OpenShift Console
+
+The OpenShift Console needs to be configured before it can be used to authenticate and log in to the cluster.
+1. Set up a new client in your OIDC provider with the following Redirect URL: `/auth/callback`. You can find the console URL in the status field of the `ROSAControlPlane` once the cluster is ready:
+   ```shell
+   kubectl get rosacontrolplane -o jsonpath='{.status.consoleURL}'
+   ```
+
+2. Create a new client secret in your OIDC provider and store the value in a Kubernetes secret in the same namespace as your cluster:
+   ```shell
+   kubectl create secret generic console-client-secret --from-literal=clientSecret=''
+   ```
+
+3. Configure the `ROSAControlPlane` external auth provider with the created client:
+   ```yaml
+   ---
+   apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+   kind: ROSAControlPlane
+   metadata:
+     name: "capi-rosa-quickstart-control-plane"
+   spec:
+     enableExternalAuthProviders: true
+     externalAuthProviders:
+     - name: my-oidc-provider
+       issuer:
+         issuerURL: https://login.microsoftonline.com//v2.0 # e.g. if using Microsoft Entra ID
+         audiences: # audiences that will be trusted by the kube-apiserver
+         - "audience1"
+         - # <----New
+       claimMappings:
+         username:
+           claim: email
+           prefixPolicy: ""
+         groups:
+           claim: groups
+       oidcClients: # <----New
+       - componentName: console
+         componentNamespace: openshift-console
+         clientID: 
+         clientSecret:
+           name: console-client-secret # secret name created in step 2
+     ....
+   ```
+
+See [ROSAControlPlane CRD Reference](https://cluster-api-aws.sigs.k8s.io/crd/#controlplane.cluster.x-k8s.io/v1beta2.ExternalAuthProvider) for all possible configurations.
diff --git a/docs/book/src/topics/rosa/index.md b/docs/book/src/topics/rosa/index.md
index 0fa6a87ada..fc6df78113 100644
--- a/docs/book/src/topics/rosa/index.md
+++ b/docs/book/src/topics/rosa/index.md
@@ -19,4 +19,8 @@ A new template is available in the templates folder for creating a managed ROSA
 ## SEE ALSO
 
 * [Enabling ROSA Support](enabling.md)
-* [Creating a cluster](creating-a-cluster.md)
\ No newline at end of file
+* [Creating a cluster](creating-a-cluster.md)
+* [Creating MachinePools](creating-rosa-machinepools.md)
+* [Upgrades](upgrades.md)
+* [External Auth Providers](external-auth.md)
+* [Support](support.md)
\ No newline at end of file
diff --git a/docs/book/src/topics/rosa/support.md b/docs/book/src/topics/rosa/support.md
new file mode 100644
index 0000000000..1d3dbed96a
--- /dev/null
+++ b/docs/book/src/topics/rosa/support.md
@@ -0,0 +1,21 @@
+# Creating an issue for ROSA
+
+When creating an issue for a ROSA-HCP cluster, include the logs for the capa-controller-manager and capi-controller-manager deployment pods. The logs can be saved to a text file using the commands below. Also include the YAML files for all the resources used to create the ROSA cluster:
+- `Cluster`
+- `ROSAControlPlane`
+- `MachinePool`
+- `ROSAMachinePool`
+
+```shell
+$ kubectl get pod -n capa-system
+NAME                                      READY   STATUS    RESTARTS   AGE
+capa-controller-manager-77f5b946b-sddcg   1/1     Running   1          3d3h
+
+$ kubectl logs -n capa-system capa-controller-manager-77f5b946b-sddcg > capa-controller-manager-logs.txt
+
+$ kubectl get pod -n capi-system
+NAME                                       READY   STATUS    RESTARTS   AGE
+capi-controller-manager-78dc897784-f8gpn   1/1     Running   18         26d
+
+$ kubectl logs -n capi-system capi-controller-manager-78dc897784-f8gpn > capi-controller-manager-logs.txt
+```
diff --git a/docs/book/src/topics/rosa/upgrades.md b/docs/book/src/topics/rosa/upgrades.md
new file mode 100644
index 0000000000..bcf6c22ff7
--- /dev/null
+++ b/docs/book/src/topics/rosa/upgrades.md
@@ -0,0 +1,15 @@
+# Upgrades
+
+## Control Plane Upgrade
+
+Upgrading the OpenShift version of the control plane is supported by the provider. To perform an upgrade, update the `version` in the spec of the `ROSAControlPlane`. Once the version has changed, the provider will handle the upgrade for you.
+
+The upgrade state can be checked in the conditions under `ROSAControlPlane.status`.
+
+## MachinePool Upgrade
+
+Upgrading the OpenShift version of MachinePools is supported by the provider and can be performed independently from Control Plane upgrades. To perform an upgrade, update the `version` in the spec of the `ROSAMachinePool`. Once the version has changed, the provider will handle the upgrade for you.
+
+The upgrade state can be checked in the conditions under `ROSAMachinePool.status`.
+
+The version of a MachinePool can't be greater than the Control Plane version.
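To make the upgrade flow above concrete, here is a minimal sketch of the two version bumps. The resource names and version numbers are placeholders; the `version` and `updateConfig.rollingUpdate` fields follow the `ROSAControlPlane` and `ROSAMachinePool` specs referenced in these docs, and the rest of each spec is elided.

```yaml
---
# Control plane upgrade: bump spec.version on the ROSAControlPlane.
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
kind: ROSAControlPlane
metadata:
  name: "capi-rosa-quickstart-control-plane"  # placeholder name
spec:
  version: "4.14.6"  # previously e.g. "4.14.5"; the provider schedules the upgrade
  ....
---
# MachinePool upgrade: bump spec.version on the ROSAMachinePool.
# The rolling-update settings below are optional and the values are illustrative.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: ROSAMachinePool
metadata:
  name: "capi-rosa-quickstart-pool-0"  # placeholder name
spec:
  version: "4.14.6"  # must not be greater than the control plane version
  updateConfig:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  ....
```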
diff --git a/docs/triage-party/Dockerfile b/docs/triage-party/Dockerfile index e777bd19db..27cc7cf8d2 100644 --- a/docs/triage-party/Dockerfile +++ b/docs/triage-party/Dockerfile @@ -15,7 +15,7 @@ # limitations under the License. -FROM golang:1.21.5 as builder +FROM golang:1.22.6 as builder RUN go get github.com/google/triage-party/cmd/server RUN go install github.com/google/triage-party/cmd/server@latest diff --git a/docs/triage-party/go.mod b/docs/triage-party/go.mod index d1e53f95a3..317d49f5bf 100644 --- a/docs/triage-party/go.mod +++ b/docs/triage-party/go.mod @@ -1,6 +1,6 @@ module triage-party-deployment -go 1.21 +go 1.22 require ( github.com/aws/aws-cdk-go/awscdk v1.110.0-devpreview diff --git a/docs/triage-party/triage-party-deployment.go b/docs/triage-party/triage-party-deployment.go index d7c278521d..53af6beac9 100644 --- a/docs/triage-party/triage-party-deployment.go +++ b/docs/triage-party/triage-party-deployment.go @@ -18,11 +18,9 @@ package main import ( "fmt" - - "github.com/aws/aws-cdk-go/awscdk" - "os" + "github.com/aws/aws-cdk-go/awscdk" "github.com/aws/aws-cdk-go/awscdk/awsecs" "github.com/aws/aws-cdk-go/awscdk/awsecspatterns" "github.com/aws/aws-cdk-go/awscdk/awselasticloadbalancingv2" diff --git a/exp/api/v1beta1/awsmanagedmachinepool_types.go b/exp/api/v1beta1/awsmanagedmachinepool_types.go index ec9f1ff3f5..48cee1e8cc 100644 --- a/exp/api/v1beta1/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta1/awsmanagedmachinepool_types.go @@ -37,6 +37,10 @@ const ( Al2x86_64GPU ManagedMachineAMIType = "AL2_x86_64_GPU" // Al2Arm64 is the Arm AMI type. Al2Arm64 ManagedMachineAMIType = "AL2_ARM_64" + // Al2023x86_64 is the AL2023 x86-64 AMI type. + Al2023x86_64 ManagedMachineAMIType = "AL2023_x86_64_STANDARD" + // Al2023Arm64 is the AL2023 Arm AMI type. + Al2023Arm64 ManagedMachineAMIType = "AL2023_ARM_64_STANDARD" ) // ManagedMachinePoolCapacityType specifies the capacity type to be used for the managed MachinePool. 
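For illustration, the new AL2023 AMI types added above can be selected on a managed machine pool through the existing `amiType` field. This is a minimal, hypothetical manifest: the pool name is a placeholder and the rest of the spec is elided.

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
  name: "managed-pool-al2023"  # placeholder name
spec:
  amiType: AL2023_x86_64_STANDARD  # or AL2023_ARM_64_STANDARD for arm64 instance types
  ....
```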
@@ -99,7 +103,7 @@ type AWSManagedMachinePoolSpec struct { AMIVersion *string `json:"amiVersion,omitempty"` // AMIType defines the AMI type - // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64;CUSTOM + // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64;AL2023_x86_64_STANDARD;AL2023_ARM_64_STANDARD;CUSTOM // +kubebuilder:default:=AL2_x86_64 // +optional AMIType *ManagedMachineAMIType `json:"amiType,omitempty"` diff --git a/exp/api/v1beta1/conversion.go b/exp/api/v1beta1/conversion.go index ff55f3b930..50a62f6bb4 100644 --- a/exp/api/v1beta1/conversion.go +++ b/exp/api/v1beta1/conversion.go @@ -57,6 +57,7 @@ func (src *AWSMachinePool) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.DefaultInstanceWarmup = restored.Spec.DefaultInstanceWarmup + dst.Spec.AWSLaunchTemplate.NonRootVolumes = restored.Spec.AWSLaunchTemplate.NonRootVolumes return nil } @@ -102,6 +103,7 @@ func (src *AWSManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.AWSLaunchTemplate = restored.Spec.AWSLaunchTemplate } dst.Spec.AWSLaunchTemplate.InstanceMetadataOptions = restored.Spec.AWSLaunchTemplate.InstanceMetadataOptions + dst.Spec.AWSLaunchTemplate.NonRootVolumes = restored.Spec.AWSLaunchTemplate.NonRootVolumes if restored.Spec.AWSLaunchTemplate.PrivateDNSName != nil { dst.Spec.AWSLaunchTemplate.PrivateDNSName = restored.Spec.AWSLaunchTemplate.PrivateDNSName diff --git a/exp/api/v1beta1/conversion_test.go b/exp/api/v1beta1/conversion_test.go index 5992c664be..3cedcf3342 100644 --- a/exp/api/v1beta1/conversion_test.go +++ b/exp/api/v1beta1/conversion_test.go @@ -21,7 +21,6 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index 869a3c13d4..4f50a8c20f 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -402,6 +402,7 @@ func autoConvert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *v1be out.ImageLookupBaseOS = in.ImageLookupBaseOS out.InstanceType = in.InstanceType out.RootVolume = (*apiv1beta2.Volume)(unsafe.Pointer(in.RootVolume)) + // WARNING: in.NonRootVolumes requires manual conversion: does not exist in peer-type out.SSHKeyName = (*string)(unsafe.Pointer(in.SSHKeyName)) out.VersionNumber = (*int64)(unsafe.Pointer(in.VersionNumber)) out.AdditionalSecurityGroups = *(*[]apiv1beta2.AWSResourceReference)(unsafe.Pointer(&in.AdditionalSecurityGroups)) diff --git a/exp/api/v1beta2/awsmachinepool_webhook.go b/exp/api/v1beta2/awsmachinepool_webhook.go index 41af26b9e9..541243c53f 100644 --- a/exp/api/v1beta2/awsmachinepool_webhook.go +++ b/exp/api/v1beta2/awsmachinepool_webhook.go @@ -82,6 +82,31 @@ func (r *AWSMachinePool) validateRootVolume() field.ErrorList { return allErrs } +func (r *AWSMachinePool) validateNonRootVolumes() field.ErrorList { + var allErrs field.ErrorList + + for _, volume := range r.Spec.AWSLaunchTemplate.NonRootVolumes { + if v1beta2.VolumeTypesProvisioned.Has(string(volume.Type)) && volume.IOPS == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.iops"), "iops required if type is 'io1' or 'io2'")) + } + + if volume.Throughput != nil { + if volume.Type != v1beta2.VolumeTypeGP3 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput is 
valid only for type 'gp3'")) + } + if *volume.Throughput < 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput must be nonnegative")) + } + } + + if volume.DeviceName == "" { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.deviceName"), "non root volume should have device name")) + } + } + + return allErrs +} + func (r *AWSMachinePool) validateSubnets() field.ErrorList { var allErrs field.ErrorList @@ -124,6 +149,7 @@ func (r *AWSMachinePool) ValidateCreate() (admission.Warnings, error) { allErrs = append(allErrs, r.validateDefaultCoolDown()...) allErrs = append(allErrs, r.validateRootVolume()...) + allErrs = append(allErrs, r.validateNonRootVolumes()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) allErrs = append(allErrs, r.validateSubnets()...) allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...) @@ -141,7 +167,7 @@ func (r *AWSMachinePool) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate will do any extra validation when updating a AWSMachinePool. -func (r *AWSMachinePool) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *AWSMachinePool) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { var allErrs field.ErrorList allErrs = append(allErrs, r.validateDefaultCoolDown()...) diff --git a/exp/api/v1beta2/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go index a9fd346ba5..c7e70fcf55 100644 --- a/exp/api/v1beta2/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go @@ -37,6 +37,10 @@ const ( Al2x86_64GPU ManagedMachineAMIType = "AL2_x86_64_GPU" // Al2Arm64 is the Arm AMI type. Al2Arm64 ManagedMachineAMIType = "AL2_ARM_64" + // Al2023x86_64 is the AL2023 x86-64 AMI type. + Al2023x86_64 ManagedMachineAMIType = "AL2023_x86_64_STANDARD" + // Al2023Arm64 is the AL2023 Arm AMI type. + Al2023Arm64 ManagedMachineAMIType = "AL2023_ARM_64_STANDARD" ) // ManagedMachinePoolCapacityType specifies the capacity type to be used for the managed MachinePool. @@ -104,7 +108,7 @@ type AWSManagedMachinePoolSpec struct { AMIVersion *string `json:"amiVersion,omitempty"` // AMIType defines the AMI type - // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64;CUSTOM + // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64;AL2023_x86_64_STANDARD;AL2023_ARM_64_STANDARD;CUSTOM // +kubebuilder:default:=AL2_x86_64 // +optional AMIType *ManagedMachineAMIType `json:"amiType,omitempty"` diff --git a/exp/api/v1beta2/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go index 45e8bf3923..2d052fae53 100644 --- a/exp/api/v1beta2/conditions_consts.go +++ b/exp/api/v1beta2/conditions_consts.go @@ -108,7 +108,11 @@ const ( RosaMachinePoolReadyCondition clusterv1.ConditionType = "RosaMchinePoolReady" // RosaMachinePoolUpgradingCondition condition reports whether ROSAMachinePool is upgrading or not. RosaMachinePoolUpgradingCondition clusterv1.ConditionType = "RosaMchinePoolUpgrading" + // WaitingForRosaControlPlaneReason used when the machine pool is waiting for // ROSA control plane infrastructure to be ready before proceeding. WaitingForRosaControlPlaneReason = "WaitingForRosaControlPlane" + + // RosaMachinePoolReconciliationFailedReason used to report failures while reconciling ROSAMachinePool. 
+ RosaMachinePoolReconciliationFailedReason = "ReconciliationFailed" ) diff --git a/exp/api/v1beta2/groupversion_info.go b/exp/api/v1beta2/groupversion_info.go index a54b837a42..c1a5f0bed2 100644 --- a/exp/api/v1beta2/groupversion_info.go +++ b/exp/api/v1beta2/groupversion_info.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package v1beta2 contains API Schema definitions for experimental v1beta2 API group +// Package v1beta2 contains API Schema definitions for experimental v1beta2 API group // +kubebuilder:object:generate=true // +groupName=infrastructure.cluster.x-k8s.io package v1beta2 diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go index ed08317c50..1b3ffa5d77 100644 --- a/exp/api/v1beta2/rosacluster_types.go +++ b/exp/api/v1beta2/rosacluster_types.go @@ -22,13 +22,14 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +// ROSAClusterSpec defines the desired state of ROSACluster. type ROSAClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` } -// ROSAClusterStatus defines the observed state of ROSACluster +// ROSAClusterStatus defines the observed state of ROSACluster. type ROSAClusterStatus struct { // Ready is when the ROSAControlPlane has a API server URL. // +optional @@ -47,6 +48,7 @@ type ROSAClusterStatus struct { // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes" // +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1 +// ROSACluster is the Schema for the ROSAClusters API. type ROSACluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/exp/api/v1beta2/rosamachinepool_types.go b/exp/api/v1beta2/rosamachinepool_types.go index a94738a944..3b591dde65 100644 --- a/exp/api/v1beta2/rosamachinepool_types.go +++ b/exp/api/v1beta2/rosamachinepool_types.go @@ -17,8 +17,11 @@ limitations under the License. package v1beta2 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -33,7 +36,7 @@ type RosaMachinePoolSpec struct { // +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$` NodePoolName string `json:"nodePoolName"` - // Version specifies the penshift version of the nodes associated with this machinepool. + // Version specifies the OpenShift version of the nodes associated with this machinepool. // ROSAControlPlane version is used if not set. 
// // +optional @@ -44,6 +47,8 @@ type RosaMachinePoolSpec struct { // +optional AvailabilityZone string `json:"availabilityZone,omitempty"` + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="subnet is immutable" + // +immutable // +optional Subnet string `json:"subnet,omitempty"` @@ -51,28 +56,80 @@ type RosaMachinePoolSpec struct { // +optional Labels map[string]string `json:"labels,omitempty"` + // Taints specifies the taints to apply to the nodes of the machine pool + // +optional + Taints []RosaTaint `json:"taints,omitempty"` + + // AdditionalTags are user-defined tags to be added on the underlying EC2 instances associated with this machine pool. + // +immutable + // +optional + AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"` + // AutoRepair specifies whether health checks should be enabled for machines - // in the NodePool. The default is false. + // in the NodePool. The default is true. + // +kubebuilder:default=true // +optional - // +kubebuilder:default=false AutoRepair bool `json:"autoRepair,omitempty"` // InstanceType specifies the AWS instance type - InstanceType string `json:"instanceType,omitempty"` + // + // +kubebuilder:validation:Required + InstanceType string `json:"instanceType"` // Autoscaling specifies auto scaling behaviour for this MachinePool. // required if Replicas is not configured // +optional Autoscaling *RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"` - // TODO(alberto): Enable and propagate this API input. - // Taints []*Taint `json:"taints,omitempty"` - // TuningConfigs []string `json:"tuningConfigs,omitempty"` - // Version *Version `json:"version,omitempty"` + // TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool. + // Tuning configs must already exist. + // +optional + TuningConfigs []string `json:"tuningConfigs,omitempty"` + + // AdditionalSecurityGroups is an optional set of security groups to associate + // with all node instances of the machine pool. + // + // +immutable + // +optional + AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"` // ProviderIDList contain a ProviderID for each machine instance that's currently managed by this machine pool. // +optional ProviderIDList []string `json:"providerIDList,omitempty"` + + // NodeDrainGracePeriod is grace period for how long Pod Disruption Budget-protected workloads will be + // respected during upgrades. After this grace period, any workloads protected by Pod Disruption + // Budgets that have not been successfully drained from a node will be forcibly evicted. + // + // Valid values are from 0 to 1 week(10080m|168h) . + // 0 or empty value means that the MachinePool can be drained without any time limitation. + // + // +optional + NodeDrainGracePeriod *metav1.Duration `json:"nodeDrainGracePeriod,omitempty"` + + // UpdateConfig specifies update configurations. + // + // +optional + UpdateConfig *RosaUpdateConfig `json:"updateConfig,omitempty"` +} + +// RosaTaint represents a taint to be applied to a node. +type RosaTaint struct { + // The taint key to be applied to a node. + // + // +kubebuilder:validation:Required + Key string `json:"key"` + // The taint value corresponding to the taint key. + // + // +kubebuilder:validation:Pattern:=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$` + // +optional + Value string `json:"value,omitempty"` + // The effect of the taint on pods that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. 
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute + Effect corev1.TaintEffect `json:"effect"` } // RosaMachinePoolAutoScaling specifies scaling options. @@ -83,6 +140,55 @@ type RosaMachinePoolAutoScaling struct { MaxReplicas int `json:"maxReplicas,omitempty"` } +// RosaUpdateConfig specifies update configuration +type RosaUpdateConfig struct { + // RollingUpdate specifies MaxUnavailable & MaxSurge number of nodes during update. + // + // +optional + RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` +} + +// RollingUpdate specifies MaxUnavailable & MaxSurge number of nodes during update. +type RollingUpdate struct { + // MaxUnavailable is the maximum number of nodes that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired nodes (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // + // MaxUnavailable can not be 0 if MaxSurge is 0, default is 0. + // Both MaxUnavailable & MaxSurge must use the same units (absolute value or percentage). + // + // Example: when MaxUnavailable is set to 30%, old nodes can be deleted down to 70% of + // desired nodes immediately when the rolling update starts. Once new nodes + // are ready, more old nodes be deleted, followed by provisioning new nodes, + // ensuring that the total number of nodes available at all times during the + // update is at least 70% of desired nodes. + // + // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" + // +kubebuilder:validation:XIntOrString + // +kubebuilder:default=0 + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + + // MaxSurge is the maximum number of nodes that can be provisioned above the desired number of nodes. + // Value can be an absolute number (ex: 5) or a percentage of desired nodes (ex: 10%). + // Absolute number is calculated from percentage by rounding up. + // + // MaxSurge can not be 0 if MaxUnavailable is 0, default is 1. + // Both MaxSurge & MaxUnavailable must use the same units (absolute value or percentage). + // + // Example: when MaxSurge is set to 30%, new nodes can be provisioned immediately + // when the rolling update starts, such that the total number of old and new + // nodes do not exceed 130% of desired nodes. Once old nodes have been + // deleted, new nodes can be provisioned, ensuring that total number of nodes + // running at any time during the update is at most 130% of desired nodes. + // + // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" + // +kubebuilder:validation:XIntOrString + // +kubebuilder:default=1 + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` +} + // RosaMachinePoolStatus defines the observed state of RosaMachinePool. 
type RosaMachinePoolStatus struct { // Ready denotes that the RosaMachinePool nodepool has joined diff --git a/exp/api/v1beta2/rosamachinepool_webhook.go b/exp/api/v1beta2/rosamachinepool_webhook.go index a3d0b9d227..d4fdaf00a5 100644 --- a/exp/api/v1beta2/rosamachinepool_webhook.go +++ b/exp/api/v1beta2/rosamachinepool_webhook.go @@ -2,9 +2,14 @@ package v1beta2 import ( "github.com/blang/semver" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -31,6 +36,12 @@ func (r *ROSAMachinePool) ValidateCreate() (warnings admission.Warnings, err err allErrs = append(allErrs, err) } + if err := r.validateNodeDrainGracePeriod(); err != nil { + allErrs = append(allErrs, err) + } + + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + if len(allErrs) == 0 { return nil, nil } @@ -44,12 +55,25 @@ func (r *ROSAMachinePool) ValidateCreate() (warnings admission.Warnings, err err // ValidateUpdate implements admission.Validator. func (r *ROSAMachinePool) ValidateUpdate(old runtime.Object) (warnings admission.Warnings, err error) { - var allErrs field.ErrorList + oldPool, ok := old.(*ROSAMachinePool) + if !ok { + return nil, apierrors.NewInvalid(GroupVersion.WithKind("ROSAMachinePool").GroupKind(), r.Name, field.ErrorList{ + field.InternalError(nil, errors.New("failed to convert old ROSAMachinePool to object")), + }) + } + var allErrs field.ErrorList if err := r.validateVersion(); err != nil { allErrs = append(allErrs, err) } + if err := r.validateNodeDrainGracePeriod(); err != nil { + allErrs = append(allErrs, err) + } + + allErrs = append(allErrs, validateImmutable(oldPool.Spec.AdditionalSecurityGroups, r.Spec.AdditionalSecurityGroups, "additionalSecurityGroups")...) + allErrs = append(allErrs, validateImmutable(oldPool.Spec.AdditionalTags, r.Spec.AdditionalTags, "additionalTags")...) + if len(allErrs) == 0 { return nil, nil } @@ -72,12 +96,51 @@ func (r *ROSAMachinePool) validateVersion() *field.Error { } _, err := semver.Parse(r.Spec.Version) if err != nil { - return field.Invalid(field.NewPath("spec.version"), r.Spec.Version, "version must be a valid semantic version") + return field.Invalid(field.NewPath("spec.version"), r.Spec.Version, "must be a valid semantic version") } return nil } +func (r *ROSAMachinePool) validateNodeDrainGracePeriod() *field.Error { + if r.Spec.NodeDrainGracePeriod == nil { + return nil + } + + if r.Spec.NodeDrainGracePeriod.Minutes() > 10080 { + return field.Invalid(field.NewPath("spec.nodeDrainGracePeriod"), r.Spec.NodeDrainGracePeriod, + "max supported duration is 1 week (10080m|168h)") + } + + return nil +} + +func validateImmutable(old, updated interface{}, name string) field.ErrorList { + var allErrs field.ErrorList + + if !cmp.Equal(old, updated) { + allErrs = append( + allErrs, + field.Invalid(field.NewPath("spec", name), updated, "field is immutable"), + ) + } + + return allErrs +} + // Default implements admission.Defaulter. 
func (r *ROSAMachinePool) Default() { + if r.Spec.NodeDrainGracePeriod == nil { + r.Spec.NodeDrainGracePeriod = &metav1.Duration{} + } + + if r.Spec.UpdateConfig == nil { + r.Spec.UpdateConfig = &RosaUpdateConfig{} + } + if r.Spec.UpdateConfig.RollingUpdate == nil { + r.Spec.UpdateConfig.RollingUpdate = &RollingUpdate{ + MaxUnavailable: ptr.To(intstr.FromInt32(0)), + MaxSurge: ptr.To(intstr.FromInt32(1)), + } + } } diff --git a/exp/api/v1beta2/types.go b/exp/api/v1beta2/types.go index ef589c2951..0bc4009a2e 100644 --- a/exp/api/v1beta2/types.go +++ b/exp/api/v1beta2/types.go @@ -96,6 +96,10 @@ type AWSLaunchTemplate struct { // +optional RootVolume *infrav1.Volume `json:"rootVolume,omitempty"` + // Configuration options for the non root storage volumes. + // +optional + NonRootVolumes []infrav1.Volume `json:"nonRootVolumes,omitempty"` + // SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string // (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) // +optional diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go index 867324e441..69ff149f48 100644 --- a/exp/api/v1beta2/zz_generated.deepcopy.go +++ b/exp/api/v1beta2/zz_generated.deepcopy.go @@ -21,7 +21,9 @@ limitations under the License. package v1beta2 import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/errors" @@ -95,6 +97,13 @@ func (in *AWSLaunchTemplate) DeepCopyInto(out *AWSLaunchTemplate) { *out = new(apiv1beta2.Volume) (*in).DeepCopyInto(*out) } + if in.NonRootVolumes != nil { + in, out := &in.NonRootVolumes, &out.NonRootVolumes + *out = make([]apiv1beta2.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.SSHKeyName != nil { in, out := &in.SSHKeyName, &out.SSHKeyName *out = new(string) @@ -1065,6 +1074,31 @@ func (in *RefreshPreferences) DeepCopy() *RefreshPreferences { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. +func (in *RollingUpdate) DeepCopy() *RollingUpdate { + if in == nil { + return nil + } + out := new(RollingUpdate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RosaMachinePoolAutoScaling) DeepCopyInto(out *RosaMachinePoolAutoScaling) { *out = *in @@ -1090,16 +1124,48 @@ func (in *RosaMachinePoolSpec) DeepCopyInto(out *RosaMachinePoolSpec) { (*out)[key] = val } } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]RosaTaint, len(*in)) + copy(*out, *in) + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(apiv1beta2.Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.Autoscaling != nil { in, out := &in.Autoscaling, &out.Autoscaling *out = new(RosaMachinePoolAutoScaling) **out = **in } + if in.TuningConfigs != nil { + in, out := &in.TuningConfigs, &out.TuningConfigs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdditionalSecurityGroups != nil { + in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.ProviderIDList != nil { in, out := &in.ProviderIDList, &out.ProviderIDList *out = make([]string, len(*in)) copy(*out, *in) } + if in.NodeDrainGracePeriod != nil { + in, out := &in.NodeDrainGracePeriod, &out.NodeDrainGracePeriod + *out = new(v1.Duration) + **out = **in + } + if in.UpdateConfig != nil { + in, out := &in.UpdateConfig, &out.UpdateConfig + *out = new(RosaUpdateConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaMachinePoolSpec. @@ -1139,6 +1205,41 @@ func (in *RosaMachinePoolStatus) DeepCopy() *RosaMachinePoolStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RosaTaint) DeepCopyInto(out *RosaTaint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaTaint. +func (in *RosaTaint) DeepCopy() *RosaTaint { + if in == nil { + return nil + } + out := new(RosaTaint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RosaUpdateConfig) DeepCopyInto(out *RosaUpdateConfig) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdate) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaUpdateConfig. +func (in *RosaUpdateConfig) DeepCopy() *RosaUpdateConfig { + if in == nil { + return nil + } + out := new(RosaUpdateConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SuspendProcessesTypes) DeepCopyInto(out *SuspendProcessesTypes) { *out = *in diff --git a/exp/controlleridentitycreator/awscontrolleridentity_controller.go b/exp/controlleridentitycreator/awscontrolleridentity_controller.go index 0060d712de..bc3a557529 100644 --- a/exp/controlleridentitycreator/awscontrolleridentity_controller.go +++ b/exp/controlleridentitycreator/awscontrolleridentity_controller.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package controlleridentitycreator provides a way to reconcile AWSClusterControllerIdentity instance. 
package controlleridentitycreator import ( diff --git a/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go b/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go index 94415dbca3..81f4087d32 100644 --- a/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go +++ b/exp/controlleridentitycreator/awscontrolleridentity_controller_test.go @@ -54,6 +54,6 @@ func TestAWSControllerIdentityController(t *testing.T) { return true } return false - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), "Eventually failed ensuring AWSClusterControllerIdentity instance is created") }) } diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 1a30c90314..741cdcdb10 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package controllers provides experimental API controllers. package controllers import ( @@ -585,7 +586,7 @@ func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.Map return func(ctx context.Context, o client.Object) []reconcile.Request { m, ok := o.(*expclusterv1.MachinePool) if !ok { - klog.Error("Expected a MachinePool but got a %T", o) + klog.Errorf("Expected a MachinePool but got a %T", o) } gk := gvk.GroupKind() diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go index 447ecd3fab..4902dbb7e7 100644 --- a/exp/controllers/awsmachinepool_controller_test.go +++ b/exp/controllers/awsmachinepool_controller_test.go @@ -472,15 +472,15 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) t.Run("ReconcileLaunchTemplate not mocked", func(t *testing.T) { - g := NewWithT(t) - setup(t, g) - reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`) - reconSvc = nil // not used - defer teardown(t, g) - launchTemplateIDExisting := "lt-existing" t.Run("nothing exists, so launch template and ASG must be created", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`) + reconSvc = nil // not used + defer teardown(t, g) + ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(nil, "", nil, nil) ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-abcdef123"), nil) ec2Svc.EXPECT().CreateLaunchTemplate(gomock.Any(), gomock.Eq(ptr.To[string]("ami-abcdef123")), gomock.Eq(userDataSecretKey), gomock.Eq([]byte("shell-script"))).Return("lt-ghijkl456", nil) @@ -497,6 +497,12 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) t.Run("launch template and ASG exist and need no update", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`) + reconSvc = nil // not used + defer teardown(t, g) + // Latest ID and version already stored, no need to retrieve it ms.AWSMachinePool.Status.LaunchTemplateID = launchTemplateIDExisting ms.AWSMachinePool.Status.LaunchTemplateVersion = ptr.To[string]("1") @@ -538,6 +544,12 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) t.Run("launch template and ASG exist and only AMI ID changed", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + 
reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`) + reconSvc = nil // not used + defer teardown(t, g) + // Latest ID and version already stored, no need to retrieve it ms.AWSMachinePool.Status.LaunchTemplateID = launchTemplateIDExisting ms.AWSMachinePool.Status.LaunchTemplateVersion = ptr.To[string]("1") @@ -585,6 +597,12 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) t.Run("launch template and ASG exist and only bootstrap data secret name changed", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`) + reconSvc = nil // not used + defer teardown(t, g) + // Latest ID and version already stored, no need to retrieve it ms.AWSMachinePool.Status.LaunchTemplateID = launchTemplateIDExisting ms.AWSMachinePool.Status.LaunchTemplateVersion = ptr.To[string]("1") @@ -635,6 +653,12 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) t.Run("launch template and ASG created from zero, then bootstrap config reference changes", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + reconciler.reconcileServiceFactory = nil // use real implementation, but keep EC2 calls mocked (`ec2ServiceFactory`) + reconSvc = nil // not used + defer teardown(t, g) + ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return(nil, "", nil, nil) ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any()).Return(ptr.To[string]("ami-abcdef123"), nil) ec2Svc.EXPECT().CreateLaunchTemplate(gomock.Any(), gomock.Eq(ptr.To[string]("ami-abcdef123")), gomock.Eq(userDataSecretKey), gomock.Eq([]byte("shell-script"))).Return("lt-ghijkl456", nil) @@ -650,7 +674,6 @@ func TestAWSMachinePoolReconciler(t *testing.T) { g.Expect(err).To(Succeed()) g.Expect(ms.AWSMachinePool.Status.LaunchTemplateID).ToNot(BeEmpty()) - g.Expect(ptr.Deref[string](ms.AWSMachinePool.Status.LaunchTemplateVersion, "")).ToNot(BeEmpty()) // Data secret name changes newBootstrapSecret := &corev1.Secret{ @@ -665,6 +688,10 @@ func TestAWSMachinePoolReconciler(t *testing.T) { g.Expect(testEnv.Create(ctx, newBootstrapSecret)).To(Succeed()) ms.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName = ptr.To[string](newBootstrapSecret.Name) + // Since `AWSMachinePool.status.launchTemplateVersion` isn't set yet, + // the controller will ask for the current version and then set the status. + ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("1", nil) + ec2Svc.EXPECT().GetLaunchTemplate(gomock.Eq("test")).Return( &expinfrav1.AWSLaunchTemplate{ Name: "test", @@ -773,7 +800,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) } -//TODO: This was taken from awsmachine_controller_test, i think it should be moved to elsewhere in both locations like test/helpers +//TODO: This was taken from awsmachine_controller_test, i think it should be moved to elsewhere in both locations like test/helpers. 
type conditionAssertion struct { conditionType clusterv1.ConditionType diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index cb2cf41ec4..41a8f15848 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -5,14 +5,22 @@ import ( "fmt" "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/blang/semver" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift/rosa/pkg/ocm" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -34,7 +42,7 @@ import ( "sigs.k8s.io/cluster-api/util/predicates" ) -// ROSAMachinePoolReconciler reconciles a RosaMachinePool object. +// ROSAMachinePoolReconciler reconciles a ROSAMachinePool object. type ROSAMachinePoolReconciler struct { client.Client Recorder record.EventRecorder @@ -48,7 +56,7 @@ func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ct gvk, err := apiutil.GVKForObject(new(expinfrav1.ROSAMachinePool), mgr.GetScheme()) if err != nil { - return errors.Wrapf(err, "failed to find GVK for RosaMachinePool") + return errors.Wrapf(err, "failed to find GVK for ROSAMachinePool") } rosaControlPlaneToRosaMachinePoolMap := rosaControlPlaneToRosaMachinePoolMapFunc(r.Client, gvk, log) return ctrl.NewControllerManagedBy(mgr). @@ -71,8 +79,9 @@ func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ct // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes;rosacontrolplanes/status,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools,verbs=get;list;watch;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosamachinepools/finalizers,verbs=update -// Reconcile reconciles RosaMachinePool. +// Reconcile reconciles ROSAMachinePool. 
func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { log := logger.FromContext(ctx) @@ -99,7 +108,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) if err != nil { log.Info("Failed to retrieve Cluster from MachinePool") - return reconcile.Result{}, nil + return ctrl.Result{}, nil } if annotations.IsPaused(cluster, rosaMachinePool) { @@ -116,7 +125,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ controlPlane := &rosacontrolplanev1.ROSAControlPlane{} if err := r.Client.Get(ctx, controlPlaneKey, controlPlane); err != nil { log.Info("Failed to retrieve ControlPlane from MachinePool") - return reconcile.Result{}, nil + return ctrl.Result{}, err } machinePoolScope, err := scope.NewRosaMachinePoolScope(scope.RosaMachinePoolScopeParams{ @@ -127,9 +136,10 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ MachinePool: machinePool, RosaMachinePool: rosaMachinePool, Logger: log, + Endpoints: r.Endpoints, }) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to create scope") + return ctrl.Result{}, errors.Wrap(err, "failed to create rosaMachinePool scope") } rosaControlPlaneScope, err := scope.NewROSAControlPlaneScope(scope.ROSAControlPlaneScopeParams{ @@ -137,12 +147,13 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ Cluster: cluster, ControlPlane: controlPlane, ControllerName: "rosaControlPlane", + Endpoints: r.Endpoints, }) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to create control plane scope") + return ctrl.Result{}, errors.Wrap(err, "failed to create rosaControlPlane scope") } - if !controlPlane.Status.Ready { + if !controlPlane.Status.Ready && controlPlane.ObjectMeta.DeletionTimestamp.IsZero() { log.Info("Control plane is not ready yet") err := machinePoolScope.RosaMchinePoolReadyFalse(expinfrav1.WaitingForRosaControlPlaneReason, "") return ctrl.Result{}, err @@ -167,7 +178,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.RosaMachinePoolScope, rosaControlPlaneScope *scope.ROSAControlPlaneScope, ) (ctrl.Result, error) { - machinePoolScope.Info("Reconciling RosaMachinePool") + machinePoolScope.Info("Reconciling ROSAMachinePool") if controllerutil.AddFinalizer(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolFinalizer) { if err := machinePoolScope.PatchObject(); err != nil { @@ -175,11 +186,11 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, } } - rosaClient, err := rosa.NewRosaClient(ctx, rosaControlPlaneScope) + ocmClient, err := rosa.NewOCMClient(ctx, rosaControlPlaneScope) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create a rosa client: %w", err) + // TODO: need to expose in status, as likely the credentials are invalid + return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err) } - defer rosaClient.Close() failureMessage, err := validateMachinePoolSpec(machinePoolScope) if err != nil { @@ -189,26 +200,61 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope.RosaMachinePool.Status.FailureMessage = failureMessage // dont' requeue because input is invalid and manual intervention is needed. 
return ctrl.Result{}, nil - } else { - machinePoolScope.RosaMachinePool.Status.FailureMessage = nil } + machinePoolScope.RosaMachinePool.Status.FailureMessage = nil rosaMachinePool := machinePoolScope.RosaMachinePool machinePool := machinePoolScope.MachinePool - controlPlane := machinePoolScope.ControlPlane - createdNodePool, found, err := rosaClient.GetNodePool(*controlPlane.Status.ID, rosaMachinePool.Spec.NodePoolName) + if rosaMachinePool.Spec.Autoscaling != nil && !annotations.ReplicasManagedByExternalAutoscaler(machinePool) { + // make sure cluster.x-k8s.io/replicas-managed-by annotation is set on CAPI MachinePool when autoscaling is enabled. + annotations.AddAnnotations(machinePool, map[string]string{ + clusterv1.ReplicasManagedByAnnotation: "rosa", + }) + if err := machinePoolScope.PatchCAPIMachinePoolObject(ctx); err != nil { + return ctrl.Result{}, err + } + } + + nodePool, found, err := ocmClient.GetNodePool(machinePoolScope.ControlPlane.Status.ID, rosaMachinePool.Spec.NodePoolName) if err != nil { return ctrl.Result{}, err } + if found { - // TODO (alberto): discover and store providerIDs from aws so the CAPI controller can match then to Nodes and report readiness. - rosaMachinePool.Status.Replicas = int32(createdNodePool.Status().CurrentReplicas()) - if createdNodePool.Replicas() == createdNodePool.Status().CurrentReplicas() && createdNodePool.Status().Message() == "" { + if rosaMachinePool.Spec.AvailabilityZone == "" { + // reflect the current AvailabilityZone in the spec if not set. + rosaMachinePool.Spec.AvailabilityZone = nodePool.AvailabilityZone() + } + + nodePool, err := r.updateNodePool(machinePoolScope, ocmClient, nodePool) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to ensure rosaMachinePool: %w", err) + } + + currentReplicas := int32(nodePool.Status().CurrentReplicas()) + if annotations.ReplicasManagedByExternalAutoscaler(machinePool) { + // Set MachinePool replicas to rosa autoscaling replicas + if *machinePool.Spec.Replicas != currentReplicas { + machinePoolScope.Info("Setting MachinePool replicas to rosa autoscaling replicas", + "local", *machinePool.Spec.Replicas, + "external", currentReplicas) + machinePool.Spec.Replicas = ¤tReplicas + if err := machinePoolScope.PatchCAPIMachinePoolObject(ctx); err != nil { + return ctrl.Result{}, err + } + } + } + if err := r.reconcileProviderIDList(ctx, machinePoolScope, nodePool); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile ProviderIDList: %w", err) + } + + rosaMachinePool.Status.Replicas = currentReplicas + if rosa.IsNodePoolReady(nodePool) { conditions.MarkTrue(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition) rosaMachinePool.Status.Ready = true - if err := r.reconcileMachinePoolVersion(machinePoolScope, rosaClient, createdNodePool); err != nil { + if err := r.reconcileMachinePoolVersion(machinePoolScope, ocmClient, nodePool); err != nil { return ctrl.Result{}, err } @@ -217,54 +263,32 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, conditions.MarkFalse(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition, - createdNodePool.Status().Message(), + nodePool.Status().Message(), clusterv1.ConditionSeverityInfo, "") - machinePoolScope.Info("waiting for NodePool to become ready", "state", createdNodePool.Status().Message()) + machinePoolScope.Info("waiting for NodePool to become ready", "state", nodePool.Status().Message()) // Requeue so that status.ready is set to true when the nodepool is fully created. 
return ctrl.Result{RequeueAfter: time.Second * 60}, nil } - npBuilder := cmv1.NewNodePool() - npBuilder.ID(rosaMachinePool.Spec.NodePoolName). - Labels(rosaMachinePool.Spec.Labels). - AutoRepair(rosaMachinePool.Spec.AutoRepair) - - if rosaMachinePool.Spec.Autoscaling != nil { - npBuilder = npBuilder.Autoscaling( - cmv1.NewNodePoolAutoscaling(). - MinReplica(rosaMachinePool.Spec.Autoscaling.MinReplicas). - MaxReplica(rosaMachinePool.Spec.Autoscaling.MaxReplicas)) - } else { - replicas := 1 - if machinePool.Spec.Replicas != nil { - replicas = int(*machinePool.Spec.Replicas) - } - npBuilder = npBuilder.Replicas(replicas) - } - - if rosaMachinePool.Spec.Subnet != "" { - npBuilder.Subnet(rosaMachinePool.Spec.Subnet) - } - - npBuilder.AWSNodePool(cmv1.NewAWSNodePool().InstanceType(rosaMachinePool.Spec.InstanceType)) - if rosaMachinePool.Spec.Version != "" { - npBuilder.Version(cmv1.NewVersion().ID(rosa.VersionID(rosaMachinePool.Spec.Version))) - } - + npBuilder := nodePoolBuilder(rosaMachinePool.Spec, machinePool.Spec) nodePoolSpec, err := npBuilder.Build() if err != nil { return ctrl.Result{}, fmt.Errorf("failed to build rosa nodepool: %w", err) } - createdNodePool, err = rosaClient.CreateNodePool(*controlPlane.Status.ID, nodePoolSpec) + nodePool, err = ocmClient.CreateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec) if err != nil { + conditions.MarkFalse(rosaMachinePool, + expinfrav1.RosaMachinePoolReadyCondition, + expinfrav1.RosaMachinePoolReconciliationFailedReason, + clusterv1.ConditionSeverityError, + "failed to create ROSAMachinePool: %s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create nodepool: %w", err) } - machinePoolScope.RosaMachinePool.Status.ID = createdNodePool.ID() - + machinePoolScope.RosaMachinePool.Status.ID = nodePool.ID() return ctrl.Result{}, nil } @@ -274,20 +298,21 @@ func (r *ROSAMachinePoolReconciler) reconcileDelete( ) error { machinePoolScope.Info("Reconciling deletion of RosaMachinePool") - rosaClient, err := rosa.NewRosaClient(ctx, rosaControlPlaneScope) + ocmClient, err := rosa.NewOCMClient(ctx, rosaControlPlaneScope) if err != nil { - return fmt.Errorf("failed to create a rosa client: %w", err) + // TODO: need to expose in status, as likely the credentials are invalid + return fmt.Errorf("failed to create OCM client: %w", err) } - defer rosaClient.Close() - nodePool, found, err := rosaClient.GetNodePool(*machinePoolScope.ControlPlane.Status.ID, machinePoolScope.NodePoolName()) + nodePool, found, err := ocmClient.GetNodePool(machinePoolScope.ControlPlane.Status.ID, machinePoolScope.NodePoolName()) if err != nil { return err } if found { - if err := rosaClient.DeleteNodePool(*machinePoolScope.ControlPlane.Status.ID, nodePool.ID()); err != nil { + if err := ocmClient.DeleteNodePool(machinePoolScope.ControlPlane.Status.ID, nodePool.ID()); err != nil { return err } + machinePoolScope.Info("Successfully deleted NodePool") } controllerutil.RemoveFinalizer(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolFinalizer) @@ -295,25 +320,21 @@ func (r *ROSAMachinePoolReconciler) reconcileDelete( return nil } -func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope *scope.RosaMachinePoolScope, rosaClient *rosa.RosaClient, nodePool *cmv1.NodePool) error { +func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope *scope.RosaMachinePoolScope, ocmClient *ocm.Client, nodePool *cmv1.NodePool) error { version := machinePoolScope.RosaMachinePool.Spec.Version - if version == "" { - version = 
machinePoolScope.ControlPlane.Spec.Version - } - - if version == rosa.RawVersionID(nodePool.Version()) { + if version == "" || version == rosa.RawVersionID(nodePool.Version()) { conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") return nil } - clusterID := *machinePoolScope.ControlPlane.Status.ID - scheduledUpgrade, err := rosaClient.CheckNodePoolExistingScheduledUpgrade(clusterID, nodePool) + clusterID := machinePoolScope.ControlPlane.Status.ID + _, scheduledUpgrade, err := ocmClient.GetHypershiftNodePoolUpgrade(clusterID, machinePoolScope.ControlPlane.Spec.RosaClusterName, nodePool.ID()) if err != nil { return fmt.Errorf("failed to get existing scheduled upgrades: %w", err) } if scheduledUpgrade == nil { - scheduledUpgrade, err = rosaClient.ScheduleNodePoolUpgrade(clusterID, nodePool, version, time.Now()) + scheduledUpgrade, err = rosa.ScheduleNodePoolUpgrade(ocmClient, clusterID, nodePool, version, time.Now()) if err != nil { return fmt.Errorf("failed to schedule nodePool upgrade to version %s: %w", version, err) } @@ -335,6 +356,58 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope return nil } +func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaMachinePoolScope, ocmClient *ocm.Client, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { + machinePool := machinePoolScope.RosaMachinePool.DeepCopy() + // default all fields before comparing, so that nil/unset fields don't cause an unnecessary update call. + machinePool.Default() + desiredSpec := machinePool.Spec + + specDiff := computeSpecDiff(desiredSpec, nodePool) + if specDiff == "" { + // no changes detected. + return nodePool, nil + } + machinePoolScope.Info("MachinePool spec diff detected", "diff", specDiff) + + // zero-out fields that shouldn't be part of the update call. + desiredSpec.Version = "" + desiredSpec.AdditionalSecurityGroups = nil + desiredSpec.AdditionalTags = nil + + npBuilder := nodePoolBuilder(desiredSpec, machinePoolScope.MachinePool.Spec) + nodePoolSpec, err := npBuilder.Build() + if err != nil { + return nil, fmt.Errorf("failed to build nodePool spec: %w", err) + } + + updatedNodePool, err := ocmClient.UpdateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec) + if err != nil { + conditions.MarkFalse(machinePoolScope.RosaMachinePool, + expinfrav1.RosaMachinePoolReadyCondition, + expinfrav1.RosaMachinePoolReconciliationFailedReason, + clusterv1.ConditionSeverityError, + "failed to update ROSAMachinePool: %s", err.Error()) + return nil, fmt.Errorf("failed to update nodePool: %w", err) + } + + return updatedNodePool, nil +} + +func computeSpecDiff(desiredSpec expinfrav1.RosaMachinePoolSpec, nodePool *cmv1.NodePool) string { + currentSpec := nodePoolToRosaMachinePoolSpec(nodePool) + + ignoredFields := []string{ + "ProviderIDList", // providerIDList is set by the controller. + "Version", // Version changes are reconciled separately. + "AdditionalTags", // AdditionalTags day2 changes not supported. + "AdditionalSecurityGroups", // AdditionalSecurityGroups day2 changes not supported. + } + + return cmp.Diff(desiredSpec, currentSpec, + cmpopts.EquateEmpty(), // ensures empty non-nil slices and nil slices are considered equal. 
+ cmpopts.IgnoreFields(currentSpec, ignoredFields...)) +} + func validateMachinePoolSpec(machinePoolScope *scope.RosaMachinePoolScope) (*string, error) { if machinePoolScope.RosaMachinePool.Spec.Version == "" { return nil, nil @@ -358,6 +431,179 @@ func validateMachinePoolSpec(machinePoolScope *scope.RosaMachinePoolScope) (*str return nil, nil } +func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec expclusterv1.MachinePoolSpec) *cmv1.NodePoolBuilder { + npBuilder := cmv1.NewNodePool().ID(rosaMachinePoolSpec.NodePoolName). + Labels(rosaMachinePoolSpec.Labels). + AutoRepair(rosaMachinePoolSpec.AutoRepair) + + if rosaMachinePoolSpec.TuningConfigs != nil { + npBuilder = npBuilder.TuningConfigs(rosaMachinePoolSpec.TuningConfigs...) + } + + if len(rosaMachinePoolSpec.Taints) > 0 { + taintBuilders := []*cmv1.TaintBuilder{} + for _, taint := range rosaMachinePoolSpec.Taints { + newTaintBuilder := cmv1.NewTaint().Key(taint.Key).Value(taint.Value).Effect(string(taint.Effect)) + taintBuilders = append(taintBuilders, newTaintBuilder) + } + npBuilder = npBuilder.Taints(taintBuilders...) + } + + if rosaMachinePoolSpec.Autoscaling != nil { + npBuilder = npBuilder.Autoscaling( + cmv1.NewNodePoolAutoscaling(). + MinReplica(rosaMachinePoolSpec.Autoscaling.MinReplicas). + MaxReplica(rosaMachinePoolSpec.Autoscaling.MaxReplicas)) + } else { + replicas := 1 + if machinePoolSpec.Replicas != nil { + replicas = int(*machinePoolSpec.Replicas) + } + npBuilder = npBuilder.Replicas(replicas) + } + + if rosaMachinePoolSpec.Subnet != "" { + npBuilder.Subnet(rosaMachinePoolSpec.Subnet) + } + + awsNodePool := cmv1.NewAWSNodePool().InstanceType(rosaMachinePoolSpec.InstanceType) + if rosaMachinePoolSpec.AdditionalSecurityGroups != nil { + awsNodePool = awsNodePool.AdditionalSecurityGroupIds(rosaMachinePoolSpec.AdditionalSecurityGroups...) 
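The computeSpecDiff helper above is what gates the day-2 update path: the desired RosaMachinePoolSpec is compared against a spec rebuilt from the live OCM NodePool, and an empty diff means no UpdateNodePool call is made. Below is a minimal, self-contained sketch of the same go-cmp pattern on a hypothetical struct (poolSpec and its fields are illustrative, not CAPA types): cmpopts.EquateEmpty stops nil versus empty maps and slices from registering as drift, and cmpopts.IgnoreFields drops fields that are reconciled elsewhere, mirroring the ignoredFields list.

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// poolSpec is a stand-in for the real machine-pool spec type.
type poolSpec struct {
	InstanceType string
	Labels       map[string]string
	Version      string // upgraded through a separate flow, so ignored in the diff
}

func main() {
	desired := poolSpec{InstanceType: "m5.xlarge", Labels: map[string]string{}}
	current := poolSpec{InstanceType: "m5.large", Labels: nil, Version: "4.14.5"}

	// EquateEmpty: a nil map and an empty map are not drift.
	// IgnoreFields: Version differences are handled by the upgrade path, not here.
	diff := cmp.Diff(desired, current,
		cmpopts.EquateEmpty(),
		cmpopts.IgnoreFields(poolSpec{}, "Version"))

	if diff != "" {
		fmt.Println("spec drift detected:")
		fmt.Println(diff)
	}
}
```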
+ } + if rosaMachinePoolSpec.AdditionalTags != nil { + awsNodePool = awsNodePool.Tags(rosaMachinePoolSpec.AdditionalTags) + } + npBuilder.AWSNodePool(awsNodePool) + + if rosaMachinePoolSpec.Version != "" { + npBuilder.Version(cmv1.NewVersion().ID(ocm.CreateVersionID(rosaMachinePoolSpec.Version, ocm.DefaultChannelGroup))) + } + + if rosaMachinePoolSpec.NodeDrainGracePeriod != nil { + valueBuilder := cmv1.NewValue().Value(rosaMachinePoolSpec.NodeDrainGracePeriod.Minutes()).Unit("minutes") + npBuilder.NodeDrainGracePeriod(valueBuilder) + } + + if rosaMachinePoolSpec.UpdateConfig != nil { + configMgmtBuilder := cmv1.NewNodePoolManagementUpgrade() + + if rollingUpdate := rosaMachinePoolSpec.UpdateConfig.RollingUpdate; rollingUpdate != nil { + if rollingUpdate.MaxSurge != nil { + configMgmtBuilder = configMgmtBuilder.MaxSurge(rollingUpdate.MaxSurge.String()) + } + if rollingUpdate.MaxUnavailable != nil { + configMgmtBuilder = configMgmtBuilder.MaxUnavailable(rollingUpdate.MaxUnavailable.String()) + } + } + + npBuilder = npBuilder.ManagementUpgrade(configMgmtBuilder) + } + + return npBuilder +} + +func nodePoolToRosaMachinePoolSpec(nodePool *cmv1.NodePool) expinfrav1.RosaMachinePoolSpec { + spec := expinfrav1.RosaMachinePoolSpec{ + NodePoolName: nodePool.ID(), + Version: rosa.RawVersionID(nodePool.Version()), + AvailabilityZone: nodePool.AvailabilityZone(), + Subnet: nodePool.Subnet(), + Labels: nodePool.Labels(), + AutoRepair: nodePool.AutoRepair(), + InstanceType: nodePool.AWSNodePool().InstanceType(), + TuningConfigs: nodePool.TuningConfigs(), + AdditionalSecurityGroups: nodePool.AWSNodePool().AdditionalSecurityGroupIds(), + // nodePool.AWSNodePool().Tags() returns all tags including "system" tags if "fetchUserTagsOnly" parameter is not specified. + // TODO: enable when AdditionalTags day2 changes is supported. + // AdditionalTags: nodePool.AWSNodePool().Tags(), + } + + if nodePool.Autoscaling() != nil { + spec.Autoscaling = &expinfrav1.RosaMachinePoolAutoScaling{ + MinReplicas: nodePool.Autoscaling().MinReplica(), + MaxReplicas: nodePool.Autoscaling().MaxReplica(), + } + } + if nodePool.Taints() != nil { + rosaTaints := make([]expinfrav1.RosaTaint, 0, len(nodePool.Taints())) + for _, taint := range nodePool.Taints() { + rosaTaints = append(rosaTaints, expinfrav1.RosaTaint{ + Key: taint.Key(), + Value: taint.Value(), + Effect: corev1.TaintEffect(taint.Effect()), + }) + } + spec.Taints = rosaTaints + } + if nodePool.NodeDrainGracePeriod() != nil { + spec.NodeDrainGracePeriod = &metav1.Duration{ + Duration: time.Minute * time.Duration(nodePool.NodeDrainGracePeriod().Value()), + } + } + if nodePool.ManagementUpgrade() != nil { + spec.UpdateConfig = &expinfrav1.RosaUpdateConfig{ + RollingUpdate: &expinfrav1.RollingUpdate{}, + } + if nodePool.ManagementUpgrade().MaxSurge() != "" { + spec.UpdateConfig.RollingUpdate.MaxSurge = ptr.To(intstr.Parse(nodePool.ManagementUpgrade().MaxSurge())) + } + if nodePool.ManagementUpgrade().MaxUnavailable() != "" { + spec.UpdateConfig.RollingUpdate.MaxUnavailable = ptr.To(intstr.Parse(nodePool.ManagementUpgrade().MaxUnavailable())) + } + } + + return spec +} + +func (r *ROSAMachinePoolReconciler) reconcileProviderIDList(ctx context.Context, machinePoolScope *scope.RosaMachinePoolScope, nodePool *cmv1.NodePool) error { + tags := nodePool.AWSNodePool().Tags() + if len(tags) == 0 { + // can't identify EC2 instances belonging to this NodePool without tags. 
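nodePoolBuilder and nodePoolToRosaMachinePoolSpec above are meant to be inverses: one turns the CAPA spec into an OCM NodePool request, the other rebuilds a spec from the NodePool that OCM returns, which is what lets computeSpecDiff (and the new unit test added further down) compare the two directly. The sketch below shows the same ocm-sdk-go builder/accessor round trip, restricted to methods that already appear in this change; the values are made up and the wiring is illustrative rather than a copy of the controller code.

```go
package main

import (
	"fmt"

	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
)

func main() {
	// Build a NodePool the way nodePoolBuilder does, via the fluent builder.
	nodePool, err := cmv1.NewNodePool().
		ID("workers-0").
		Labels(map[string]string{"tier": "workers"}).
		AutoRepair(true).
		Subnet("subnet-0123456789abcdef0").
		AWSNodePool(cmv1.NewAWSNodePool().InstanceType("m5.xlarge")).
		Build()
	if err != nil {
		panic(err)
	}

	// Read the same fields back through the generated accessors, which is
	// all nodePoolToRosaMachinePoolSpec relies on.
	fmt.Println(nodePool.ID(), nodePool.AutoRepair(), nodePool.AWSNodePool().InstanceType())
}
```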
+ return nil + } + + ec2Svc := scope.NewEC2Client(machinePoolScope, machinePoolScope, &machinePoolScope.Logger, machinePoolScope.InfraCluster()) + response, err := ec2Svc.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{ + Filters: buildEC2FiltersFromTags(tags), + }) + if err != nil { + return err + } + + var providerIDList []string + for _, reservation := range response.Reservations { + for _, instance := range reservation.Instances { + providerID := scope.GenerateProviderID(*instance.Placement.AvailabilityZone, *instance.InstanceId) + providerIDList = append(providerIDList, providerID) + } + } + + machinePoolScope.RosaMachinePool.Spec.ProviderIDList = providerIDList + return nil +} + +func buildEC2FiltersFromTags(tags map[string]string) []*ec2.Filter { + filters := make([]*ec2.Filter, len(tags)+1) + for key, value := range tags { + filters = append(filters, &ec2.Filter{ + Name: ptr.To(fmt.Sprintf("tag:%s", key)), + Values: aws.StringSlice([]string{ + value, + }), + }) + } + + // only list instances that are running or just started + filters = append(filters, &ec2.Filter{ + Name: ptr.To("instance-state-name"), + Values: aws.StringSlice([]string{ + "running", "pending", + }), + }) + + return filters +} + func rosaControlPlaneToRosaMachinePoolMapFunc(c client.Client, gvk schema.GroupVersionKind, log logger.Wrapper) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { rosaControlPlane, ok := o.(*rosacontrolplanev1.ROSAControlPlane) diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go new file mode 100644 index 0000000000..0ff8ae0c83 --- /dev/null +++ b/exp/controllers/rosamachinepool_controller_test.go @@ -0,0 +1,62 @@ +package controllers + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +func TestNodePoolToRosaMachinePoolSpec(t *testing.T) { + g := NewWithT(t) + + rosaMachinePoolSpec := expinfrav1.RosaMachinePoolSpec{ + NodePoolName: "test-nodepool", + Version: "4.14.5", + Subnet: "subnet-id", + AutoRepair: true, + InstanceType: "m5.large", + TuningConfigs: []string{"config1"}, + NodeDrainGracePeriod: &metav1.Duration{ + Duration: time.Minute * 10, + }, + UpdateConfig: &expinfrav1.RosaUpdateConfig{ + RollingUpdate: &expinfrav1.RollingUpdate{ + MaxSurge: ptr.To(intstr.FromInt32(3)), + MaxUnavailable: ptr.To(intstr.FromInt32(5)), + }, + }, + AdditionalSecurityGroups: []string{ + "id-1", + "id-2", + }, + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + Taints: []expinfrav1.RosaTaint{ + { + Key: "myKey", + Value: "myValue", + Effect: corev1.TaintEffectNoExecute, + }, + }, + } + + machinePoolSpec := expclusterv1.MachinePoolSpec{ + Replicas: ptr.To[int32](2), + } + + nodePoolBuilder := nodePoolBuilder(rosaMachinePoolSpec, machinePoolSpec) + nodePoolSpec, err := nodePoolBuilder.Build() + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(computeSpecDiff(rosaMachinePoolSpec, nodePoolSpec)).To(BeEmpty()) +} diff --git a/exp/doc.go b/exp/doc.go index 1c9b3ddc0b..84020d8a62 100644 --- a/exp/doc.go +++ b/exp/doc.go @@ -14,4 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. 
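reconcileProviderIDList and buildEC2FiltersFromTags above resolve the EC2 instances backing a NodePool purely from its tags and then record CAPI provider IDs on the spec. The sketch below builds the same kind of DescribeInstances filter set with aws-sdk-go v1 without calling the API; the tag key and value are hypothetical, and the provider ID line shows the conventional aws:///<availability-zone>/<instance-id> form that scope.GenerateProviderID is used to produce here (an assumption about its output, not a guarantee). Note the filter slice is created with zero length and appended to, so the request never contains nil filter entries.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// filtersFromTags mirrors the idea of buildEC2FiltersFromTags: one "tag:<key>"
// filter per tag plus an instance-state filter, starting from an empty slice
// so append never leaves nil *ec2.Filter entries behind.
func filtersFromTags(tags map[string]string) []*ec2.Filter {
	filters := make([]*ec2.Filter, 0, len(tags)+1)
	for key, value := range tags {
		filters = append(filters, &ec2.Filter{
			Name:   aws.String("tag:" + key),
			Values: aws.StringSlice([]string{value}),
		})
	}
	// Only consider instances that are running or still starting up.
	filters = append(filters, &ec2.Filter{
		Name:   aws.String("instance-state-name"),
		Values: aws.StringSlice([]string{"running", "pending"}),
	})
	return filters
}

func main() {
	// Hypothetical tag; the controller uses the tags reported on the OCM AWSNodePool.
	input := &ec2.DescribeInstancesInput{
		Filters: filtersFromTags(map[string]string{"example.com/nodepool": "workers-0"}),
	}
	fmt.Println(input)

	// Each matching instance would then be recorded as a provider ID.
	fmt.Printf("aws:///%s/%s\n", "us-west-2a", "i-0123456789abcdef0")
}
```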
*/ +// Package exp provides experimental code that is not ready for production use. package exp diff --git a/exp/instancestate/awsinstancestate_controller.go b/exp/instancestate/awsinstancestate_controller.go index c04f8687c4..15464eae61 100644 --- a/exp/instancestate/awsinstancestate_controller.go +++ b/exp/instancestate/awsinstancestate_controller.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package instancestate provides a controller that listens +// for EC2 instance state change notifications and updates the corresponding AWSMachine's status. package instancestate import ( diff --git a/exp/instancestate/awsinstancestate_controller_test.go b/exp/instancestate/awsinstancestate_controller_test.go index b9bedde94b..0c015b6deb 100644 --- a/exp/instancestate/awsinstancestate_controller_test.go +++ b/exp/instancestate/awsinstancestate_controller_test.go @@ -135,14 +135,14 @@ func TestAWSInstanceStateController(t *testing.T) { exist = exist && ok } return exist - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), "Eventually failed ensuring queue URLs are up-to-date") deleteAWSCluster(g, "aws-cluster-2") t.Log("Ensuring we stop tracking deleted queue") g.Eventually(func() bool { _, ok := instanceStateReconciler.queueURLs.Load("aws-cluster-2") return ok - }, 10*time.Second).Should(BeFalse()) + }, 10*time.Second).Should(BeFalse(), "Eventually failed ensuring we stop tracking deleted queue") persistObject(g, createAWSCluster("aws-cluster-3")) t.Log("Ensuring newly created cluster is added to tracked clusters") @@ -153,7 +153,7 @@ func TestAWSInstanceStateController(t *testing.T) { exist = exist && ok } return exist - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), "Eventually failed ensuring newly created cluster is added to the tracked clusters") t.Log("Ensuring machine is labelled with correct instance state") g.Eventually(func() bool { @@ -166,7 +166,7 @@ func TestAWSInstanceStateController(t *testing.T) { labels := m.GetLabels() val := labels[Ec2InstanceStateLabelKey] return val == "shutting-down" - }, 10*time.Second).Should(BeTrue()) + }, 10*time.Second).Should(BeTrue(), "Eventually failed ensuring machine is labelled with correct instance state") }) } diff --git a/exp/instancestate/helpers_test.go b/exp/instancestate/helpers_test.go index 51c9e5ae75..69588e9add 100644 --- a/exp/instancestate/helpers_test.go +++ b/exp/instancestate/helpers_test.go @@ -18,6 +18,7 @@ package instancestate import ( "context" + "fmt" "time" . "github.com/onsi/ginkgo/v2" @@ -50,7 +51,7 @@ func persistObject(g *WithT, o client.Object) { g.Eventually(func() bool { err := k8sClient.Get(ctx, lookupKey, o) return err == nil - }, time.Second*10).Should(BeTrue()) + }, time.Second*10).Should(BeTrue(), fmt.Sprintf("Eventually failed getting the newly created object %v", lookupKey)) } func deleteAWSCluster(g *WithT, name string) { diff --git a/feature/feature.go b/feature/feature.go index 8180138e2b..061e4edd57 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package feature provides a feature-gate implementation for capa. 
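The feature package documented above holds CAPA's feature gates, and the hunk just below moves the MachinePool gate to Beta with a default of true. Here is a small sketch of how a gate map like defaultCAPAFeatureGates is registered and queried with k8s.io/component-base/featuregate; the gate name matches CAPA's, but the standalone wiring is illustrative, and the Set call simply shows the --feature-gates=MachinePool=false style of override.

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

// MachinePool mirrors the CAPA feature gate of the same name.
const MachinePool featuregate.Feature = "MachinePool"

func main() {
	// A mutable gate populated from a default map, like defaultCAPAFeatureGates.
	gates := featuregate.NewFeatureGate()
	if err := gates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		MachinePool: {Default: true, PreRelease: featuregate.Beta},
	}); err != nil {
		panic(err)
	}

	// Callers only ever ask whether a gate is enabled.
	fmt.Println("MachinePool enabled:", gates.Enabled(MachinePool))

	// A Beta, on-by-default gate can still be switched off explicitly.
	if err := gates.Set("MachinePool=false"); err != nil {
		panic(err)
	}
	fmt.Println("MachinePool enabled after override:", gates.Enabled(MachinePool))
}
```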
package feature import ( @@ -100,7 +101,7 @@ var defaultCAPAFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ EKSAllowAddRoles: {Default: false, PreRelease: featuregate.Beta}, EKSFargate: {Default: false, PreRelease: featuregate.Alpha}, EventBridgeInstanceState: {Default: false, PreRelease: featuregate.Alpha}, - MachinePool: {Default: false, PreRelease: featuregate.Alpha}, + MachinePool: {Default: true, PreRelease: featuregate.Beta}, AutoControllerIdentityCreator: {Default: true, PreRelease: featuregate.Alpha}, BootstrapFormatIgnition: {Default: false, PreRelease: featuregate.Alpha}, ExternalResourceGC: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/go.mod b/go.mod index 046be3cb9f..65c61d6b54 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,16 @@ module sigs.k8s.io/cluster-api-provider-aws/v2 -go 1.21 +go 1.21.0 + +toolchain go1.22.6 replace ( + // TODO: remove when component-base updates its prometheus deps (https://github.com/prometheus/client_golang/releases/tag/v1.19.0) + github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/common => github.com/prometheus/common v0.46.0 // kube-openapi should match the version imported by CAPI. - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 - sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.6.1 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.1 ) require ( @@ -13,41 +18,45 @@ require ( github.com/apparentlymart/go-cidr v1.1.0 github.com/aws/amazon-vpc-cni-k8s v1.15.4 github.com/aws/aws-lambda-go v1.41.0 - github.com/aws/aws-sdk-go v1.44.332 + github.com/aws/aws-sdk-go v1.51.17 github.com/awslabs/goformation/v4 v4.19.5 github.com/blang/semver v3.5.1+incompatible github.com/coreos/ignition v0.35.0 github.com/coreos/ignition/v2 v2.16.2 - github.com/go-logr/logr v1.3.0 - github.com/gofrs/flock v0.8.1 + github.com/go-logr/logr v1.4.1 + github.com/gofrs/flock v0.12.1 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f github.com/google/gofuzz v1.2.0 - github.com/onsi/ginkgo/v2 v2.13.1 - github.com/onsi/gomega v1.30.0 - github.com/openshift-online/ocm-sdk-go v0.1.388 + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 + github.com/openshift-online/ocm-common v0.0.0-20240129111424-ff8c6c11d909 + github.com/openshift-online/ocm-sdk-go v0.1.422 + github.com/openshift/rosa v1.2.35-rc1.0.20240301152457-ad986cecd364 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.19.0 github.com/sergi/go-diff v1.3.1 - github.com/spf13/cobra v1.8.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace - golang.org/x/crypto v0.18.0 + github.com/zgalor/weberr v0.8.2 + golang.org/x/crypto v0.22.0 golang.org/x/text v0.14.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.28.4 - k8s.io/apiextensions-apiserver v0.28.4 - k8s.io/apimachinery v0.28.4 - k8s.io/cli-runtime v0.28.4 - k8s.io/client-go v0.28.4 - k8s.io/component-base v0.28.4 - k8s.io/klog/v2 v2.100.1 + k8s.io/api v0.29.3 + k8s.io/apiextensions-apiserver v0.29.3 + k8s.io/apimachinery v0.29.3 + k8s.io/apiserver v0.29.3 + k8s.io/cli-runtime v0.29.3 + k8s.io/client-go v0.29.3 + k8s.io/component-base v0.29.3 + k8s.io/klog/v2 v2.110.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e sigs.k8s.io/aws-iam-authenticator 
v0.6.13 - sigs.k8s.io/cluster-api v1.6.1 - sigs.k8s.io/cluster-api/test v1.6.1 - sigs.k8s.io/controller-runtime v0.16.3 - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 + sigs.k8s.io/cluster-api v1.7.1 + sigs.k8s.io/cluster-api/test v1.7.1 + sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/yaml v1.4.0 ) @@ -64,38 +73,42 @@ require ( github.com/adrg/xdg v0.4.0 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect + github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.27.1 // indirect + github.com/aws/smithy-go v1.19.0 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/briandowns/spinner v1.11.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/daviddengcn/go-colortext v1.0.0 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v24.0.7+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/docker v26.1.5+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.7.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-logr/zapr v1.2.4 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -103,11 +116,11 @@ require ( github.com/gobuffalo/flect v1.0.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.16.1 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -115,11 +128,12 @@ require ( github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.1 // indirect - github.com/gorilla/css v1.0.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/css v1.0.1 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/imdario/mergo v0.3.13 // indirect @@ -135,12 +149,12 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/microcosm-cc/bluemonday v1.0.18 // indirect + github.com/microcosm-cc/bluemonday v1.0.26 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -155,70 +169,65 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.52.2 // indirect + github.com/prometheus/procfs v0.13.0 // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 // indirect github.com/shopspring/decimal v1.3.1 // indirect + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/viper v1.17.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/viper v1.18.2 // indirect github.com/stoewer/go-strcase v1.2.0 
// indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/vincent-petithory/dataurl v1.0.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/v3 v3.5.10 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect + gitlab.com/c0b/go-ordered-json v0.0.0-20171130231205-49bbdab258c2 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect - go.opentelemetry.io/otel v1.20.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect + go.opentelemetry.io/otel v1.22.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect - go.opentelemetry.io/otel/metric v1.20.0 // indirect - go.opentelemetry.io/otel/sdk v1.20.0 // indirect - go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/otel/sdk v1.22.0 // indirect + go.opentelemetry.io/otel/trace v1.22.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.25.0 // indirect + go.uber.org/zap v1.26.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/oauth2 v0.14.0 // indirect - golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.18.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.28.4 // indirect - k8s.io/cluster-bootstrap v0.28.4 // indirect - k8s.io/component-helpers v0.28.4 // indirect - k8s.io/kms v0.28.4 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/kubectl v0.28.4 // indirect - k8s.io/metrics v0.28.4 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + k8s.io/cluster-bootstrap v0.29.3 // indirect + k8s.io/component-helpers v0.29.3 // indirect + k8s.io/kube-openapi 
v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubectl v0.29.3 // indirect + k8s.io/metrics v0.29.3 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kind v0.20.0 // indirect + sigs.k8s.io/kind v0.22.0 // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 95c736aaff..2359ffa20b 100644 --- a/go.sum +++ b/go.sum @@ -1,53 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -80,27 +37,32 @@ github.com/aws/amazon-vpc-cni-k8s v1.15.4 h1:eF4YcX+BvQGg73MzCaar5FoZNxe3sTokYhF github.com/aws/amazon-vpc-cni-k8s v1.15.4/go.mod h1:eVzV7+2QctvKc+yyr3kLNHFwb9xZQRKl0C8ki4ObzDw= github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y= github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM= -github.com/aws/aws-sdk-go v1.44.332 h1:Ze+98F41+LxoJUdsisAFThV+0yYYLYw17/Vt0++nFYM= -github.com/aws/aws-sdk-go v1.44.332/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.51.17 h1:Cfa40lCdjv9OxC3X1Ks3a6O1Tu3gOANSyKHOSw/zuWU= +github.com/aws/aws-sdk-go v1.51.17/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= +github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/service/iam v1.27.1 h1:rPkEOnwPOVop34lpAlA4Dv6x67Ys3moXkPDvBfjgSSo= +github.com/aws/aws-sdk-go-v2/service/iam v1.27.1/go.mod h1:qdQ8NUrhmXE80S54w+LrtHUY+1Fp7cQSRZbJUZKrAcU= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/awslabs/goformation/v4 v4.19.5 h1:Y+Tzh01tWg8gf//AgGKUamaja7Wx9NPiJf1FpZu4/iU= github.com/awslabs/goformation/v4 v4.19.5/go.mod h1:JoNpnVCBOUtEz9bFxc9sjy8uBUCLF5c4D1L7RhRTVM8= github.com/aymerick/douceur v0.2.0 
h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0= +github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -111,10 +73,8 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0= github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.21 
h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE= @@ -134,8 +94,8 @@ github.com/coreos/ignition/v2 v2.16.2/go.mod h1:Y1BKC60VSNgA5oWNoLIHXigpFX1FFn4C github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 h1:uSmlDgJGbUB0bwQBcZomBTottKwEDF5fF8UjSwKSzWM= github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -147,63 +107,56 @@ github.com/daviddengcn/go-colortext v1.0.0 h1:ANqDyC0ys6qCSvuEK7l3g5RaehL/Xck9EX github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= -github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/ghodss/yaml v1.0.0 
h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -216,35 +169,23 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4 github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -252,33 +193,28 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A= github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE= github.com/golangplus/testing v1.0.0 h1:+ZeeiKZENNOMkTTELoSySazi+XaEhVO0mb+eanrSEUQ= 
github.com/golangplus/testing v1.0.0/go.mod h1:ZDreixUV3YzhoVraIDyOzHrr76p6NUh6k/pPg/Q3gYA= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= -github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -294,37 +230,20 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4= github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= -github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -335,15 +254,14 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -358,20 +276,20 @@ github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921i github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v1.12.0 h1:/RvQ24k3TnNdfBSW0ou9EOi5jx2cX7zfE8n2nLKuiP0= -github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y= -github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v1.11.0 h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs= -github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx/v4 v4.16.0 h1:4k1tROTJctHotannFYzu77dY3bgtMRymQP7tXQjqpPk= -github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.18.3 
h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -383,11 +301,8 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -404,8 +319,10 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -413,10 +330,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/microcosm-cc/bluemonday v1.0.18 h1:6HcxvXDAi3ARt3slx6nTesbvorIc3QeTzBNRvWktHBo= -github.com/microcosm-cc/bluemonday v1.0.18/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -427,6 +342,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= @@ -455,19 +372,23 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= -github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/openshift-online/ocm-sdk-go v0.1.388 h1:c8yPCUQwJm3QhcVmnyMPFpeDtxPBaPeYh5hLv1vg9YQ= -github.com/openshift-online/ocm-sdk-go v0.1.388/go.mod h1:/+VFIw1iW2H0jEkFH4GnbL/liWareyzsL0w7mDIudB4= +github.com/openshift-online/ocm-common v0.0.0-20240129111424-ff8c6c11d909 h1:WV67GNazQuGDaLX3kBbz0859NYPOQCsDCY5XUScF85M= +github.com/openshift-online/ocm-common v0.0.0-20240129111424-ff8c6c11d909/go.mod h1:7FaAb07S63RF4sFMLSLtQaJLvPdaRnhAT4dBLD8/5kM= +github.com/openshift-online/ocm-sdk-go v0.1.422 h1:NWXLNTg7sLgUJRM3tyuk/QuVbUCRuMH+aLlbCKNzXWc= +github.com/openshift-online/ocm-sdk-go v0.1.422/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y= +github.com/openshift/rosa v1.2.35-rc1.0.20240301152457-ad986cecd364 
h1:j1aGLgZhO5xXpYgGAjmraioHTvCK7+gXZXoN9cnpnkw= +github.com/openshift/rosa v1.2.35-rc1.0.20240301152457-ad986cecd364/go.mod h1:kSNsBW8P9KfLCsZYGIrr/aKbLDct8I5gW0e4cCRrr0o= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -478,29 +399,27 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= -github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6babJtnLo1qsGvq6G9so9KMflGAm4YA= @@ -513,31 +432,34 @@ github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.17.0 
h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= -github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -547,8 +469,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= @@ -564,225 +487,123 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zgalor/weberr v0.8.2 h1:rzGP0jQVt8hGSNnzjDAQNHMxNNrf3gUrYhpSgY76+mk= +github.com/zgalor/weberr v0.8.2/go.mod h1:cqK89mj84q3PRgqQXQFWJDzCorOd8xOtov/ulOnqDwc= github.com/ziutek/telnet v0.0.0-20180329124119-c3b780dc415b/go.mod h1:IZpXDfkJ6tWD3PhBK5YzgQT+xJWh7OsdwiG8hA2MkO4= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= 
-go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= -go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= -go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= -go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= -go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= -go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= -go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= -go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= -go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= -go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= -go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= -go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= -go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= -go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +gitlab.com/c0b/go-ordered-json v0.0.0-20171130231205-49bbdab258c2 h1:M+r1hdmjZc4L4SCn0ZIq/5YQIRxprV+kOf7n7f04l5o= +gitlab.com/c0b/go-ordered-json v0.0.0-20171130231205-49bbdab258c2/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4= +go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c= +go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg= +go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js= +go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= -go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= -go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= -go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= -go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= -go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= -go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= -go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= -go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= -golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -793,183 +614,62 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb 
h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -978,19 +678,16 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1013,67 +710,57 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= -k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= -k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU= -k8s.io/apiextensions-apiserver v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM= -k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= -k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= -k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg= -k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w= -k8s.io/cli-runtime v0.28.4 h1:IW3aqSNFXiGDllJF4KVYM90YX4cXPGxuCxCVqCD8X+Q= -k8s.io/cli-runtime v0.28.4/go.mod h1:MLGRB7LWTIYyYR3d/DOgtUC8ihsAPA3P8K8FDNIqJ0k= 
-k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= -k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= -k8s.io/cluster-bootstrap v0.28.4 h1:4MKNy1Qd9QY7pl47rSMGIORF+tm3CUaqC1M8U9bjn4Q= -k8s.io/cluster-bootstrap v0.28.4/go.mod h1:/c4ro/R4yf4EtJgFgFtvnHkbDOHwubeKJXh5R1c89Bc= -k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo= -k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU= -k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4= -k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg= -k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= -k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= -k8s.io/metrics v0.28.4 h1:u36fom9+6c8jX2sk8z58H0hFaIUfrPWbXIxN7GT2blk= -k8s.io/metrics v0.28.4/go.mod h1:bBqAJxH20c7wAsTQxDXOlVqxGMdce49d7WNr1WeaLac= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= +k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= +k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/cluster-bootstrap v0.29.3 h1:DIMDZSN8gbFMy9CS2mAS2Iqq/fIUG783WN/1lqi5TF8= +k8s.io/cluster-bootstrap v0.29.3/go.mod h1:aPAg1VtXx3uRrx5qU2jTzR7p1rf18zLXWS+pGhiqPto= +k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= +k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-helpers v0.29.3 h1:1dqZswuZgT2ZMixYeORyCUOAApXxgsvjVSgfoUT+P4o= +k8s.io/component-helpers v0.29.3/go.mod h1:yiDqbRQrnQY+sPju/bL7EkwDJb6LVOots53uZNMZBos= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= +k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= +k8s.io/metrics v0.29.3 h1:nN+eavbMQ7Kuif2tIdTr2/F2ec2E/SIAWSruTZ+Ye6U= +k8s.io/metrics 
v0.29.3/go.mod h1:kb3tGGC4ZcIDIuvXyUE291RwJ5WmDu0tB4wAVZM6h2I= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= sigs.k8s.io/aws-iam-authenticator v0.6.13 h1:QSQcAkpt/hF97Ogyoz6sj3WD2twTd2cmxFb4e6Rs9gA= sigs.k8s.io/aws-iam-authenticator v0.6.13/go.mod h1:CnvFyzR/xeLHmUY/BD0qW6q0wp6KIwXmFp4eTfrHdP8= -sigs.k8s.io/cluster-api v1.6.1 h1:I34p/fwgRlEhs+o9cUhKXDwNNfPS3no0yJsd2bJyQVc= -sigs.k8s.io/cluster-api v1.6.1/go.mod h1:DaxwruDvSaEYq5q6FREDaGzX6UsAVUCA99Sp8vfMHyQ= -sigs.k8s.io/cluster-api/test v1.6.1 h1:9TffRPOuYNUyfHqdeWQtFhdK0oY+NAbvjlzbqK7chTw= -sigs.k8s.io/cluster-api/test v1.6.1/go.mod h1:+zOSrnG/2wI2XtWOkaVpVJ1BXumT/73zqRXZBYrclPQ= -sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/cluster-api v1.7.1 h1:JkMAbAMzBM+WBHxXLTJXTiCisv1PAaHRzld/3qrmLYY= +sigs.k8s.io/cluster-api v1.7.1/go.mod h1:V9ZhKLvQtsDODwjXOKgbitjyCmC71yMBwDcMyNNIov0= +sigs.k8s.io/cluster-api/test v1.7.1 h1:QDru2586ZjIFBTW1Z7VVXVtauzR/yANm4tglUNLm9iE= +sigs.k8s.io/cluster-api/test v1.7.1/go.mod h1:yG0g5Mdq73fMn9JP4akgRQPSne973L+Qx6iVH+LjtSM= +sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= +sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= -sigs.k8s.io/kind v0.20.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= +sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= +sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 h1:vq2TtoDcQomhy7OxXLUOzSbHMuMYq0Bjn93cDtJEdKw= sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= 
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate/test/fail.go b/hack/boilerplate/test/fail.go index fd911e499c..fa814ad151 100644 --- a/hack/boilerplate/test/fail.go +++ b/hack/boilerplate/test/fail.go @@ -16,4 +16,5 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package test provides a test package for boilerplate. package test diff --git a/hack/ensure-go.sh b/hack/ensure-go.sh index 171e451433..12d1854fe2 100755 --- a/hack/ensure-go.sh +++ b/hack/ensure-go.sh @@ -31,7 +31,7 @@ EOF local go_version IFS=" " read -ra go_version <<< "$(go version)" local minimum_go_version - minimum_go_version=go1.21.5 + minimum_go_version=go1.22.5 if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then cat < k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f @@ -9,51 +11,67 @@ require ( github.com/a8m/envsubst v1.4.2 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 github.com/golang/mock v1.6.0 - github.com/itchyny/gojq v0.12.14 + github.com/goreleaser/goreleaser v1.25.1 + github.com/itchyny/gojq v0.12.15 github.com/joelanford/go-apidiff v0.8.2 - github.com/mikefarah/yq/v4 v4.40.5 + github.com/mikefarah/yq/v4 v4.43.1 github.com/spf13/pflag v1.0.5 - k8s.io/apimachinery v0.29.1 - k8s.io/code-generator v0.28.4 - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d - k8s.io/klog/v2 v2.110.1 + k8s.io/apimachinery v0.29.4 + k8s.io/code-generator v0.29.3 + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 + k8s.io/klog/v2 v2.120.1 sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221121093230-b1688621953c - sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9 - sigs.k8s.io/controller-tools v0.13.0 - sigs.k8s.io/kind v0.21.0 + sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240531134648-6636df17d67b + sigs.k8s.io/controller-tools v0.14.0 + sigs.k8s.io/kind v0.22.0 sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 sigs.k8s.io/promo-tools/v4 v4.0.5 sigs.k8s.io/testing_frameworks v0.1.2 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.25.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/containeranalysis v0.11.4 // indirect cloud.google.com/go/errorreporting v0.3.0 // indirect cloud.google.com/go/grafeas v0.3.4 // indirect cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go/kms v1.15.8 // indirect cloud.google.com/go/logging v1.9.0 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - cloud.google.com/go/storage v1.37.0 // indirect - cuelang.org/go v0.6.0 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect + cloud.google.com/go/storage v1.39.1 // indirect + code.gitea.io/sdk/gitea v0.17.1 // indirect + cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e // indirect + cuelang.org/go v0.8.1 // 
indirect dario.cat/mergo v1.0.0 // indirect - filippo.io/edwards25519 v1.0.0 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/AlekSi/pointer v1.2.0 // indirect github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/BurntSushi/toml v1.2.1 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect @@ -70,49 +88,80 @@ require ( github.com/alibabacloud-go/tea-xml v1.1.3 // indirect github.com/aliyun/credentials-go v1.3.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go v1.50.9 // indirect - github.com/aws/aws-sdk-go-v2 v1.23.5 // indirect - github.com/aws/aws-sdk-go-v2/config v1.25.11 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect + github.com/atc0005/go-teams-notify/v2 v2.10.0 // indirect + github.com/aws/aws-sdk-go v1.51.6 // indirect + github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.9 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect 
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 // indirect - github.com/aws/smithy-go v1.18.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 // indirect + github.com/aws/smithy-go v1.20.1 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/buildkite/agent/v3 v3.59.0 // indirect - github.com/buildkite/go-pipeline v0.2.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/buildkite/agent/v3 v3.62.0 // indirect + github.com/buildkite/go-pipeline v0.3.2 // indirect github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 // indirect + github.com/caarlos0/ctrlc v1.2.0 // indirect + github.com/caarlos0/env/v9 v9.0.0 // indirect + github.com/caarlos0/go-reddit/v3 v3.0.1 // indirect + github.com/caarlos0/go-shellwords v1.0.12 // indirect + github.com/caarlos0/go-version v0.1.1 // indirect + github.com/caarlos0/log v0.4.4 // indirect + github.com/cavaliergopher/cpio v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/charmbracelet/lipgloss v0.10.0 // indirect + github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/cockroachdb/apd/v3 v3.2.1 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/coreos/go-oidc/v3 v3.9.0 // indirect + github.com/coreos/go-oidc/v3 v3.10.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davidmz/go-pageant v1.0.2 // indirect + github.com/dghubble/go-twitter v0.0.0-20211115160449-93a8679adecb // indirect + github.com/dghubble/oauth1 v0.7.3 // indirect + github.com/dghubble/sling v1.4.0 // indirect github.com/digitorus/pkcs7 
v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20230902153158-687734543647 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v24.0.7+incompatible // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/cli v25.0.4+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v24.0.7+incompatible // indirect + github.com/docker/docker v26.1.5+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/elliotchance/orderedmap v1.5.1 // indirect + github.com/elliotchance/orderedmap/v2 v2.2.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emicklei/proto v1.12.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -123,139 +172,173 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-errors/errors v1.0.1 // indirect + github.com/go-fed/httpsig v1.1.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-git/go-git/v5 v5.11.0 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.1 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-logr/zapr v1.2.0 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/errors v0.20.4 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/runtime v0.26.0 // indirect - github.com/go-openapi/spec v0.20.11 // indirect - github.com/go-openapi/strfmt v0.21.8 // indirect - github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-openapi/validate v0.22.3 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-piv/piv-go v1.11.0 // indirect + github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible // indirect github.com/gobuffalo/flect v1.0.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/goccy/go-yaml v1.11.2 // indirect + github.com/goccy/go-yaml v1.11.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 
// indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.1.7 // indirect + github.com/google/certificate-transparency-go v1.1.8 // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.19.0 // indirect + github.com/google/go-containerregistry v0.19.1 // indirect github.com/google/go-github/v55 v55.0.0 // indirect github.com/google/go-github/v58 v58.0.0 // indirect + github.com/google/go-github/v61 v61.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/ko v0.15.2 // indirect + github.com/google/rpmpack v0.6.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/google/wire v0.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/goreleaser/chglog v0.5.0 // indirect + github.com/goreleaser/fileglob v1.3.0 // indirect + github.com/goreleaser/nfpm/v2 v2.36.1 // indirect github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.5 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.12.0 // indirect github.com/itchyny/timefmt-go v0.1.5 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/jellydator/ttlcache/v3 v3.1.1 // indirect + github.com/jellydator/ttlcache/v3 v3.2.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-mastodon v0.0.6 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir 
v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect - github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect + github.com/muesli/mango v0.1.0 // indirect + github.com/muesli/mango-cobra v1.2.0 // indirect + github.com/muesli/mango-pflag v0.1.0 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/roff v0.1.0 // indirect + github.com/muesli/termenv v0.15.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/oleiade/reflections v1.0.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/onsi/gomega v1.29.0 // indirect - github.com/open-policy-agent/opa v0.59.0 // indirect + github.com/onsi/gomega v1.32.0 // indirect + github.com/open-policy-agent/opa v0.63.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.0 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.51.1 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/rivo/uniseg v0.4.4 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/cosign/v2 v2.2.2 // indirect - github.com/sigstore/fulcio v1.4.3 // indirect - github.com/sigstore/rekor v1.3.4 // indirect - github.com/sigstore/sigstore v1.8.1 // indirect - github.com/sigstore/timestamp-authority v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sigstore/cosign/v2 v2.2.4 // indirect + github.com/sigstore/fulcio v1.4.5 // indirect + 
github.com/sigstore/rekor v1.3.6 // indirect + github.com/sigstore/sigstore v1.8.3 // indirect + github.com/sigstore/timestamp-authority v1.2.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skeema/knownhosts v1.2.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + github.com/slack-go/slack v0.12.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.8.0 // indirect - github.com/spf13/viper v1.17.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.1.6 // indirect + github.com/spf13/viper v1.18.2 // indirect + github.com/spiffe/go-spiffe/v2 v2.2.0 // indirect github.com/src-d/gcfg v1.4.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect + github.com/technoweenie/multipartstreamer v1.0.1 // indirect github.com/thales-e-security/pool v0.0.2 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tjfoc/gmsm v1.4.1 // indirect + github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect github.com/vbatts/tar-split v0.11.5 // indirect - github.com/xanzy/go-gitlab v0.94.0 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/xanzy/go-gitlab v0.102.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect @@ -263,51 +346,54 @@ require ( github.com/yashtewari/glob-intersection v0.2.0 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect github.com/zeebo/errs v1.3.0 // indirect - go.mongodb.org/mongo-driver v1.12.1 // indirect + gitlab.com/digitalxero/go-conventional-commit v1.0.7 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - go.step.sm/crypto v0.38.0 // indirect + go.step.sm/crypto v0.44.2 // indirect + go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect - 
golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 // indirect + go.uber.org/zap v1.27.0 // indirect + gocloud.dev v0.37.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.19.0 // indirect golang.org/x/tools/go/vcs v0.1.0-deprecated // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/api v0.161.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.61.0 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect + google.golang.org/api v0.172.0 // indirect + google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/grpc v1.62.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/mail.v2 v2.3.1 // indirect gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect gopkg.in/src-d/go-git.v4 v4.13.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.28.4 // indirect - k8s.io/apiextensions-apiserver v0.28.4 // indirect - k8s.io/client-go v0.28.4 // indirect + k8s.io/api v0.29.0 // indirect + k8s.io/apiextensions-apiserver v0.29.0 // indirect + k8s.io/client-go v0.29.0 // indirect k8s.io/klog v0.2.0 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect diff --git a/hack/tools/go.sum b/hack/tools/go.sum index 3c9b3aa71f..e6e926154e 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -1,85 +1,65 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= 
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU= +cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/containeranalysis v0.11.4 h1:doJ0M1ljS4hS0D2UbHywlHGwB7sQLNrt9vFk9Zyi7vY= cloud.google.com/go/containeranalysis v0.11.4/go.mod h1:cVZT7rXYBS9NG1rhQbWL9pWbXCKHWJPYraE8/FTSYPE= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/grafeas v0.3.4 h1:D4x32R/cHX3MTofKwirz015uEdVk4uAxvZkZCZkOrF4= cloud.google.com/go/grafeas v0.3.4/go.mod h1:A5m316hcG+AulafjAbPKXBO/+I5itU4LOdKO2R/uDIc= cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= +cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs= +cloud.google.com/go/kms v1.15.8/go.mod 
h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs= cloud.google.com/go/logging v1.9.0 h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw= cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= -cuelang.org/go v0.6.0 h1:dJhgKCog+FEZt7OwAYV1R+o/RZPmE8aqFoptmxSWyr8= -cuelang.org/go v0.6.0/go.mod h1:9CxOX8aawrr3BgSdqPj7V0RYoXo7XIb+yDFC6uESrOQ= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY= +cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o= +code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= +code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= +cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e h1:GwCVItFUPxwdsEYnlUcJ6PJxOjTeFFCKOh6QWg4oAzQ= +cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e/go.mod h1:ApHceQLLwcOkCEXM1+DyCXTHEJhNGDpJ2kmV6axsx24= +cuelang.org/go v0.8.1 h1:VFYsxIFSPY5KgSaH1jQ2GxHOrbu6Ga3kEI70yCZwnOg= +cuelang.org/go v0.8.1/go.mod h1:CoDbYolfMms4BhWUlhD+t5ORnihR7wvjcfgyO9lL5FI= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= -filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18 h1:rd389Q26LMy03gG4anandGFC2LW/xvjga5GezeeaxQk= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18/go.mod h1:fgJuSBrJP5qZtKqaMJE0hmhS2tmRH+44IkfZvjtaf1M= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= 
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= +github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= 
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= @@ -99,25 +79,39 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio 
v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE= -github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= +github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= +github.com/ProtonMail/gopenpgp/v2 v2.7.1 h1:Awsg7MPc2gD3I7IFac2qE3Gdls0lZW8SzrFZ3k1oz0s= +github.com/ProtonMail/gopenpgp/v2 v2.7.1/go.mod h1:/BU5gfAVwqyd8EfC3Eu7zmuhwYQpKs+cGD8M//iiaxs= github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg= @@ -132,8 +126,8 @@ github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVd github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= -github.com/alecthomas/repr v0.3.0 h1:NeYzUPfjjlqHY4KtzgKJiWd6sVq2eNUPTi34PiFGjY8= -github.com/alecthomas/repr v0.3.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= @@ -186,68 +180,106 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.50.9 
h1:yX66aKnEtRc/uNV/1EH8CudRT5aLwVwcSwTBphuVPt8= -github.com/aws/aws-sdk-go v1.50.9/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/atc0005/go-teams-notify/v2 v2.10.0 h1:eQvRIkyESQgBvlUdQ/iPol/lj3QcRyrdEQM3+c/nXhM= +github.com/atc0005/go-teams-notify/v2 v2.10.0/go.mod h1:SIeE1UfCcVRYMqP5b+r1ZteHyA/2UAjzWF5COnZ8q0w= +github.com/aws/aws-sdk-go v1.51.6 h1:Ld36dn9r7P9IjU8WZSaswQ8Y/XUCRpewim5980DwYiU= +github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= -github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= -github.com/aws/aws-sdk-go-v2/config v1.25.11 h1:RWzp7jhPRliIcACefGkKp03L0Yofmd2p8M25kbiyvno= -github.com/aws/aws-sdk-go-v2/config v1.25.11/go.mod h1:BVUs0chMdygHsQtvaMyEOpW2GIW+ubrxJLgIz/JU29s= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9 h1:LQo3MUIOzod9JdUK+wxmSdgzLVYUbII3jXn3S/HJZU0= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9/go.mod h1:R7mDuIJoCjH6TxGUc/cylE7Lp/o0bhKVoxdBThsjqCM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU= +github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= +github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 h1:gTK2uhtAPtFcdRRJilZPx8uJLL2J85xK11nKtWL0wfU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo= +github.com/aws/aws-sdk-go-v2/config v1.27.9 h1:gRx/NwpNEFSk+yQlgmk1bmxxvQ5TyJ76CWXs9XScTqg= +github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.9 h1:N8s0/7yW+h8qR8WaRlPQeJ6czVMNQVNtNdUqf6cItao= +github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 h1:af5YzcLf80tv4Em4jWVD75lpnOHSBkPUZxZfGkrI3HI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 h1:vXY/Hq1XdxHBIYgBUmug/AbMyIe1AKulPYS2/VE1X70= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9/go.mod h1:GyJJTZoHVuENM4TeJEl5Ffs4W9m19u+4wKJcDi/GZ4A= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 h1:0ScVK/4qZ8CIW0k8jOeFVsyS/sAiXpYxRBLolMkuLQM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= 
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 h1:sHmMWWX5E7guWEFQ9SVo6A3S4xpPrWnd77a6y4WM6PU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 h1:mDnFOE2sVkyphMWtTH+stv0eW3k0OTx94K63xpxHty4= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3/go.mod h1:V8MuRVcCRt5h1S+Fwu8KbC7l/gBGo3yBAyUbJM2IJOk= github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE= github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 h1:EamsKe+ZjkOQjDdHd86/JCEucjFKQ9T0atWKO4s2Lgs= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8/go.mod h1:Q0vV3/csTpbkfKLI5Sb56cJQTCTtJ0ixdb7P+Wedqiw= -github.com/aws/aws-sdk-go-v2/service/kms v1.27.2 h1:I0NiSQiZu1UzP0akJWXSacjckEpYdN4VN7XYYfW6EYs= -github.com/aws/aws-sdk-go-v2/service/kms v1.27.2/go.mod h1:E2IzqbIZfYuYUgib2KxlaweBbkxHCb3ZIgnp85TjKic= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 h1:xJPydhNm0Hiqct5TVKEuHG7weC0+sOs4MUnd7A5n5F4= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2/go.mod h1:zxk6y1X2KXThESWMS5CrKRvISD8mbIMab6nZrCGxDG0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 h1:8dU9zqA77C5egbU6yd4hFLaiIdPv3rU+6cp7sz5FjCU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 h1:fFrLsy08wEbAisqW3KDl/cPHrF43GmV79zXB9EwJiZw= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 h1:mbWNpfRUTT6bnacmvOTKXZjR/HycibdWzNpfbrbLDIs= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5/go.mod h1:FCOPWGjsshkkICJIn9hq9xr6dLKtyaWpuUojiN3W1/8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 h1:b+E7zIUHMmcB4Dckjpkapoy47W6C9QBv/zoUP+Hn8Kc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W8W1lNvMAG+NX8qHz2CjLBxQKku40g= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A= +github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 
h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU= +github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 h1:mnbuWHOcM70/OFUlZZ5rcdfA8PflGXXiefU/O+1S3+8= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 h1:uLq0BKatTmDzWa/Nu4WO0M1AaQDaPpwTKAeByEc6WFM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 h1:J/PpTf/hllOjx8Xu9DMflff3FajfLxqM5+tepvVXmxg= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= -github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= +github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/buildkite/agent/v3 v3.59.0 h1:4+4gnl5DAgmDXs2JomzlVcYAiWJWkb6U9631Nh3wr0A= -github.com/buildkite/agent/v3 v3.59.0/go.mod h1:htNkEYNyaN7sNK63I1AIMJ30kR3l7IwxF5OCDh25jHY= -github.com/buildkite/go-pipeline v0.2.0 h1:+abnWSScMhJscxYgom2z0dzuXbjnYOz4RPeHmrfy31k= -github.com/buildkite/go-pipeline v0.2.0/go.mod h1:Wkiq1SFvic/GcDem1mg4o8BrtA8JJubTlsBMLPHYoSQ= +github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70= 
+github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM= +github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM= +github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA= github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE= github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/caarlos0/ctrlc v1.2.0 h1:AtbThhmbeYx1WW3WXdWrd94EHKi+0NPRGS4/4pzrjwk= +github.com/caarlos0/ctrlc v1.2.0/go.mod h1:n3gDlSjsXZ7rbD9/RprIR040b7oaLfNStikPd4gFago= +github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc= +github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020= +github.com/caarlos0/go-reddit/v3 v3.0.1 h1:w8ugvsrHhaE/m4ez0BO/sTBOBWI9WZTjG7VTecHnql4= +github.com/caarlos0/go-reddit/v3 v3.0.1/go.mod h1:QlwgmG5SAqxMeQvg/A2dD1x9cIZCO56BMnMdjXLoisI= +github.com/caarlos0/go-rpmutils v0.2.1-0.20240105125627-01185134a559 h1:5TPRjT2njvPKzXUcrcg6Dt+JPzQF+M5K7xb5V1Nwteg= +github.com/caarlos0/go-rpmutils v0.2.1-0.20240105125627-01185134a559/go.mod h1:sUS7SdlihaphHRYa/Uu4haxl9zL6DLGrFjoTsurEYOw= +github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8= +github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw= +github.com/caarlos0/go-version v0.1.1 h1:1bikKHkGGVIIxqCmufhSSs3hpBScgHGacrvsi8FuIfc= +github.com/caarlos0/go-version v0.1.1/go.mod h1:Ze5Qx4TsBBi5FyrSKVg1Ibc44KGV/llAaKGp86oTwZ0= +github.com/caarlos0/log v0.4.4 h1:LnvgBz/ofsJ00AupP/cEfksJSZglb1L69g4Obk/sdAc= +github.com/caarlos0/log v0.4.4/go.mod h1:+AmCI9Liv5LKXmzFmFI1htuHdTTj/0R3KuoP9DMY7Mo= +github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= +github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= +github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM= +github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc= github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -255,6 +287,12 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/keygen v0.5.0 h1:XY0fsoYiCSM9axkrU+2ziE6u6YjJulo/b9Dghnw6MZc= +github.com/charmbracelet/keygen v0.5.0/go.mod 
h1:DfvCgLHxZ9rJxdK0DGw3C/LkV4SgdGbnliHcObV3L+8= +github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s= +github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE= +github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d h1:+o+e/8hf7cG0SbAzEAm/usJ8qoZPgFXhudLjop+TM0g= +github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d/go.mod h1:aoG4bThKYIOnyB55r202eHqo6TkN7ZXV+cu4Do3eoBQ= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -268,25 +306,26 @@ github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUK github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= -github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= -github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= +github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= +github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 
h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= @@ -297,8 +336,16 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0= +github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= +github.com/dghubble/go-twitter v0.0.0-20211115160449-93a8679adecb h1:7ENzkH+O3juL+yj2undESLTaAeRllHwCs/b8z6aWSfc= +github.com/dghubble/go-twitter v0.0.0-20211115160449-93a8679adecb/go.mod h1:qhZBgV9e4WyB1JNjHpcXVkUe3knWUwYuAPB1hITdm50= +github.com/dghubble/oauth1 v0.7.3 h1:EkEM/zMDMp3zOsX2DC/ZQ2vnEX3ELK0/l9kb+vs4ptE= +github.com/dghubble/oauth1 v0.7.3/go.mod h1:oxTe+az9NSMIucDPDCCtzJGsPhciJV33xocHfcR2sVY= +github.com/dghubble/sling v1.4.0 h1:/n8MRosVTthvMbwlNZgLx579OGVjUOy3GNEv5BIqAWY= +github.com/dghubble/sling v1.4.0/go.mod h1:0r40aNsU9EdDUVBNhfCstAtFgutjgJGYbO1oNzkMoM8= github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= @@ -308,18 +355,30 @@ github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8 github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/timestamp v0.0.0-20230902153158-687734543647 h1:WOk5Aclr/+sZ2/SX2YyxulNFwZOUhSrDJLw5KbHKmdE= -github.com/digitorus/timestamp v0.0.0-20230902153158-687734543647/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/cli v24.0.7+incompatible 
h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= -github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/distribution/distribution/v3 v3.0.0-alpha.1 h1:jn7I1gvjOvmLztH1+1cLiUFud7aeJCIQcgzugtwjyJo= +github.com/distribution/distribution/v3 v3.0.0-alpha.1/go.mod h1:LCp4JZp1ZalYg0W/TN05jarCQu+h4w7xc7ZfQF4Y/cY= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/cli v25.0.4+incompatible h1:DatRkJ+nrFoYL2HZUzjM5Z5sAmcA5XGp+AW0oEw2+cA= +github.com/docker/cli v25.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= @@ -327,6 +386,8 @@ github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcej github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/elliotchance/orderedmap v1.5.1 h1:G1X4PYlljzimbdQ3RXmtIZiQ9d6aRQ3sH1nzjq5mECE= github.com/elliotchance/orderedmap v1.5.1/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys= +github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk= +github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/proto v1.12.1 h1:6n/Z2pZAnBwuhU66Gs8160B8rrrYKo7h2F2sCOnNceE= @@ -337,12 +398,9 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane 
v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= @@ -355,25 +413,27 @@ github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= 
+github.com/gliderlabs/ssh v0.3.6 h1:ZzjlDa05TcFRICb3anf/dSPN3ewz1Zx6CMLPWgkm3b8= +github.com/gliderlabs/ssh v0.3.6/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= +github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -382,69 +442,58 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMj github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= -github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.20.2/go.mod 
h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.11 h1:J/TzFDLTt4Rcl/l1PmyErvkqlJDncGvPTMnCI39I4gY= -github.com/go-openapi/spec v0.20.11/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.8 h1:VYBUoKYRLAlgKDrIxR/I0lKrztDQ0tuTDrbhLVP8Erg= -github.com/go-openapi/strfmt v0.21.8/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= -github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= 
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg= github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24= -github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-quicktest/qt v1.100.0 h1:I7iSLgIwNp0E0UnSvKJzs7ig0jg/Iq83zsZjtQNW7jY= -github.com/go-quicktest/qt v1.100.0/go.mod h1:leyLsQ4jksGmF1KaQEyabnqGIiJTbOU5S46QegToEj4= -github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c= -github.com/go-rod/rod v0.114.5/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw= +github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U= +github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-rod/rod v0.114.7 h1:h4pimzSOUnw7Eo41zdJA788XsawzHjJMyzCE3BrBww0= +github.com/go-rod/rod v0.114.7/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible h1:2cauKuaELYAEARXRkq2LrJ0yDDv1rW7+wrTEdVL3uaU= +github.com/go-telegram-bot-api/telegram-bot-api v4.6.4+incompatible/go.mod h1:qf9acutJ8cwBUhm1bqgz6Bei9/C/c93FPDljKWwsOgM= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod 
h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= @@ -453,8 +502,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-yaml v1.11.2 h1:joq77SxuyIs9zzxEjgyLBugMQ9NEgTWxXfz2wVqwAaQ= -github.com/goccy/go-yaml v1.11.2/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= +github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I= +github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -463,32 +512,21 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod 
h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -499,15 +537,12 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw= -github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE= +github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to= +github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8= github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= @@ -518,91 +553,97 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= -github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= github.com/google/go-github/v58 v58.0.0 h1:Una7GGERlF/37XfkPwpzYJe0Vp4dt2k1kCjlxwjIvzw= github.com/google/go-github/v58 v58.0.0/go.mod h1:k4hxDKEfoWpSqFlc8LTpGd9fu2KrV1YAa6Hi6FmDNY4= +github.com/google/go-github/v61 v61.0.0 h1:VwQCBwhyE9JclCI+22/7mLB1PuU9eowCXKY5pNlu1go= +github.com/google/go-github/v61 v61.0.0/go.mod h1:0WR+KmsWX75G2EbpyGsGmradjo3IiciuI4BmdVCobQY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE= +github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= +github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= +github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/ko v0.15.2 h1:+M1yxpUGPaynwHn26BELF57eDGyt8MUHM7iV/W28kss= +github.com/google/ko v0.15.2/go.mod h1:7a7U0AvWS9MbZdEHcrN8QjJgbafoRqU29WS/azE8cw8= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/rpmpack v0.6.0 h1:LoQuqlw6kHRwg25n3M0xtYrW+z2pTkR0ae1xx11hRw8= +github.com/google/rpmpack v0.6.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= -github.com/google/trillian v1.5.3 h1:3ioA5p09qz+U9/t2riklZtaQdZclaStp0/eQNfewNRg= -github.com/google/trillian v1.5.3/go.mod h1:p4tcg7eBr7aT6DxrAoILpc3uXNfcuAvZSnQKonVg+Eo= +github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4= +github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= +github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod 
h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/goreleaser/chglog v0.5.0 h1:Sk6BMIpx8+vpAf8KyPit34OgWui8c7nKTMHhYx88jJ4= +github.com/goreleaser/chglog v0.5.0/go.mod h1:Ri46M3lrMuv76FHszs3vtABR8J8k1w9JHYAzxeeOl28= +github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I= +github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= +github.com/goreleaser/goreleaser v1.25.1 h1:a9skjeROotTN5GPPJDHDfhmOK4n13cBgJ34sTdXRDN0= +github.com/goreleaser/goreleaser v1.25.1/go.mod h1:nsbhCYp9eImbE2fyd9/3Tgv5hjuGuDIQRoBozEUEYbc= +github.com/goreleaser/nfpm/v2 v2.36.1 h1:6JmvvEJzSHddJJfNXEu+JrkLsCb9yMOvDTC6ZDhlanY= +github.com/goreleaser/nfpm/v2 v2.36.1/go.mod h1:GHvX+qQk3eRn0OeDjQS2DDBibL1TleOcu1/RB/NlxOE= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= @@ -611,19 +652,21 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= -github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.12.2 h1:7YkCTE5Ni90TcmYHDBExdt4WGJxhpzaHqR6uGbQb/rE= +github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= @@ -631,16 +674,20 @@ github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/itchyny/gojq v0.12.14 h1:6k8vVtsrhQSYgSGg827AD+PVVaB1NLXEdX+dda2oZCc= -github.com/itchyny/gojq v0.12.14/go.mod h1:y1G7oO7XkcR1LPZO59KyoCRy08T3j9vDYRV0GgYSS+s= +github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= +github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/itchyny/gojq v0.12.15 h1:WC1Nxbx4Ifw5U2oQWACYz32JK8G9qxNtHzrvW4KEcqI= +github.com/itchyny/gojq v0.12.15/go.mod h1:uWAHCbCIla1jiNxmeT5/B5mOjSdfkCq6p8vxWg+BM10= github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8= -github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= +github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= @@ -657,21 +704,19 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod 
h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -681,45 +726,55 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= -github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s= github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-mastodon v0.0.6 h1:lqU1sOeeIapaDsDUL6udDZIzMb2Wqapo347VZlaOzf0= +github.com/mattn/go-mastodon v0.0.6/go.mod h1:cg7RFk2pcUfHZw/IvKe1FUzmlq5KnLFqs7eV2PHplV8= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mikefarah/yq/v4 v4.40.5 h1:7gDj+GlXINEIB4wv30XR/UkH400kJHauiwxKwIXqgRc= -github.com/mikefarah/yq/v4 v4.40.5/go.mod h1:y2lpkZypzZrJ2kr098cL0PfzdqEwVCJHPW8bH8HNQI8= +github.com/mikefarah/yq/v4 v4.43.1 h1:1bCrQwVDhjGnPboQidy30hu6U2TCd8sUQTy1hKCHOGI= +github.com/mikefarah/yq/v4 v4.43.1/go.mod h1:jcSqtyUKbPWvwaa8cNw8Ej4rmPb3iWE8zYvpkTvM7oc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -729,11 +784,22 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= -github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto= -github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY= +github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI= +github.com/muesli/mango v0.1.0/go.mod h1:5XFpbC8jY5UUv89YQciiXNlbi+iJgt29VDC5xbzrLL4= +github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbYvWg= +github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA= +github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg= +github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8= +github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -755,78 +821,82 @@ github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/open-policy-agent/opa v0.59.0 h1:1WFU/KUhJAr3qatm0Lf8Ea5jp10ZmlE2M07oaLiHypg= -github.com/open-policy-agent/opa v0.59.0/go.mod h1:rdJSkEc4oQ+0074/3Fsgno5bkPsYxTjU5aLNmMujIvI= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/open-policy-agent/opa v0.63.0 h1:ztNNste1v8kH0/vJMJNquE45lRvqwrM5mY9Ctr9xIXw= +github.com/open-policy-agent/opa v0.63.0/go.mod h1:9VQPqEfoB2N//AToTxzZ1pVTVPUoF2Mhd64szzjWPpU= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= 
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= +github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= +github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf 
h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk= github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= -github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.1 h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ= -github.com/sassoftware/relic/v7 v7.6.1/go.mod h1:NxwtWxWxlUa9as2qZi635Ye6bBT/tGnMALLq7dSfOOU= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= @@ -836,25 +906,28 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= 
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/cosign/v2 v2.2.2 h1:V1uE1/QnKGfj77vuqlEGBg6O2ZJqOrWkLwjTC21Vxw0= -github.com/sigstore/cosign/v2 v2.2.2/go.mod h1:bNmX0qyby7sgcqY9oY/jy5m+XJ3N3LtpOsNfO+A1CGo= -github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= -github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= -github.com/sigstore/rekor v1.3.4 h1:RGIia1iOZU7fOiiP2UY/WFYhhp50S5aUm7YrM8aiA6E= -github.com/sigstore/rekor v1.3.4/go.mod h1:1GubPVO2yO+K0m0wt/3SHFqnilr/hWbsjSOe7Vzxrlg= -github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo= -github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6 h1:WzZExOcFanrFfCi7SUgkBtJicWnSNziBD9nSSQIrqhc= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6/go.mod h1:3zOHOLHnCE6EXyVH+6Z/lC9O1RDsbmR045NQ1DogiHw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6 h1:wsPt9kNXF1ZZyae2wO35NLsK+cjWqPGpuPaDdXzRe0g= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6/go.mod h1:LH+ct6D77J8Ks6PXijMYYhmlQ1mbqKHbmy7+Sw5/Woc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6 h1:aMVT9XXFQEnBtJ6szzanyAdKT5gFK4emN+jLSlFlOso= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6/go.mod h1:Hwhlx8JSZJF1R27JlwW/Bl2h40reG3MfKANREtBI0L8= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6 h1:TdSHzcFtPJxbk4B+huWC6GDq7OpgHmLg18inRo9u70I= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6/go.mod h1:/l/PzSbTOuIAtglOwUdlzzYvjIZ2WyaBpt5722JTmLY= -github.com/sigstore/timestamp-authority v1.2.0 h1:Ffk10QsHxu6aLwySQ7WuaoWkD63QkmcKtozlEFot/VI= -github.com/sigstore/timestamp-authority v1.2.0/go.mod h1:ojKaftH78Ovfow9DzuNl5WgTCEYSa4m5622UkKDHRXc= +github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro= +github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y= +github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= +github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= +github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= +github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= +github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0= +github.com/sigstore/sigstore/pkg/signature/kms/gcp 
v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc= +github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= +github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -862,28 +935,34 @@ github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2 github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/slack-go/slack v0.12.5 h1:ddZ6uz6XVaB+3MTDhoW04gG+Vc/M/X1ctC+wssy2cqs= +github.com/slack-go/slack v0.12.5/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= +github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= +github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra 
v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= -github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= -github.com/spiffe/go-spiffe/v2 v2.1.6 h1:4SdizuQieFyL9eNU+SPiCArH4kynzaKOOj0VvM8R7Xo= -github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spiffe/go-spiffe/v2 v2.2.0 h1:9Vf06UsvsDbLYK/zJ4sYsIsHmMFknUD+feA7IYoWMQY= +github.com/spiffe/go-spiffe/v2 v2.2.0/go.mod h1:Urzb779b3+IwDJD2ZbN8fVl3Aa8G4N/PiUe6iXC0XxU= github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -891,8 +970,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -904,49 +983,54 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 
v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/technoweenie/multipartstreamer v1.0.1 h1:XRztA5MXiR1TIRHxH2uNxXxaIkKQDeX7m2XsSOlQEnM= +github.com/technoweenie/multipartstreamer v1.0.1/go.mod h1:jNVxdtShOxzAsukZwTSw6MDx5eUJoiEBsSvzDU9uzog= github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= +github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y= +github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE= github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/xanzy/go-gitlab v0.94.0 h1:GmBl2T5zqUHqyjkxFSvsT7CbelGdAH/dmBqUBqS+4BE= -github.com/xanzy/go-gitlab v0.94.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4= +github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= @@ -957,128 +1041,97 @@ github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4= -github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= -go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8= +gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 
h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.step.sm/crypto v0.38.0 h1:kRVtzOjplP5xDh9UlenXdDAtXWCfVL6GevZgpiom1Zg= -go.step.sm/crypto v0.38.0/go.mod h1:0Cv9UB8sHqnsLO14FhboDE/OIN993c3G0ImOafTS2AI= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.step.sm/crypto v0.44.2 h1:t3p3uQ7raP2jp2ha9P6xkQF85TJZh+87xmjSLaib+jk= +go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= +gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 h1:mchzmB1XO2pMaKFRqk/+MV3mgGG96aqaPXaMifQU47w= -golang.org/x/exp v0.0.0-20231108232855-2478ac86f678/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 
h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1087,33 +1140,15 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= @@ -1128,80 +1163,49 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1211,14 +1215,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -1226,92 +1235,49 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= 
golang.org/x/tools/go/vcs v0.1.0-deprecated h1:cOIJqWBl99H1dH5LWizPa+0ImeeJq3t3cJjaeOWUAL4= golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1321,101 +1287,32 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= -google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= +google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= +google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= +google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= +google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= +google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1424,13 +1321,14 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1438,19 +1336,18 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= -gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= +gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= +gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= +gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= @@ -1470,55 +1367,45 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= -k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= -k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU= -k8s.io/apiextensions-apiserver 
v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= -k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= -k8s.io/code-generator v0.28.4 h1:tcOSNIZQvuAvXhOwpbuJkKbAABJQeyCcQBCN/3uI18c= -k8s.io/code-generator v0.28.4/go.mod h1:OQAfl6bZikQ/tK6faJ18Vyzo54rUII2NmjurHyiN1g4= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= +k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= +k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= +k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/code-generator v0.29.3 h1:m7E25/t9R9NvejspO2zBdyu+/Gl0Z5m7dCRc680KS14= +k8s.io/code-generator v0.29.3/go.mod h1:x47ofBhN4gxYFcxeKA1PYXeaPreAGaDN85Y/lNUsPoM= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221121093230-b1688621953c h1:DXSapcAhMk979WoxCKPWA6XFNDpSHFAGA/PgNLeVkeQ= sigs.k8s.io/cluster-api/hack/tools v0.0.0-20221121093230-b1688621953c/go.mod h1:7luenhlsUTb9obnAferuDFEvhtITw7JjHpXkiDmCmKY= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9 h1:ylYUI5uaq/guUFerFRVG81FHSA5/3+fERCE1RQbQUZ4= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9/go.mod h1:+sJcI1F0QI0Cv+8fp5rH5B2fK1LxzrAQqYnaPx9nY8I= 
-sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI= -sigs.k8s.io/controller-tools v0.13.0/go.mod h1:5vw3En2NazbejQGCeWKRrE7q4P+CW8/klfVqP8QZkgA= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240531134648-6636df17d67b h1:E+2LwLidBVcAyCb9LEnKVqIZjZ9ktTv9JbBU8HCWkhY= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240531134648-6636df17d67b/go.mod h1:RuyOlKuz3BnqAsDTf0Hgwzvm+Snno1Ko5hvz0nRHWnU= +sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= +sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kind v0.21.0 h1:QgkVrW35dMXNLkWlUkq2uFQNQbPLr0Z6RgRH5P/NzZU= -sigs.k8s.io/kind v0.21.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= +sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= +sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d h1:KLiQzLW3RZJR19+j4pw2h5iioyAyqCkDBEAFdnGa3N8= sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d/go.mod h1:NRdZafr4zSCseLQggdvIMXa7umxf+Q+PJzrj3wFwiGE= sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= @@ -1543,5 +1430,5 @@ sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE= -software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/hack/tools/third_party/conversion-gen/generators/conversion.go b/hack/tools/third_party/conversion-gen/generators/conversion.go index f6d6c782c4..a4b127aea4 100644 --- a/hack/tools/third_party/conversion-gen/generators/conversion.go +++ b/hack/tools/third_party/conversion-gen/generators/conversion.go @@ -25,15 +25,13 @@ import ( "sort" "strings" + conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" + genutil "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" - "k8s.io/klog/v2" - - conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" - genutil "k8s.io/code-generator/pkg/util" ) // These are the comment tags that carry parameters for conversion generation. diff --git a/hack/tools/third_party/conversion-gen/main.go b/hack/tools/third_party/conversion-gen/main.go index eae9db7eac..c8d859612d 100644 --- a/hack/tools/third_party/conversion-gen/main.go +++ b/hack/tools/third_party/conversion-gen/main.go @@ -63,7 +63,7 @@ limitations under the License. // fundamentally differently typed fields. 
// // `conversion-gen` will scan its `--input-dirs`, looking at the -// package defined in each of those directories for comment tags that +// Package defined in each of those directories for comment tags that // define a conversion code generation task. A package requests // conversion code generation by including one or more comment in the // package's `doc.go` file (currently anywhere in that file is @@ -73,7 +73,7 @@ limitations under the License. // // +k8s:conversion-gen= // // This introduces a conversion task, for which the destination -// package is the one containing the file with the tag and the tag +// Package is the one containing the file with the tag and the tag // identifies a package containing internal types. If there is also a // tag of the form // @@ -98,9 +98,8 @@ import ( "flag" "github.com/spf13/pflag" - "k8s.io/klog/v2" - generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" + "k8s.io/klog/v2" "sigs.k8s.io/cluster-api-provider-aws/hack/tools/third_party/conversion-gen/generators" ) diff --git a/hack/tools/tools.go b/hack/tools/tools.go index 904b8df95f..418afcde99 100644 --- a/hack/tools/tools.go +++ b/hack/tools/tools.go @@ -26,6 +26,7 @@ import ( _ "github.com/a8m/envsubst" _ "github.com/ahmetb/gen-crd-api-reference-docs" _ "github.com/golang/mock/mockgen" + _ "github.com/goreleaser/goreleaser" _ "github.com/itchyny/gojq/cmd/gojq" _ "github.com/joelanford/go-apidiff" _ "github.com/mikefarah/yq/v4" diff --git a/iam/api/v1beta1/types.go b/iam/api/v1beta1/types.go index 3147969066..527c857be9 100644 --- a/iam/api/v1beta1/types.go +++ b/iam/api/v1beta1/types.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package v1beta1 contains API Schema definitions for the iam v1beta1 API group. // +k8s:deepcopy-gen=package,register // +k8s:defaulter-gen=TypeMeta // +groupName=iam.aws.infrastructure.cluster.x-k8s.io diff --git a/main.go b/main.go index 8f38b3f49f..fad2ec3967 100644 --- a/main.go +++ b/main.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package main contains the main entrypoint for the AWS provider components. package main import ( @@ -91,20 +92,23 @@ func init() { } var ( - enableLeaderElection bool - leaderElectionNamespace string - watchNamespace string - watchFilterValue string - profilerAddress string - awsClusterConcurrency int - instanceStateConcurrency int - awsMachineConcurrency int - waitInfraPeriod time.Duration - syncPeriod time.Duration - webhookPort int - webhookCertDir string - healthAddr string - serviceEndpoints string + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + leaderElectionNamespace string + watchNamespace string + watchFilterValue string + profilerAddress string + awsClusterConcurrency int + instanceStateConcurrency int + awsMachineConcurrency int + waitInfraPeriod time.Duration + syncPeriod time.Duration + webhookPort int + webhookCertDir string + healthAddr string + serviceEndpoints string // maxEKSSyncPeriod is the maximum allowed duration for the sync-period flag when using EKS. 
It is set to 10 minutes // because during resync it will create a new AWS auth token which can a maximum life of 15 minutes and this ensures @@ -170,6 +174,9 @@ func main() { Scheme: scheme, Metrics: diagnosticsOpts, LeaderElection: enableLeaderElection, + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, LeaderElectionResourceLock: resourcelock.LeasesResourceLock, LeaderElectionID: "controller-leader-elect-capa", LeaderElectionNamespace: leaderElectionNamespace, @@ -494,6 +501,27 @@ func initFlags(fs *pflag.FlagSet) { "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.", ) + fs.DurationVar( + &leaderElectionLeaseDuration, + "leader-elect-lease-duration", + 15*time.Second, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)", + ) + + fs.DurationVar( + &leaderElectionRenewDeadline, + "leader-elect-renew-deadline", + 10*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)", + ) + + fs.DurationVar( + &leaderElectionRetryPeriod, + "leader-elect-retry-period", + 2*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)", + ) + fs.StringVar( &watchNamespace, "namespace", diff --git a/metadata.yaml b/metadata.yaml index d1c28d3ccd..3633269e4b 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -47,3 +47,12 @@ releaseSeries: - major: 2 minor: 3 contract: v1beta1 + - major: 2 + minor: 4 + contract: v1beta1 + - major: 2 + minor: 5 + contract: v1beta1 + - major: 2 + minor: 6 + contract: v1beta1 diff --git a/netlify.toml b/netlify.toml index 8d00611e0a..15183afe21 100644 --- a/netlify.toml +++ b/netlify.toml @@ -4,7 +4,7 @@ publish = "docs/book/book" [build.environment] - GO_VERSION = "1.21.5" + GO_VERSION = "1.22.6" # Standard Netlify redirects [[redirects]] diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go index debcd25153..8bc4a00ff3 100644 --- a/pkg/annotations/annotations.go +++ b/pkg/annotations/annotations.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package annotations provides utility functions for working with annotations. package annotations import ( diff --git a/pkg/cloud/awserrors/errors.go b/pkg/cloud/awserrors/errors.go index b7ff53b654..d51b41595c 100644 --- a/pkg/cloud/awserrors/errors.go +++ b/pkg/cloud/awserrors/errors.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package awserrors provides a way to generate AWS errors. package awserrors import ( @@ -32,6 +33,7 @@ const ( GatewayNotFound = "InvalidGatewayID.NotFound" GroupNotFound = "InvalidGroup.NotFound" InternetGatewayNotFound = "InvalidInternetGatewayID.NotFound" + InvalidCarrierGatewayNotFound = "InvalidCarrierGatewayID.NotFound" EgressOnlyInternetGatewayNotFound = "InvalidEgressOnlyInternetGatewayID.NotFound" InUseIPAddress = "InvalidIPAddress.InUse" InvalidAccessKeyID = "InvalidAccessKeyId" @@ -102,6 +104,7 @@ func NewConflict(msg string) error { } } +// IsBucketAlreadyOwnedByYou checks if the bucket is already owned. 
func IsBucketAlreadyOwnedByYou(err error) bool { if code, ok := Code(err); ok { return code == BucketAlreadyOwnedByYou diff --git a/pkg/cloud/converters/eks.go b/pkg/cloud/converters/eks.go index d9bc45d8a8..d9985f4693 100644 --- a/pkg/cloud/converters/eks.go +++ b/pkg/cloud/converters/eks.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package converters provides conversion functions for AWS SDK types to CAPA types. package converters import ( @@ -146,6 +147,7 @@ func TaintEffectFromSDK(effect string) (expinfrav1.TaintEffect, error) { } } +// ConvertSDKToIdentityProvider is used to convert an AWS SDK OIDCIdentityProviderConfig to a CAPA OidcIdentityProviderConfig. func ConvertSDKToIdentityProvider(in *ekscontrolplanev1.OIDCIdentityProviderConfig) *identityprovider.OidcIdentityProviderConfig { if in != nil { if in.RequiredClaims == nil { diff --git a/pkg/cloud/endpoints/endpoints.go b/pkg/cloud/endpoints/endpoints.go index e7092ec714..33a87b11cc 100644 --- a/pkg/cloud/endpoints/endpoints.go +++ b/pkg/cloud/endpoints/endpoints.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package endpoints contains aws endpoint related utilities. package endpoints import ( diff --git a/pkg/cloud/filter/types.go b/pkg/cloud/filter/types.go index 3193efc74b..3c704200d3 100644 --- a/pkg/cloud/filter/types.go +++ b/pkg/cloud/filter/types.go @@ -14,4 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package filter contains the ec2 sdk related filters. package filter diff --git a/pkg/cloud/identity/identity.go b/pkg/cloud/identity/identity.go index c14a667e24..18e77bf293 100644 --- a/pkg/cloud/identity/identity.go +++ b/pkg/cloud/identity/identity.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package identity provides the AWSPrincipalTypeProvider interface and its implementations. package identity import ( @@ -79,10 +80,11 @@ func GetAssumeRoleCredentials(roleIdentityProvider *AWSRolePrincipalTypeProvider } // NewAWSRolePrincipalTypeProvider will create a new AWSRolePrincipalTypeProvider from an AWSClusterRoleIdentity. -func NewAWSRolePrincipalTypeProvider(identity *infrav1.AWSClusterRoleIdentity, sourceProvider *AWSPrincipalTypeProvider, log logger.Wrapper) *AWSRolePrincipalTypeProvider { +func NewAWSRolePrincipalTypeProvider(identity *infrav1.AWSClusterRoleIdentity, sourceProvider AWSPrincipalTypeProvider, region string, log logger.Wrapper) *AWSRolePrincipalTypeProvider { return &AWSRolePrincipalTypeProvider{ credentials: nil, stsClient: nil, + region: region, Principal: identity, sourceProvider: sourceProvider, log: log.WithName("AWSRolePrincipalTypeProvider"), @@ -129,7 +131,8 @@ func (p *AWSStaticPrincipalTypeProvider) IsExpired() bool { type AWSRolePrincipalTypeProvider struct { Principal *infrav1.AWSClusterRoleIdentity credentials *credentials.Credentials - sourceProvider *AWSPrincipalTypeProvider + region string + sourceProvider AWSPrincipalTypeProvider log logger.Wrapper stsClient stsiface.STSAPI } @@ -153,9 +156,9 @@ func (p *AWSRolePrincipalTypeProvider) Name() string { // Retrieve returns the credential values for the AWSRolePrincipalTypeProvider. 
func (p *AWSRolePrincipalTypeProvider) Retrieve() (credentials.Value, error) { if p.credentials == nil || p.IsExpired() { - awsConfig := aws.NewConfig() + awsConfig := aws.NewConfig().WithRegion(p.region) if p.sourceProvider != nil { - sourceCreds, err := (*p.sourceProvider).Retrieve() + sourceCreds, err := p.sourceProvider.Retrieve() if err != nil { return credentials.Value{}, err } diff --git a/pkg/cloud/identity/identity_test.go b/pkg/cloud/identity/identity_test.go index 29cd0ee826..9f4a995ab8 100644 --- a/pkg/cloud/identity/identity_test.go +++ b/pkg/cloud/identity/identity_test.go @@ -45,7 +45,7 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) { }, } - var staticProvider AWSPrincipalTypeProvider = NewAWSStaticPrincipalTypeProvider(&infrav1.AWSClusterStaticIdentity{}, secret) + staticProvider := NewAWSStaticPrincipalTypeProvider(&infrav1.AWSClusterStaticIdentity{}, secret) stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl) roleIdentity := &infrav1.AWSClusterRoleIdentity{ @@ -58,10 +58,11 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) { }, } - var roleProvider AWSPrincipalTypeProvider = &AWSRolePrincipalTypeProvider{ + roleProvider := &AWSRolePrincipalTypeProvider{ credentials: nil, Principal: roleIdentity, - sourceProvider: &staticProvider, + region: "us-west-2", + sourceProvider: staticProvider, stsClient: stsMock, } @@ -75,10 +76,11 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) { }, } - var roleProvider2 AWSPrincipalTypeProvider = &AWSRolePrincipalTypeProvider{ + roleProvider2 := &AWSRolePrincipalTypeProvider{ credentials: nil, Principal: roleIdentity2, - sourceProvider: &roleProvider, + region: "us-west-2", + sourceProvider: roleProvider, stsClient: stsMock, } @@ -167,8 +169,8 @@ func TestAWSStaticPrincipalTypeProvider(t *testing.T) { name: "Role provider with role provider source fails to retrieve when the source's source cannot assume source", provider: roleProvider2, expect: func(m *mock_stsiface.MockSTSAPIMockRecorder) { - roleProvider.(*AWSRolePrincipalTypeProvider).credentials.Expire() - roleProvider2.(*AWSRolePrincipalTypeProvider).credentials.Expire() + roleProvider.credentials.Expire() + roleProvider2.credentials.Expire() // AssumeRoleWithContext() call is not needed for roleIdentity as it has unexpired credentials m.AssumeRoleWithContext(gomock.Any(), &sts.AssumeRoleInput{ RoleArn: aws.String(roleIdentity.Spec.RoleArn), diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go index 7d6115d429..0ebc12e383 100644 --- a/pkg/cloud/interfaces.go +++ b/pkg/cloud/interfaces.go @@ -14,10 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package cloud contains interfaces for working with AWS resources. package cloud import ( awsclient "github.com/aws/aws-sdk-go/aws/client" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -67,6 +69,8 @@ type ClusterScoper interface { // Cluster returns the cluster object. ClusterObj() ClusterObject + // UnstructuredControlPlane returns the unstructured control plane object. + UnstructuredControlPlane() (*unstructured.Unstructured, error) // IdentityRef returns the AWS infrastructure cluster identityRef. 
IdentityRef() *infrav1.AWSIdentityReference diff --git a/pkg/cloud/logs/logs.go b/pkg/cloud/logs/logs.go index d20c657347..af22708f12 100644 --- a/pkg/cloud/logs/logs.go +++ b/pkg/cloud/logs/logs.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package logs provides a wrapper for the logr.Logger to be used as an AWS Logger. package logs import ( diff --git a/pkg/cloud/metrics/metrics.go b/pkg/cloud/metrics/metrics.go index b2c763ee78..4c3e5e988d 100644 --- a/pkg/cloud/metrics/metrics.go +++ b/pkg/cloud/metrics/metrics.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package metrics provides a way to capture request metrics. package metrics import ( diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go index fd67eede86..9efaa16d7a 100644 --- a/pkg/cloud/scope/cluster.go +++ b/pkg/cloud/scope/cluster.go @@ -22,6 +22,7 @@ import ( awsclient "github.com/aws/aws-sdk-go/aws/client" "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -152,6 +153,17 @@ func (s *ClusterScope) SecondaryCidrBlock() *string { return nil } +// SecondaryCidrBlocks returns the additional CIDR blocks to be associated with the managed VPC. +func (s *ClusterScope) SecondaryCidrBlocks() []infrav1.VpcCidrBlock { + return s.AWSCluster.Spec.NetworkSpec.VPC.SecondaryCidrBlocks +} + +// AllSecondaryCidrBlocks returns all secondary CIDR blocks (combining `SecondaryCidrBlock` and `SecondaryCidrBlocks`). +func (s *ClusterScope) AllSecondaryCidrBlocks() []infrav1.VpcCidrBlock { + // Non-EKS clusters don't have anything in `SecondaryCidrBlock()` + return s.SecondaryCidrBlocks() +} + // Name returns the CAPI cluster name. func (s *ClusterScope) Name() string { return s.Cluster.Name @@ -183,6 +195,7 @@ func (s *ClusterScope) ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec { return s.AWSCluster.Spec.ControlPlaneLoadBalancer } +// ControlPlaneLoadBalancers returns load balancers configured for the control plane. func (s *ClusterScope) ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec { return []*infrav1.AWSLoadBalancerSpec{ s.AWSCluster.Spec.ControlPlaneLoadBalancer, @@ -191,6 +204,7 @@ func (s *ClusterScope) ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpe } // ControlPlaneLoadBalancerScheme returns the Classic ELB scheme (public or internal facing). +// Deprecated: This method is going to be removed in a future release. Use LoadBalancer.Scheme. func (s *ClusterScope) ControlPlaneLoadBalancerScheme() infrav1.ELBScheme { if s.ControlPlaneLoadBalancer() != nil && s.ControlPlaneLoadBalancer().Scheme != nil { return *s.ControlPlaneLoadBalancer().Scheme @@ -198,6 +212,7 @@ func (s *ClusterScope) ControlPlaneLoadBalancerScheme() infrav1.ELBScheme { return infrav1.ELBSchemeInternetFacing } +// ControlPlaneLoadBalancerName returns the name of the control plane load balancer. func (s *ClusterScope) ControlPlaneLoadBalancerName() *string { if s.AWSCluster.Spec.ControlPlaneLoadBalancer != nil { return s.AWSCluster.Spec.ControlPlaneLoadBalancer.Name @@ -205,10 +220,12 @@ func (s *ClusterScope) ControlPlaneLoadBalancerName() *string { return nil } +// ControlPlaneEndpoint returns the cluster control plane endpoint. 
func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { return s.AWSCluster.Spec.ControlPlaneEndpoint } +// Bucket returns the cluster bucket configuration. func (s *ClusterScope) Bucket() *infrav1.S3Bucket { return s.AWSCluster.Spec.S3Bucket } @@ -395,3 +412,9 @@ func (s *ClusterScope) Partition() string { func (s *ClusterScope) AdditionalControlPlaneIngressRules() []infrav1.IngressRule { return s.AWSCluster.Spec.NetworkSpec.DeepCopy().AdditionalControlPlaneIngressRules } + +// UnstructuredControlPlane returns the unstructured object for the control plane, if any. +// When the reference is not set, it returns an empty object. +func (s *ClusterScope) UnstructuredControlPlane() (*unstructured.Unstructured, error) { + return getUnstructuredControlPlane(context.TODO(), s.client, s.Cluster) +} diff --git a/pkg/cloud/scope/elb.go b/pkg/cloud/scope/elb.go index 53b3d6db99..3d588f665b 100644 --- a/pkg/cloud/scope/elb.go +++ b/pkg/cloud/scope/elb.go @@ -43,6 +43,7 @@ type ELBScope interface { ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec // ControlPlaneLoadBalancerScheme returns the Classic ELB scheme (public or internal facing) + // Deprecated: This method is going to be removed in a future release. Use LoadBalancer.Scheme. ControlPlaneLoadBalancerScheme() infrav1.ELBScheme // ControlPlaneLoadBalancerName returns the Classic ELB name diff --git a/pkg/cloud/scope/global.go b/pkg/cloud/scope/global.go index cd02a81eef..2ecc9dbf50 100644 --- a/pkg/cloud/scope/global.go +++ b/pkg/cloud/scope/global.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package scope provides a global scope for CAPA controllers. package scope import ( diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go index 676e255365..fb2df8b59f 100644 --- a/pkg/cloud/scope/launchtemplate.go +++ b/pkg/cloud/scope/launchtemplate.go @@ -51,11 +51,13 @@ type LaunchTemplateScope interface { logger.Wrapper } +// ResourceServiceToUpdate is a struct that contains the resource ID and the resource service to update. type ResourceServiceToUpdate struct { ResourceID *string ResourceService ResourceService } +// ResourceService defines the interface for resources. 
type ResourceService interface { UpdateResourceTags(resourceID *string, create, remove map[string]string) error } diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index fcb735c22e..331c4c31e2 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -19,11 +19,9 @@ package scope import ( "context" "encoding/base64" - "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -44,7 +42,6 @@ import ( type MachineScopeParams struct { Client client.Client Logger *logger.Logger - ControlPlane *unstructured.Unstructured Cluster *clusterv1.Cluster Machine *clusterv1.Machine InfraCluster EC2Scope @@ -69,9 +66,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { if params.InfraCluster == nil { return nil, errors.New("aws cluster is required when creating a MachineScope") } - if params.ControlPlane == nil { - return nil, errors.New("cluster control plane is required when creating a MachineScope") - } if params.Logger == nil { log := klog.Background() @@ -86,7 +80,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { Logger: *params.Logger, client: params.Client, patchHelper: helper, - ControlPlane: params.ControlPlane, Cluster: params.Cluster, Machine: params.Machine, InfraCluster: params.InfraCluster, @@ -102,7 +95,6 @@ type MachineScope struct { Cluster *clusterv1.Cluster Machine *clusterv1.Machine - ControlPlane *unstructured.Unstructured InfraCluster EC2Scope AWSMachine *infrav1.AWSMachine } @@ -149,7 +141,7 @@ func (m *MachineScope) GetProviderID() string { // SetProviderID sets the AWSMachine providerID in spec. func (m *MachineScope) SetProviderID(instanceID, availabilityZone string) { - providerID := fmt.Sprintf("aws:///%s/%s", availabilityZone, instanceID) + providerID := GenerateProviderID(availabilityZone, instanceID) m.AWSMachine.Spec.ProviderID = ptr.To[string](providerID) } @@ -202,6 +194,7 @@ func (m *MachineScope) UseSecretsManager(userDataFormat string) bool { return !m.AWSMachine.Spec.CloudInit.InsecureSkipSecretsManager && !m.UseIgnition(userDataFormat) } +// UseIgnition returns true if the AWSMachine should use Ignition. func (m *MachineScope) UseIgnition(userDataFormat string) bool { return userDataFormat == "ignition" || (m.AWSMachine.Spec.Ignition != nil) } @@ -272,6 +265,7 @@ func (m *MachineScope) GetRawBootstrapData() ([]byte, error) { return data, err } +// GetRawBootstrapDataWithFormat returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName. func (m *MachineScope) GetRawBootstrapDataWithFormat() ([]byte, string, error) { if m.Machine.Spec.Bootstrap.DataSecretName == nil { return nil, "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") @@ -377,8 +371,22 @@ func (m *MachineScope) IsEKSManaged() bool { return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == ekscontrolplanev1.AWSManagedControlPlaneKind } +// IsControlPlaneExternallyManaged checks if the control plane is externally managed. +// +// This is determined by the kind of the control plane object (EKS for example), +// or if the control plane referenced object is reporting as externally managed. 
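+//
+// When the Cluster has no controlPlaneRef, UnstructuredControlPlane returns an empty unstructured
+// object (see getUnstructuredControlPlane in this change), and an empty object is expected to be
+// treated as not externally managed, so this should report false in that case.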
func (m *MachineScope) IsControlPlaneExternallyManaged() bool { - return util.IsExternalManagedControlPlane(m.ControlPlane) + if m.IsEKSManaged() { + return true + } + + // Check if the control plane is externally managed. + u, err := m.InfraCluster.UnstructuredControlPlane() + if err != nil { + m.Error(err, "failed to get unstructured control plane") + return false + } + return util.IsExternalManagedControlPlane(u) } // IsExternallyManaged checks if the machine is externally managed. @@ -392,3 +400,11 @@ func (m *MachineScope) SetInterruptible() { m.AWSMachine.Status.Interruptible = true } } + +// GetElasticIPPool returns the Elastic IP Pool for a machine, if it exists. +func (m *MachineScope) GetElasticIPPool() *infrav1.ElasticIPPool { + if m.AWSMachine == nil { + return nil + } + return m.AWSMachine.Spec.ElasticIPPool +} diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index 9cad370f35..f34790d061 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -22,7 +22,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -133,8 +132,7 @@ func setupMachineScope() (*MachineScope, error) { InfraCluster: &ClusterScope{ AWSCluster: awsCluster, }, - ControlPlane: &unstructured.Unstructured{}, - AWSMachine: awsMachine, + AWSMachine: awsMachine, }, ) } @@ -225,10 +223,9 @@ func TestGetRawBootstrapDataWithFormat(t *testing.T) { machineScope, err := NewMachineScope( MachineScopeParams{ - Client: client, - Machine: machine, - Cluster: cluster, - ControlPlane: &unstructured.Unstructured{}, + Client: client, + Machine: machine, + Cluster: cluster, InfraCluster: &ClusterScope{ AWSCluster: awsCluster, }, diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go index 069c76a41b..00e8abeadc 100644 --- a/pkg/cloud/scope/machinepool.go +++ b/pkg/cloud/scope/machinepool.go @@ -234,34 +234,40 @@ func (m *MachinePoolScope) SetASGStatus(v expinfrav1.ASGStatus) { m.AWSMachinePool.Status.ASGStatus = &v } +// GetObjectMeta returns the AWSMachinePool ObjectMeta. func (m *MachinePoolScope) GetObjectMeta() *metav1.ObjectMeta { return &m.AWSMachinePool.ObjectMeta } +// GetSetter returns the AWSMachinePool object setter. func (m *MachinePoolScope) GetSetter() conditions.Setter { return m.AWSMachinePool } +// GetEC2Scope returns the EC2 scope. func (m *MachinePoolScope) GetEC2Scope() EC2Scope { return m.InfraCluster } +// GetLaunchTemplateIDStatus returns the launch template ID status. func (m *MachinePoolScope) GetLaunchTemplateIDStatus() string { return m.AWSMachinePool.Status.LaunchTemplateID } +// SetLaunchTemplateIDStatus sets the launch template ID status. func (m *MachinePoolScope) SetLaunchTemplateIDStatus(id string) { m.AWSMachinePool.Status.LaunchTemplateID = id } +// GetLaunchTemplateLatestVersionStatus returns the launch template latest version status. func (m *MachinePoolScope) GetLaunchTemplateLatestVersionStatus() string { if m.AWSMachinePool.Status.LaunchTemplateVersion != nil { return *m.AWSMachinePool.Status.LaunchTemplateVersion - } else { - return "" } + return "" } +// SetLaunchTemplateLatestVersionStatus sets the launch template latest version status. 
func (m *MachinePoolScope) SetLaunchTemplateLatestVersionStatus(version string) { m.AWSMachinePool.Status.LaunchTemplateVersion = &version } @@ -370,18 +376,22 @@ func nodeIsReady(node corev1.Node) bool { return false } +// GetLaunchTemplate returns the launch template. func (m *MachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate { return &m.AWSMachinePool.Spec.AWSLaunchTemplate } +// GetMachinePool returns the machine pool object. func (m *MachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { return m.MachinePool } +// LaunchTemplateName returns the name of the launch template. func (m *MachinePoolScope) LaunchTemplateName() string { return m.Name() } +// GetRuntimeObject returns the AWSMachinePool object, in runtime.Object form. func (m *MachinePoolScope) GetRuntimeObject() runtime.Object { return m.AWSMachinePool } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index 56e5bd59c5..9c3b6b208d 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -27,6 +27,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -207,6 +208,28 @@ func (s *ManagedControlPlaneScope) SecondaryCidrBlock() *string { return s.ControlPlane.Spec.SecondaryCidrBlock } +// SecondaryCidrBlocks returns the additional CIDR blocks to be associated with the managed VPC. +func (s *ManagedControlPlaneScope) SecondaryCidrBlocks() []infrav1.VpcCidrBlock { + return s.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks +} + +// AllSecondaryCidrBlocks returns all secondary CIDR blocks (combining `SecondaryCidrBlock` and `SecondaryCidrBlocks`). +func (s *ManagedControlPlaneScope) AllSecondaryCidrBlocks() []infrav1.VpcCidrBlock { + secondaryCidrBlocks := s.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks + + // If only `AWSManagedControlPlane.spec.secondaryCidrBlock` is set, no additional checks are done to remain + // backward-compatible. The `VPCSpec.SecondaryCidrBlocks` field was added later - if that list is not empty, we + // require `AWSManagedControlPlane.spec.secondaryCidrBlock` to be listed in there as well (validation done in + // webhook). + if s.ControlPlane.Spec.SecondaryCidrBlock != nil && len(secondaryCidrBlocks) == 0 { + secondaryCidrBlocks = []infrav1.VpcCidrBlock{{ + IPv4CidrBlock: *s.ControlPlane.Spec.SecondaryCidrBlock, + }} + } + + return secondaryCidrBlocks +} + // SecurityGroupOverrides returns the security groups that are overrides in the ControlPlane spec. func (s *ManagedControlPlaneScope) SecurityGroupOverrides() map[infrav1.SecurityGroupRole]string { return s.ControlPlane.Spec.NetworkSpec.SecurityGroupOverrides @@ -406,6 +429,12 @@ func (s *ManagedControlPlaneScope) VpcCni() ekscontrolplanev1.VpcCni { return s.ControlPlane.Spec.VpcCni } +// RestrictPrivateSubnets returns whether Control Plane should be restricted to Private subnets. +func (s *ManagedControlPlaneScope) RestrictPrivateSubnets() bool { + return s.ControlPlane.Spec.RestrictPrivateSubnets +} + +// OIDCIdentityProviderConfig returns the OIDC identity provider config. 
func (s *ManagedControlPlaneScope) OIDCIdentityProviderConfig() *ekscontrolplanev1.OIDCIdentityProviderConfig { return s.ControlPlane.Spec.OIDCIdentityProviderConfig } @@ -428,6 +457,11 @@ func (s *ManagedControlPlaneScope) ControlPlaneLoadBalancer() *infrav1.AWSLoadBa return nil } +// ControlPlaneLoadBalancers returns the AWSLoadBalancerSpecs. +func (s *ManagedControlPlaneScope) ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec { + return nil +} + // Partition returns the cluster partition. func (s *ManagedControlPlaneScope) Partition() string { if s.ControlPlane.Spec.Partition == "" { @@ -440,3 +474,9 @@ func (s *ManagedControlPlaneScope) Partition() string { func (s *ManagedControlPlaneScope) AdditionalControlPlaneIngressRules() []infrav1.IngressRule { return nil } + +// UnstructuredControlPlane returns the unstructured object for the control plane, if any. +// When the reference is not set, it returns an empty object. +func (s *ManagedControlPlaneScope) UnstructuredControlPlane() (*unstructured.Unstructured, error) { + return getUnstructuredControlPlane(context.TODO(), s.Client, s.Cluster) +} diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go index 1950ea0221..e9421d7282 100644 --- a/pkg/cloud/scope/managednodegroup.go +++ b/pkg/cloud/scope/managednodegroup.go @@ -315,14 +315,17 @@ func (s *ManagedMachinePoolScope) NodegroupName() string { return s.ManagedMachinePool.Spec.EKSNodegroupName } +// Name returns the name of the AWSManagedMachinePool. func (s *ManagedMachinePoolScope) Name() string { return s.ManagedMachinePool.Name } +// Namespace returns the namespace of the AWSManagedMachinePool. func (s *ManagedMachinePoolScope) Namespace() string { return s.ManagedMachinePool.Namespace } +// GetRawBootstrapData returns the raw bootstrap data from the linked Machine's bootstrap.dataSecretName. func (s *ManagedMachinePoolScope) GetRawBootstrapData() ([]byte, *types.NamespacedName, error) { if s.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { return nil, nil, errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") @@ -343,58 +346,68 @@ func (s *ManagedMachinePoolScope) GetRawBootstrapData() ([]byte, *types.Namespac return value, &key, nil } +// GetObjectMeta returns the ObjectMeta for the AWSManagedMachinePool. func (s *ManagedMachinePoolScope) GetObjectMeta() *metav1.ObjectMeta { return &s.ManagedMachinePool.ObjectMeta } +// GetSetter returns the condition setter. func (s *ManagedMachinePoolScope) GetSetter() conditions.Setter { return s.ManagedMachinePool } +// GetEC2Scope returns the EC2Scope. func (s *ManagedMachinePoolScope) GetEC2Scope() EC2Scope { return s.EC2Scope } +// IsEKSManaged returns true if the control plane is managed by EKS. func (s *ManagedMachinePoolScope) IsEKSManaged() bool { return true } +// GetLaunchTemplateIDStatus returns the launch template ID status. func (s *ManagedMachinePoolScope) GetLaunchTemplateIDStatus() string { if s.ManagedMachinePool.Status.LaunchTemplateID != nil { return *s.ManagedMachinePool.Status.LaunchTemplateID - } else { - return "" } + return "" } +// SetLaunchTemplateIDStatus sets the launch template ID status. func (s *ManagedMachinePoolScope) SetLaunchTemplateIDStatus(id string) { s.ManagedMachinePool.Status.LaunchTemplateID = &id } +// GetLaunchTemplateLatestVersionStatus returns the launch template latest version status. 
func (s *ManagedMachinePoolScope) GetLaunchTemplateLatestVersionStatus() string { if s.ManagedMachinePool.Status.LaunchTemplateVersion != nil { return *s.ManagedMachinePool.Status.LaunchTemplateVersion - } else { - return "" } + return "" } +// SetLaunchTemplateLatestVersionStatus sets the launch template latest version status. func (s *ManagedMachinePoolScope) SetLaunchTemplateLatestVersionStatus(version string) { s.ManagedMachinePool.Status.LaunchTemplateVersion = &version } +// GetLaunchTemplate returns the launch template. func (s *ManagedMachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate { return s.ManagedMachinePool.Spec.AWSLaunchTemplate } +// GetMachinePool returns the machine pool. func (s *ManagedMachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { return s.MachinePool } +// LaunchTemplateName returns the launch template name. func (s *ManagedMachinePoolScope) LaunchTemplateName() string { return fmt.Sprintf("%s-%s", s.ControlPlane.Name, s.ManagedMachinePool.Name) } +// GetRuntimeObject returns the AWSManagedMachinePool, in runtime.Object form. func (s *ManagedMachinePoolScope) GetRuntimeObject() runtime.Object { return s.ManagedMachinePool } diff --git a/pkg/cloud/scope/network.go b/pkg/cloud/scope/network.go index 32b02ca0d2..aeb0e34231 100644 --- a/pkg/cloud/scope/network.go +++ b/pkg/cloud/scope/network.go @@ -37,8 +37,14 @@ type NetworkScope interface { CNIIngressRules() infrav1.CNIIngressRules // SecurityGroups returns the cluster security groups as a map, it creates the map if empty. SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup - // SecondaryCidrBlock returns the optional secondary CIDR block to use for pod IPs + // SecondaryCidrBlock returns the optional secondary CIDR block to use for pod IPs. This may later be renamed since + // it should not be confused with SecondaryCidrBlocks. SecondaryCidrBlock() *string + // SecondaryCidrBlocks returns the additional CIDR blocks to be associated with the managed VPC. + SecondaryCidrBlocks() []infrav1.VpcCidrBlock + // AllSecondaryCidrBlocks returns a unique list of all secondary CIDR blocks (combining `SecondaryCidrBlock` and + // `SecondaryCidrBlocks`). + AllSecondaryCidrBlocks() []infrav1.VpcCidrBlock // Bastion returns the bastion details for the cluster. Bastion() *infrav1.Bastion diff --git a/pkg/cloud/scope/providerid.go b/pkg/cloud/scope/providerid.go index ecf3feea19..1b11135ce4 100644 --- a/pkg/cloud/scope/providerid.go +++ b/pkg/cloud/scope/providerid.go @@ -17,6 +17,7 @@ limitations under the License. package scope import ( + "fmt" "regexp" "strings" @@ -124,3 +125,14 @@ func (p *ProviderID) Validate() bool { func (p *ProviderID) IndexKey() string { return p.String() } + +// ProviderIDPrefix is the prefix of AWS resource IDs to form the Kubernetes Provider ID. +// NOTE: this format matches the 2 slashes format used in cloud-provider and cluster-autoscaler. +const ProviderIDPrefix = "aws://" + +// GenerateProviderID generates a valid AWS Node/Machine ProviderID field. +// +// By default, the last id provided is used as identifier (last part). +func GenerateProviderID(ids ...string) string { + return fmt.Sprintf("%s/%s", ProviderIDPrefix, strings.Join(ids, "/")) +} diff --git a/pkg/cloud/scope/providerid_test.go b/pkg/cloud/scope/providerid_test.go new file mode 100644 index 0000000000..df6011f8d2 --- /dev/null +++ b/pkg/cloud/scope/providerid_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scope + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +func TestGenerateProviderID(t *testing.T) { + testCases := []struct { + ids []string + + expectedProviderID string + }{ + { + ids: []string{ + "eu-west-1a", + "instance-id", + }, + expectedProviderID: "aws:///eu-west-1a/instance-id", + }, + { + ids: []string{ + "eu-west-1a", + "test-id1", + "test-id2", + "instance-id", + }, + expectedProviderID: "aws:///eu-west-1a/test-id1/test-id2/instance-id", + }, + } + + for _, tc := range testCases { + g := NewGomegaWithT(t) + providerID := GenerateProviderID(tc.ids...) + + g.Expect(providerID).To(Equal(tc.expectedProviderID)) + } +} diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index da4c36cb13..71cc24ed61 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/cluster-api/util/patch" ) +// ROSAControlPlaneScopeParams defines the input parameters used to create a new ROSAControlPlaneScope. type ROSAControlPlaneScopeParams struct { Client client.Client Logger *logger.Logger @@ -46,6 +47,7 @@ type ROSAControlPlaneScopeParams struct { Endpoints []ServiceEndpoint } +// NewROSAControlPlaneScope creates a new ROSAControlPlaneScope from the supplied parameters. func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlPlaneScope, error) { if params.Cluster == nil { return nil, errors.New("failed to generate new scope from nil Cluster") @@ -67,7 +69,7 @@ func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlP controllerName: params.ControllerName, } - session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, *params.ControlPlane.Spec.Region, params.Endpoints, params.Logger) + session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, managedScope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger) if err != nil { return nil, errors.Errorf("failed to create aws session: %v", err) } @@ -106,18 +108,22 @@ type ROSAControlPlaneScope struct { Identity *sts.GetCallerIdentityOutput } +// InfraCluster returns the AWSManagedControlPlane object. func (s *ROSAControlPlaneScope) InfraCluster() cloud.ClusterObject { return s.ControlPlane } +// IdentityRef returns the AWSIdentityReference object. func (s *ROSAControlPlaneScope) IdentityRef() *infrav1.AWSIdentityReference { return s.ControlPlane.Spec.IdentityRef } +// Session returns the AWS SDK session. Used for creating clients. func (s *ROSAControlPlaneScope) Session() awsclient.ConfigProvider { return s.session } +// ServiceLimiter returns the AWS SDK session. Used for creating clients. 
func (s *ROSAControlPlaneScope) ServiceLimiter(service string) *throttle.ServiceLimiter { if sl, ok := s.serviceLimiters[service]; ok { return sl @@ -125,6 +131,7 @@ func (s *ROSAControlPlaneScope) ServiceLimiter(service string) *throttle.Service return nil } +// ControllerName returns the name of the controller. func (s *ROSAControlPlaneScope) ControllerName() string { return s.controllerName } @@ -143,6 +150,7 @@ func (s *ROSAControlPlaneScope) InfraClusterName() string { return s.ControlPlane.Name } +// RosaClusterName returns the ROSA cluster name. func (s *ROSAControlPlaneScope) RosaClusterName() string { return s.ControlPlane.Spec.RosaClusterName } @@ -167,11 +175,27 @@ func (s *ROSAControlPlaneScope) CredentialsSecret() *corev1.Secret { } } +// ClusterAdminPasswordSecret returns the corev1.Secret object for the cluster admin password. func (s *ROSAControlPlaneScope) ClusterAdminPasswordSecret() *corev1.Secret { + return s.secretWithOwnerReference(fmt.Sprintf("%s-admin-password", s.Cluster.Name)) +} + +// ExternalAuthBootstrapKubeconfigSecret returns the corev1.Secret object for the external auth bootstrap kubeconfig. +// This is a temporary admin kubeconfig generated using break-glass credentials for the user to bootstrap their environment, for example by setting up RBAC for OIDC users/groups. +// This kubeconfig will be created only once initially and will be valid for only 24h. +// The kubeconfig secret will not be automatically rotated and will be invalid after 24h. However, users can opt to manually delete the secret to trigger the generation of a new one which will be valid for another 24h. +func (s *ROSAControlPlaneScope) ExternalAuthBootstrapKubeconfigSecret() *corev1.Secret { + return s.secretWithOwnerReference(fmt.Sprintf("%s-bootstrap-kubeconfig", s.Cluster.Name)) +} + +func (s *ROSAControlPlaneScope) secretWithOwnerReference(name string) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-admin-password", s.Cluster.Name), + Name: name, Namespace: s.ControlPlane.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(s.ControlPlane, rosacontrolplanev1.GroupVersion.WithKind("ROSAControlPlane")), + }, }, } } @@ -183,6 +207,8 @@ func (s *ROSAControlPlaneScope) PatchObject() error { s.ControlPlane, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ rosacontrolplanev1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1.ROSAControlPlaneValidCondition, + rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, }}) } diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index c39372b6b0..00d480ca3e 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -19,13 +19,16 @@ package scope import ( "context" + awsclient "github.com/aws/aws-sdk-go/aws/client" "github.com/pkg/errors" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -42,6 +45,8 @@ type RosaMachinePoolScopeParams struct { RosaMachinePool *expinfrav1.ROSAMachinePool MachinePool 
*expclusterv1.MachinePool ControllerName string + + Endpoints []ServiceEndpoint } // NewRosaMachinePoolScope creates a new Scope from the supplied parameters. @@ -70,7 +75,7 @@ func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoo return nil, errors.Wrap(err, "failed to init MachinePool patch helper") } - return &RosaMachinePoolScope{ + scope := &RosaMachinePoolScope{ Logger: *params.Logger, Client: params.Client, patchHelper: ammpHelper, @@ -81,9 +86,22 @@ func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoo RosaMachinePool: params.RosaMachinePool, MachinePool: params.MachinePool, controllerName: params.ControllerName, - }, nil + } + + session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, scope, params.ControlPlane.Spec.Region, params.Endpoints, params.Logger) + if err != nil { + return nil, errors.Errorf("failed to create aws session: %v", err) + } + + scope.session = session + scope.serviceLimiters = serviceLimiters + + return scope, nil } +var _ cloud.Session = &RosaMachinePoolScope{} +var _ cloud.SessionMetadata = &RosaMachinePoolScope{} + // RosaMachinePoolScope defines the basic context for an actuator to operate upon. type RosaMachinePoolScope struct { logger.Logger @@ -96,6 +114,9 @@ type RosaMachinePoolScope struct { RosaMachinePool *expinfrav1.ROSAMachinePool MachinePool *expclusterv1.MachinePool + session awsclient.ConfigProvider + serviceLimiters throttle.ServiceLimiters + controllerName string } @@ -135,10 +156,39 @@ func (s *RosaMachinePoolScope) ControllerName() string { return s.controllerName } +// GetSetter returns the condition setter for the RosaMachinePool. func (s *RosaMachinePoolScope) GetSetter() conditions.Setter { return s.RosaMachinePool } +// ServiceLimiter implements cloud.Session. +func (s *RosaMachinePoolScope) ServiceLimiter(service string) *throttle.ServiceLimiter { + if sl, ok := s.serviceLimiters[service]; ok { + return sl + } + return nil +} + +// Session implements cloud.Session. +func (s *RosaMachinePoolScope) Session() awsclient.ConfigProvider { + return s.session +} + +// IdentityRef implements cloud.SessionMetadata. +func (s *RosaMachinePoolScope) IdentityRef() *v1beta2.AWSIdentityReference { + return s.ControlPlane.Spec.IdentityRef +} + +// InfraClusterName implements cloud.SessionMetadata. +func (s *RosaMachinePoolScope) InfraClusterName() string { + return s.ControlPlane.Name +} + +// Namespace implements cloud.SessionMetadata. +func (s *RosaMachinePoolScope) Namespace() string { + return s.Cluster.Namespace +} + // RosaMchinePoolReadyFalse marks the ready condition false using warning if error isn't // empty. func (s *RosaMachinePoolScope) RosaMchinePoolReadyFalse(reason string, err string) error { diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go index cda46352f5..546e11089b 100644 --- a/pkg/cloud/scope/session.go +++ b/pkg/cloud/scope/session.go @@ -120,7 +120,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) 
} - providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, log) + providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log) if err != nil { // could not get providers and retrieve the credentials conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, err.Error()) @@ -256,6 +256,7 @@ func buildProvidersForRef( k8sClient client.Client, clusterScoper cloud.SessionMetadata, ref *infrav1.AWSIdentityReference, + region string, log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) { if ref == nil { log.Trace("AWSCluster does not have a IdentityRef specified") @@ -299,7 +300,7 @@ func buildProvidersForRef( setPrincipalUsageAllowedCondition(clusterScoper) if roleIdentity.Spec.SourceIdentityRef != nil { - providers, err = buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, roleIdentity.Spec.SourceIdentityRef, log) + providers, err = buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, roleIdentity.Spec.SourceIdentityRef, region, log) if err != nil { return providers, err } @@ -313,11 +314,7 @@ func buildProvidersForRef( } } - if sourceProvider != nil { - provider = identity.NewAWSRolePrincipalTypeProvider(roleIdentity, &sourceProvider, log) - } else { - provider = identity.NewAWSRolePrincipalTypeProvider(roleIdentity, nil, log) - } + provider = identity.NewAWSRolePrincipalTypeProvider(roleIdentity, sourceProvider, region, log) providers = append(providers, provider) default: return providers, errors.Errorf("No such provider known: '%s'", ref.Kind) @@ -408,9 +405,9 @@ func buildAWSClusterControllerIdentity(ctx context.Context, identityObjectKey cl return nil } -func getProvidersForCluster(ctx context.Context, k8sClient client.Client, clusterScoper cloud.SessionMetadata, log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) { +func getProvidersForCluster(ctx context.Context, k8sClient client.Client, clusterScoper cloud.SessionMetadata, region string, log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) { providers := make([]identity.AWSPrincipalTypeProvider, 0) - providers, err := buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, clusterScoper.IdentityRef(), log) + providers, err := buildProvidersForRef(ctx, providers, k8sClient, clusterScoper, clusterScoper.IdentityRef(), region, log) if err != nil { return nil, err } diff --git a/pkg/cloud/scope/session_test.go b/pkg/cloud/scope/session_test.go index 13bffa1a9e..9620d23df1 100644 --- a/pkg/cloud/scope/session_test.go +++ b/pkg/cloud/scope/session_test.go @@ -228,7 +228,7 @@ func TestPrincipalParsing(t *testing.T) { Namespace: "default", }, }, - AWSCluster: &infrav1.AWSCluster{}, + AWSCluster: &infrav1.AWSCluster{Spec: infrav1.AWSClusterSpec{Region: "us-west-2"}}, }, ) @@ -489,7 +489,7 @@ func TestPrincipalParsing(t *testing.T) { k8sClient := fake.NewClientBuilder().WithScheme(scheme).Build() tc.setup(t, k8sClient) clusterScope.AWSCluster = &tc.awsCluster - providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScope, logger.NewLogger(klog.Background())) + providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScope, clusterScope.Region(), logger.NewLogger(klog.Background())) if tc.expectError { if err == nil { t.Fatal("Expected an error but didn't get one") diff --git a/pkg/cloud/scope/sg.go b/pkg/cloud/scope/sg.go index 
793ff80669..5db8282c86 100644 --- a/pkg/cloud/scope/sg.go +++ b/pkg/cloud/scope/sg.go @@ -44,6 +44,7 @@ type SGScope interface { Bastion() *infrav1.Bastion // ControlPlaneLoadBalancer returns the load balancer settings that are requested. + // Deprecated: Use ControlPlaneLoadBalancers() ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec // SetNatGatewaysIPs sets the Nat Gateways Public IPs. @@ -54,4 +55,8 @@ type SGScope interface { // AdditionalControlPlaneIngressRules returns the additional ingress rules for the control plane security group. AdditionalControlPlaneIngressRules() []infrav1.IngressRule + + // ControlPlaneLoadBalancers returns both the ControlPlaneLoadBalancer and SecondaryControlPlaneLoadBalancer AWSLoadBalancerSpecs. + // The control plane load balancers should always be returned in the above order. + ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpec } diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go index 76e1ec91d8..2521f100de 100644 --- a/pkg/cloud/scope/shared.go +++ b/pkg/cloud/scope/shared.go @@ -17,13 +17,18 @@ limitations under the License. package scope import ( + "context" "fmt" "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" ) var ( @@ -95,7 +100,7 @@ func (p *defaultSubnetPlacementStrategy) Place(input *placementInput) ([]string, return subnetIDs, nil } - controlPlaneSubnetIDs := input.ControlplaneSubnets.FilterPrivate().IDs() + controlPlaneSubnetIDs := input.ControlplaneSubnets.FilterPrivate().FilterNonCni().IDs() if len(controlPlaneSubnetIDs) > 0 { p.logger.Debug("using all the private subnets from the control plane") return controlPlaneSubnetIDs, nil @@ -114,9 +119,9 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP case expinfrav1.AZSubnetTypeAll: // no-op case expinfrav1.AZSubnetTypePublic: - subnets = subnets.FilterPublic() + subnets = subnets.FilterPublic().FilterNonCni() case expinfrav1.AZSubnetTypePrivate: - subnets = subnets.FilterPrivate() + subnets = subnets.FilterPrivate().FilterNonCni() } } if len(subnets) == 0 { @@ -127,3 +132,24 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP return subnetIDs, nil } + +// getUnstructuredControlPlane returns the unstructured object for the control plane, if any. +// When the reference is not set, it returns an empty object. +func getUnstructuredControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) { + if cluster.Spec.ControlPlaneRef == nil { + // If the control plane ref is not set, return an empty object. + // Not having a control plane ref is valid given API contracts. 
+ return &unstructured.Unstructured{}, nil + } + + namespace := cluster.Spec.ControlPlaneRef.Namespace + if namespace == "" { + namespace = cluster.Namespace + } + + u, err := external.Get(ctx, client, cluster.Spec.ControlPlaneRef, namespace) + if err != nil { + return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name) + } + return u, nil +} diff --git a/pkg/cloud/scope/shared_test.go b/pkg/cloud/scope/shared_test.go index 34d124abf3..8afc051c6c 100644 --- a/pkg/cloud/scope/shared_test.go +++ b/pkg/cloud/scope/shared_test.go @@ -182,6 +182,14 @@ func TestSubnetPlacement(t *testing.T) { AvailabilityZone: "eu-west-1c", IsPublic: false, }, + infrav1.SubnetSpec{ + ID: "subnet-az6", + AvailabilityZone: "eu-west-1c", + IsPublic: false, + Tags: infrav1.Tags{ + infrav1.NameAWSSubnetAssociation: infrav1.SecondarySubnetTagValue, + }, + }, }, logger: logger.NewLogger(klog.Background()), expectedSubnetIDs: []string{"subnet-az3"}, diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup.go b/pkg/cloud/services/autoscaling/autoscalinggroup.go index 6e24cf22f9..d473010d12 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package asg provides a service for managing AWS AutoScalingGroups. package asg import ( @@ -471,6 +472,7 @@ func (s *Service) UpdateResourceTags(resourceID *string, create, remove map[stri return nil } +// SuspendProcesses suspends the processes for an autoscaling group. func (s *Service) SuspendProcesses(name string, processes []string) error { input := autoscaling.ScalingProcessQuery{ AutoScalingGroupName: aws.String(name), @@ -482,6 +484,7 @@ func (s *Service) SuspendProcesses(name string, processes []string) error { return nil } +// ResumeProcesses resumes the processes for an autoscaling group. func (s *Service) ResumeProcesses(name string, processes []string) error { input := autoscaling.ScalingProcessQuery{ AutoScalingGroupName: aws.String(name), @@ -539,6 +542,12 @@ func (s *Service) SubnetIDs(scope *scope.MachinePoolScope) ([]string, error) { } for _, subnet := range out.Subnets { + tags := converters.TagsToMap(subnet.Tags) + if tags[infrav1.NameAWSSubnetAssociation] == infrav1.SecondarySubnetTagValue { + // Subnet belongs to a secondary CIDR block which won't be used to create instances + continue + } + subnetIDs = append(subnetIDs, *subnet.SubnetId) } diff --git a/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go b/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go index 0c1a67496c..58e83111bb 100644 --- a/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go +++ b/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscalingapi_mock.go @@ -1333,6 +1333,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeInstanceRefreshes(arg0 interfa return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceRefreshes", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeInstanceRefreshes), arg0) } +// DescribeInstanceRefreshesPages mocks base method. 
+func (m *MockAutoScalingAPI) DescribeInstanceRefreshesPages(arg0 *autoscaling.DescribeInstanceRefreshesInput, arg1 func(*autoscaling.DescribeInstanceRefreshesOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeInstanceRefreshesPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeInstanceRefreshesPages indicates an expected call of DescribeInstanceRefreshesPages. +func (mr *MockAutoScalingAPIMockRecorder) DescribeInstanceRefreshesPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceRefreshesPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeInstanceRefreshesPages), arg0, arg1) +} + +// DescribeInstanceRefreshesPagesWithContext mocks base method. +func (m *MockAutoScalingAPI) DescribeInstanceRefreshesPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeInstanceRefreshesInput, arg2 func(*autoscaling.DescribeInstanceRefreshesOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeInstanceRefreshesPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeInstanceRefreshesPagesWithContext indicates an expected call of DescribeInstanceRefreshesPagesWithContext. +func (mr *MockAutoScalingAPIMockRecorder) DescribeInstanceRefreshesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceRefreshesPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeInstanceRefreshesPagesWithContext), varargs...) +} + // DescribeInstanceRefreshesRequest mocks base method. func (m *MockAutoScalingAPI) DescribeInstanceRefreshesRequest(arg0 *autoscaling.DescribeInstanceRefreshesInput) (*request.Request, *autoscaling.DescribeInstanceRefreshesOutput) { m.ctrl.T.Helper() @@ -1566,6 +1599,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancerTargetGroups(arg0 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerTargetGroups", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancerTargetGroups), arg0) } +// DescribeLoadBalancerTargetGroupsPages mocks base method. +func (m *MockAutoScalingAPI) DescribeLoadBalancerTargetGroupsPages(arg0 *autoscaling.DescribeLoadBalancerTargetGroupsInput, arg1 func(*autoscaling.DescribeLoadBalancerTargetGroupsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeLoadBalancerTargetGroupsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeLoadBalancerTargetGroupsPages indicates an expected call of DescribeLoadBalancerTargetGroupsPages. +func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancerTargetGroupsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerTargetGroupsPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancerTargetGroupsPages), arg0, arg1) +} + +// DescribeLoadBalancerTargetGroupsPagesWithContext mocks base method. 
+func (m *MockAutoScalingAPI) DescribeLoadBalancerTargetGroupsPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeLoadBalancerTargetGroupsInput, arg2 func(*autoscaling.DescribeLoadBalancerTargetGroupsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeLoadBalancerTargetGroupsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeLoadBalancerTargetGroupsPagesWithContext indicates an expected call of DescribeLoadBalancerTargetGroupsPagesWithContext. +func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancerTargetGroupsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancerTargetGroupsPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancerTargetGroupsPagesWithContext), varargs...) +} + // DescribeLoadBalancerTargetGroupsRequest mocks base method. func (m *MockAutoScalingAPI) DescribeLoadBalancerTargetGroupsRequest(arg0 *autoscaling.DescribeLoadBalancerTargetGroupsInput) (*request.Request, *autoscaling.DescribeLoadBalancerTargetGroupsOutput) { m.ctrl.T.Helper() @@ -1616,6 +1682,39 @@ func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancers(arg0 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancers", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancers), arg0) } +// DescribeLoadBalancersPages mocks base method. +func (m *MockAutoScalingAPI) DescribeLoadBalancersPages(arg0 *autoscaling.DescribeLoadBalancersInput, arg1 func(*autoscaling.DescribeLoadBalancersOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeLoadBalancersPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeLoadBalancersPages indicates an expected call of DescribeLoadBalancersPages. +func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancersPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancersPages", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancersPages), arg0, arg1) +} + +// DescribeLoadBalancersPagesWithContext mocks base method. +func (m *MockAutoScalingAPI) DescribeLoadBalancersPagesWithContext(arg0 context.Context, arg1 *autoscaling.DescribeLoadBalancersInput, arg2 func(*autoscaling.DescribeLoadBalancersOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeLoadBalancersPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeLoadBalancersPagesWithContext indicates an expected call of DescribeLoadBalancersPagesWithContext. +func (mr *MockAutoScalingAPIMockRecorder) DescribeLoadBalancersPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLoadBalancersPagesWithContext", reflect.TypeOf((*MockAutoScalingAPI)(nil).DescribeLoadBalancersPagesWithContext), varargs...) +} + // DescribeLoadBalancersRequest mocks base method. 
func (m *MockAutoScalingAPI) DescribeLoadBalancersRequest(arg0 *autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go b/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go index 522e4d3ab5..f664299d6d 100644 --- a/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go +++ b/pkg/cloud/services/autoscaling/mock_autoscalingiface/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_autoscalingiface provides a mock implementation for the AutoScalingAPI interface. // Run go generate to regenerate this mock. +// //go:generate ../../../../../hack/tools/bin/mockgen -destination autoscalingapi_mock.go -package mock_autoscalingiface github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface AutoScalingAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt autoscalingapi_mock.go > _autoscalingapi_mock.go && mv _autoscalingapi_mock.go autoscalingapi_mock.go" - package mock_autoscalingiface //nolint:stylecheck diff --git a/pkg/cloud/services/awsnode/cni.go b/pkg/cloud/services/awsnode/cni.go index 25211e6062..ade31ddbcf 100644 --- a/pkg/cloud/services/awsnode/cni.go +++ b/pkg/cloud/services/awsnode/cni.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/kustomize/api/konfig" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -272,22 +271,25 @@ func (s *Service) deleteResource(ctx context.Context, remoteClient client.Client return fmt.Errorf("deleting resource %s: %w", key, err) } s.scope.Debug(fmt.Sprintf("resource %s was not found, no action", key)) - } else { - // resource found, delete if no label or not managed by helm - if val, ok := obj.GetLabels()[konfig.ManagedbyLabelKey]; !ok || val != "Helm" { - if err := remoteClient.Delete(ctx, obj, &client.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("deleting %s: %w", key, err) - } - s.scope.Debug(fmt.Sprintf( - "resource %s was not found, not deleted", key)) - } else { - s.scope.Debug(fmt.Sprintf("resource %s was deleted", key)) - } - } else { - s.scope.Debug(fmt.Sprintf("resource %s is managed by helm, not deleted", key)) + return nil + } + // Don't delete if the "PreventDeletionLabel" label exists. It could be there because CAPA added it (see below), + // or because it was added externally, for example if a custom version of AWS CNI was already installed. + // Either way, CAPA should not delete such a labelled CNI installation. 
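+ // For example, labelling an externally installed aws-node DaemonSet with the
+ // infrav1.PreventDeletionLabel key (any value works, only the key is checked) is expected to
+ // make CAPA leave that CNI installation in place.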
+ labels := obj.GetLabels() + if _, exists := labels[infrav1.PreventDeletionLabel]; exists { + s.scope.Debug(fmt.Sprintf("resource %s has '%s' label, skipping deletion", key, infrav1.PreventDeletionLabel)) + return nil + } + // Delete the resource + if err := remoteClient.Delete(ctx, obj, &client.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete %s: %w", key, err) } + s.scope.Debug(fmt.Sprintf( + "resource %s was not found, not deleted", key)) + } else { + s.scope.Debug(fmt.Sprintf("resource %s was deleted", key)) } - return nil } diff --git a/pkg/cloud/services/awsnode/cni_test.go b/pkg/cloud/services/awsnode/cni_test.go index 1619d843ac..67c78d806b 100644 --- a/pkg/cloud/services/awsnode/cni_test.go +++ b/pkg/cloud/services/awsnode/cni_test.go @@ -263,7 +263,7 @@ type cachingClient struct { updateChain []client.Object } -func (c *cachingClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { +func (c *cachingClient) Get(_ context.Context, _ client.ObjectKey, obj client.Object, _ ...client.GetOption) error { if _, ok := obj.(*v1.DaemonSet); ok { daemonset, _ := obj.(*v1.DaemonSet) *daemonset = *c.getValue.(*v1.DaemonSet) @@ -271,12 +271,12 @@ func (c *cachingClient) Get(ctx context.Context, key client.ObjectKey, obj clien return nil } -func (c *cachingClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { +func (c *cachingClient) Update(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { c.updateChain = append(c.updateChain, obj) return nil } -func (c *cachingClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { +func (c *cachingClient) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil } @@ -297,7 +297,7 @@ func (s *mockScope) VpcCni() ekscontrolplanev1.VpcCni { return s.cni } -func (s *mockScope) Info(msg string, keysAndValues ...interface{}) { +func (s *mockScope) Info(_ string, _ ...interface{}) { } diff --git a/pkg/cloud/services/awsnode/service.go b/pkg/cloud/services/awsnode/service.go index 892a703429..ddc8d52251 100644 --- a/pkg/cloud/services/awsnode/service.go +++ b/pkg/cloud/services/awsnode/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package awsnode provides a way to interact with AWS nodes. package awsnode import ( diff --git a/pkg/cloud/services/ec2/eip.go b/pkg/cloud/services/ec2/eip.go new file mode 100644 index 0000000000..77484e201c --- /dev/null +++ b/pkg/cloud/services/ec2/eip.go @@ -0,0 +1,54 @@ +package ec2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" +) + +func getElasticIPRoleName(instanceID string) string { + return fmt.Sprintf("ec2-%s", instanceID) +} + +// ReconcileElasticIPFromPublicPool reconciles the elastic IP from a custom Public IPv4 Pool. +func (s *Service) ReconcileElasticIPFromPublicPool(pool *infrav1.ElasticIPPool, instance *infrav1.Instance) error { + // TODO: check if the instance is in the state allowing EIP association. 
+ // Expected instance states: pending or running + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html + if err := s.getAndAssociateAddressesToInstance(pool, getElasticIPRoleName(instance.ID), instance.ID); err != nil { + return fmt.Errorf("failed to reconcile Elastic IP: %w", err) + } + return nil +} + +// ReleaseElasticIP releases a specific elastic IP based on the instance role. +func (s *Service) ReleaseElasticIP(instanceID string) error { + return s.netService.ReleaseAddressByRole(getElasticIPRoleName(instanceID)) +} + +// getAndAssociateAddressesToInstance finds or creates an EIP for the given instance and role. +func (s *Service) getAndAssociateAddressesToInstance(pool *infrav1.ElasticIPPool, role string, instance string) (err error) { + eips, err := s.netService.GetOrAllocateAddresses(pool, 1, role) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedAllocateEIP", "Failed to get Elastic IP for %q: %v", role, err) + return err + } + if len(eips) != 1 { + record.Warnf(s.scope.InfraCluster(), "FailedAllocateEIP", "Failed to allocate Elastic IP for %q: %v", role, err) + return fmt.Errorf("unexpected number of Elastic IPs for instance %q, got %d: %w", instance, len(eips), err) + } + _, err = s.EC2Client.AssociateAddressWithContext(context.TODO(), &ec2.AssociateAddressInput{ + InstanceId: aws.String(instance), + AllocationId: aws.String(eips[0]), + }) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedAssociateEIP", "Failed to associate Elastic IP for %q: %v", role, err) + return fmt.Errorf("failed to associate Elastic IP %q to instance %q: %w", eips[0], instance, err) + } + return nil +} diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 1fbcce3c90..ac11fea9fd 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -25,6 +25,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/pkg/errors" "k8s.io/utils/ptr" @@ -98,11 +99,11 @@ func (s *Service) InstanceIfExists(id *string) (*infrav1.Instance, error) { if len(out.Reservations) > 0 && len(out.Reservations[0].Instances) > 0 { return s.SDKToInstance(out.Reservations[0].Instances[0]) - } else { - // Failed to find instance with provider id. - record.Eventf(s.scope.InfraCluster(), "FailedFindInstances", "failed to find instance by providerId %q: %v", *id, err) - return nil, ErrInstanceNotFoundByID } + + // Failed to find instance with provider id. + record.Eventf(s.scope.InfraCluster(), "FailedFindInstances", "failed to find instance by providerId %q: %v", *id, err) + return nil, ErrInstanceNotFoundByID } // CreateInstance runs an ec2 instance. @@ -181,9 +182,18 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use } input.SubnetID = subnetID + // Preserve user-defined PublicIp option. + input.PublicIPOnLaunch = scope.AWSMachine.Spec.PublicIP + + // Public addresses from a BYO Public IPv4 Pool need to be associated after launch (in the main machine + // reconciliation loop) to prevent duplicated public IPs. Mapping a public IP on launch is explicitly + // disabled for instances with PublicIP set to true.
+ if scope.AWSMachine.Spec.ElasticIPPool != nil && scope.AWSMachine.Spec.ElasticIPPool.PublicIpv4Pool != nil { + input.PublicIPOnLaunch = ptr.To(false) + } + if !scope.IsControlPlaneExternallyManaged() && !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" { record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", "Failed to run controlplane, APIServer ELB not available") - return nil, awserrors.NewFailedDependency("failed to run controlplane, APIServer ELB not available") } @@ -237,8 +247,12 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use input.PlacementGroupName = scope.AWSMachine.Spec.PlacementGroupName + input.PlacementGroupPartition = scope.AWSMachine.Spec.PlacementGroupPartition + input.PrivateDNSName = scope.AWSMachine.Spec.PrivateDNSName + input.CapacityReservationID = scope.AWSMachine.Spec.CapacityReservationID + s.scope.Debug("Running instance", "machine-role", scope.Role()) s.scope.Debug("Running instance with instance metadata options", "metadata options", input.InstanceMetadataOptions) out, err := s.runInstance(scope.Role(), input) @@ -335,10 +349,25 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { *subnet.SubnetId, *subnet.AvailabilityZone, *failureDomain) continue } - if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP && !*subnet.MapPublicIpOnLaunch { - errMessage += fmt.Sprintf(" subnet %q is a private subnet.", *subnet.SubnetId) + + if ptr.Deref(scope.AWSMachine.Spec.PublicIP, false) { + matchingSubnet := s.scope.Subnets().FindByID(*subnet.SubnetId) + if matchingSubnet == nil { + errMessage += fmt.Sprintf(" unable to find subnet %q among the AWSCluster subnets.", *subnet.SubnetId) + continue + } + if !matchingSubnet.IsPublic { + errMessage += fmt.Sprintf(" subnet %q is a private subnet.", *subnet.SubnetId) + continue + } + } + + tags := converters.TagsToMap(subnet.Tags) + if tags[infrav1.NameAWSSubnetAssociation] == infrav1.SecondarySubnetTagValue { + errMessage += fmt.Sprintf(" subnet %q belongs to a secondary CIDR block which won't be used to create instances.", *subnet.SubnetId) continue } + filtered = append(filtered, subnet) } // prefer a subnet in the cluster VPC if multiple match @@ -355,7 +384,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { return *filtered[0].SubnetId, nil case failureDomain != nil: if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP { - subnets := s.scope.Subnets().FilterPublic().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(*failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available in availability zone %q", scope.Name(), *failureDomain) @@ -365,7 +394,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { return subnets[0].GetResourceID(), nil } - subnets := s.scope.Subnets().FilterPrivate().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(*failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q, no subnets available in availability zone %q", scope.Name(), *failureDomain) @@ -374,7 +403,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { } return subnets[0].GetResourceID(), nil case scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP: - subnets := 
s.scope.Subnets().FilterPublic() + subnets := s.scope.Subnets().FilterPublic().FilterNonCni() if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available", scope.Name()) record.Eventf(scope.AWSMachine, "FailedCreate", errMessage) @@ -386,7 +415,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { // with control plane machines. default: - sns := s.scope.Subnets().FilterPrivate() + sns := s.scope.Subnets().FilterPrivate().FilterNonCni() if len(sns) == 0 { errMessage := fmt.Sprintf("failed to run machine %q, no subnets available", scope.Name()) record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", errMessage) @@ -539,13 +568,25 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan DeviceIndex: aws.Int64(int64(index)), }) } + netInterfaces[0].AssociatePublicIpAddress = i.PublicIPOnLaunch input.NetworkInterfaces = netInterfaces } else { - input.SubnetId = aws.String(i.SubnetID) + if ptr.Deref(i.PublicIPOnLaunch, false) { + input.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ + { + DeviceIndex: aws.Int64(0), + SubnetId: aws.String(i.SubnetID), + Groups: aws.StringSlice(i.SecurityGroupIDs), + AssociatePublicIpAddress: i.PublicIPOnLaunch, + }, + } + } else { + input.SubnetId = aws.String(i.SubnetID) - if len(i.SecurityGroupIDs) > 0 { - input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs) + if len(i.SecurityGroupIDs) > 0 { + input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs) + } } } @@ -584,26 +625,31 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan } if len(i.Tags) > 0 { - spec := &ec2.TagSpecification{ResourceType: aws.String(ec2.ResourceTypeInstance)} - // We need to sort keys for tests to work - keys := make([]string, 0, len(i.Tags)) - for k := range i.Tags { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - spec.Tags = append(spec.Tags, &ec2.Tag{ - Key: aws.String(key), - Value: aws.String(i.Tags[key]), - }) - } + resources := []string{ec2.ResourceTypeInstance, ec2.ResourceTypeVolume, ec2.ResourceTypeNetworkInterface} + for _, r := range resources { + spec := &ec2.TagSpecification{ResourceType: aws.String(r)} + + // We need to sort keys for tests to work + keys := make([]string, 0, len(i.Tags)) + for k := range i.Tags { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + spec.Tags = append(spec.Tags, &ec2.Tag{ + Key: aws.String(key), + Value: aws.String(i.Tags[key]), + }) + } - input.TagSpecifications = append(input.TagSpecifications, spec) + input.TagSpecifications = append(input.TagSpecifications, spec) + } } input.InstanceMarketOptions = getInstanceMarketOptionsRequest(i.SpotMarketOptions) input.MetadataOptions = getInstanceMetadataOptionsRequest(i.InstanceMetadataOptions) input.PrivateDnsNameOptions = getPrivateDNSNameOptionsRequest(i.PrivateDNSName) + input.CapacityReservationSpecification = getCapacityReservationSpecification(i.CapacityReservationID) if i.Tenancy != "" { input.Placement = &ec2.Placement{ @@ -611,11 +657,18 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan } } + if i.PlacementGroupName == "" && i.PlacementGroupPartition != 0 { + return nil, errors.Errorf("placementGroupPartition is set but placementGroupName is empty") + } + if i.PlacementGroupName != "" { if input.Placement == nil { input.Placement = &ec2.Placement{} } input.Placement.GroupName = &i.PlacementGroupName + if 
i.PlacementGroupPartition != 0 { + input.Placement.PartitionNumber = &i.PlacementGroupPartition + } } out, err := s.EC2Client.RunInstancesWithContext(context.TODO(), input) @@ -887,6 +940,8 @@ func (s *Service) SDKToInstance(v *ec2.Instance) (*infrav1.Instance, error) { func (s *Service) getInstanceAddresses(instance *ec2.Instance) []clusterv1.MachineAddress { addresses := []clusterv1.MachineAddress{} + // Check if the DHCP Option Set has domain name set + domainName := s.GetDHCPOptionSetDomainName(s.EC2Client, instance.VpcId) for _, eni := range instance.NetworkInterfaces { privateDNSAddress := clusterv1.MachineAddress{ Type: clusterv1.MachineInternalDNS, @@ -896,8 +951,18 @@ func (s *Service) getInstanceAddresses(instance *ec2.Instance) []clusterv1.Machi Type: clusterv1.MachineInternalIP, Address: aws.StringValue(eni.PrivateIpAddress), } + addresses = append(addresses, privateDNSAddress, privateIPAddress) + if domainName != nil { + // Add secondary private DNS Name with domain name set in DHCP Option Set + additionalPrivateDNSAddress := clusterv1.MachineAddress{ + Type: clusterv1.MachineInternalDNS, + Address: fmt.Sprintf("%s.%s", strings.Split(privateDNSAddress.Address, ".")[0], *domainName), + } + addresses = append(addresses, additionalPrivateDNSAddress) + } + // An elastic IP is attached if association is non nil pointer if eni.Association != nil { publicDNSAddress := clusterv1.MachineAddress{ @@ -911,6 +976,7 @@ func (s *Service) getInstanceAddresses(instance *ec2.Instance) []clusterv1.Machi addresses = append(addresses, publicDNSAddress, publicIPAddress) } } + return addresses } @@ -1009,6 +1075,54 @@ func (s *Service) ModifyInstanceMetadataOptions(instanceID string, options *infr return nil } +// GetDHCPOptionSetDomainName returns the domain DNS name for the VPC from the DHCP Options. +func (s *Service) GetDHCPOptionSetDomainName(ec2client ec2iface.EC2API, vpcID *string) *string { + log := s.scope.GetLogger() + + if vpcID == nil { + log.Info("vpcID is nil, skipping DHCP Option Set discovery") + return nil + } + + vpcInput := &ec2.DescribeVpcsInput{ + VpcIds: []*string{vpcID}, + } + + vpcResult, err := ec2client.DescribeVpcs(vpcInput) + if err != nil { + log.Info("failed to describe VPC, skipping DHCP Option Set discovery", "vpcID", *vpcID, "Error", err.Error()) + return nil + } + + dhcpInput := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{vpcResult.Vpcs[0].DhcpOptionsId}, + } + + dhcpResult, err := ec2client.DescribeDhcpOptions(dhcpInput) + if err != nil { + log.Error(err, "failed to describe DHCP Options Set", "DhcpOptionsSet", *dhcpResult) + return nil + } + + for _, dhcpConfig := range dhcpResult.DhcpOptions[0].DhcpConfigurations { + if *dhcpConfig.Key == "domain-name" { + if len(dhcpConfig.Values) == 0 { + return nil + } + domainName := dhcpConfig.Values[0].Value + // default domainName is 'ec2.internal' in us-east-1 and 'region.compute.internal' in the other regions. + if (s.scope.Region() == "us-east-1" && *domainName == "ec2.internal") || + (s.scope.Region() != "us-east-1" && *domainName == fmt.Sprintf("%s.compute.internal", s.scope.Region())) { + return nil + } + + return domainName + } + } + + return nil +} + // filterGroups filters a list for a string. 
func filterGroups(list []string, strToFilter string) (newList []string) { for _, item := range list { @@ -1019,6 +1133,19 @@ func filterGroups(list []string, strToFilter string) (newList []string) { return } +func getCapacityReservationSpecification(capacityReservationID *string) *ec2.CapacityReservationSpecification { + if capacityReservationID == nil { + // Not targeting any specific Capacity Reservation + return nil + } + + return &ec2.CapacityReservationSpecification{ + CapacityReservationTarget: &ec2.CapacityReservationTarget{ + CapacityReservationId: capacityReservationID, + }, + } +} + func getInstanceMarketOptionsRequest(spotMarketOptions *infrav1.SpotMarketOptions) *ec2.InstanceMarketOptionsRequest { if spotMarketOptions == nil { // Instance is not a Spot instance diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index 9ccf5a67ba..654403427f 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -28,10 +28,10 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" + . "github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -336,6 +336,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, }, Status: infrav1.AWSClusterStatus{ @@ -463,6 +466,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: true, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, }, Status: infrav1.AWSClusterStatus{ @@ -697,6 +703,56 @@ func TestCreateInstance(t *testing.T) { }, }, }, + { + ResourceType: aws.String("volume"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("/"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("network-interface"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("/"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, }, UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)), MaxCount: aws.Int64(1), @@ -895,6 +951,56 @@ func TestCreateInstance(t *testing.T) { }, }, }, + { + ResourceType: aws.String("volume"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("/"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + 
}, + }, + { + ResourceType: aws.String("network-interface"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("/"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, }, UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)), MaxCount: aws.Int64(1), @@ -974,6 +1080,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, }, Status: infrav1.AWSClusterStatus{ @@ -1123,6 +1232,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, ImageLookupOrg: "cluster-level-image-lookup-org", }, @@ -1274,6 +1386,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, ImageLookupOrg: "cluster-level-image-lookup-org", }, @@ -2174,6 +2289,149 @@ func TestCreateInstance(t *testing.T) { } }, }, + { + name: "public IP true, public subnet ID given and MapPublicIpOnLaunch is false", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"set": "node"}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To[string]("bootstrap-data"), + }, + }, + }, + machineConfig: &infrav1.AWSMachineSpec{ + AMI: infrav1.AMIReference{ + ID: aws.String("abc"), + }, + InstanceType: "m5.large", + Subnet: &infrav1.AWSResourceReference{ + ID: aws.String("public-subnet-1"), + }, + PublicIP: aws.Bool(true), + }, + awsCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-id", + }, + Subnets: infrav1.Subnets{{ + ID: "public-subnet-1", + IsPublic: true, + }}, + }, + }, + Status: infrav1.AWSClusterStatus{ + Network: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "1", + }, + infrav1.SecurityGroupNode: { + ID: "2", + }, + infrav1.SecurityGroupLB: { + ID: "3", + }, + }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. + DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable), + {Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{"public-subnet-1"})}, + }, + }). + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{{ + SubnetId: aws.String("public-subnet-1"), + AvailabilityZone: aws.String("us-east-1b"), + MapPublicIpOnLaunch: aws.Bool(false), + }}, + }, nil) + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), + }, + }, + }, + }, + }, nil) + m. + RunInstancesWithContext(context.TODO(), gomock.Any()). 
+ Do(func(_ context.Context, in *ec2.RunInstancesInput, _ ...request.Option) { + if len(in.NetworkInterfaces) == 0 { + t.Fatalf("expected a NetworkInterface to be defined") + } + if !aws.BoolValue(in.NetworkInterfaces[0].AssociatePublicIpAddress) { + t.Fatalf("expected AssociatePublicIpAddress to be set and true") + } + if subnet := aws.StringValue(in.NetworkInterfaces[0].SubnetId); subnet != "public-subnet-1" { + t.Fatalf("expected subnet ID to be \"public-subnet-1\", got %q", subnet) + } + if in.NetworkInterfaces[0].Groups == nil { + t.Fatalf("expected security groups to be set") + } + }). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + InstanceId: aws.String("two"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("public-subnet-1"), + ImageId: aws.String("ami-1"), + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + }, + }, + }, + }, nil) + m. + DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeNetworkInterfacesOutput{ + NetworkInterfaces: []*ec2.NetworkInterface{}, + NextToken: nil, + }, nil) + }, + check: func(instance *infrav1.Instance, err error) { + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + }, + }, { name: "public IP true and private subnet ID given", machine: &clusterv1.Machine{ @@ -2362,7 +2620,7 @@ func TestCreateInstance(t *testing.T) { }). Return(&ec2.DescribeSubnetsOutput{ Subnets: []*ec2.Subnet{{ - SubnetId: aws.String("filtered-subnet-1"), + SubnetId: aws.String("public-subnet-1"), MapPublicIpOnLaunch: aws.Bool(true), }}, }, nil) @@ -2410,7 +2668,7 @@ func TestCreateInstance(t *testing.T) { }, }, { - name: "public IP true and public subnet exists", + name: "both public IP, subnet filter defined and MapPublicIpOnLaunch is false", machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"set": "node"}, @@ -2426,7 +2684,13 @@ func TestCreateInstance(t *testing.T) { ID: aws.String("abc"), }, InstanceType: "m5.large", - PublicIP: aws.Bool(true), + Subnet: &infrav1.AWSResourceReference{ + Filters: []infrav1.Filter{{ + Name: "tag:some-tag", + Values: []string{"some-value"}, + }}, + }, + PublicIP: aws.Bool(true), }, awsCluster: &infrav1.AWSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, @@ -2485,13 +2749,40 @@ func TestCreateInstance(t *testing.T) { }, }, nil) m. - RunInstancesWithContext(context.TODO(), gomock.Any()). - Return(&ec2.Reservation{ - Instances: []*ec2.Instance{ - { - State: &ec2.InstanceState{ - Name: aws.String(ec2.InstanceStateNamePending), - }, + DescribeSubnetsWithContext(context.TODO(), &ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable), + {Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})}, + }, + }). + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{{ + SubnetId: aws.String("public-subnet-1"), + MapPublicIpOnLaunch: aws.Bool(false), + }}, + }, nil) + m. + RunInstancesWithContext(context.TODO(), gomock.Any()). 
+ Do(func(_ context.Context, in *ec2.RunInstancesInput, _ ...request.Option) { + if len(in.NetworkInterfaces) == 0 { + t.Fatalf("expected a NetworkInterface to be defined") + } + if !aws.BoolValue(in.NetworkInterfaces[0].AssociatePublicIpAddress) { + t.Fatalf("expected AssociatePublicIpAddress to be set and true") + } + if subnet := aws.StringValue(in.NetworkInterfaces[0].SubnetId); subnet != "public-subnet-1" { + t.Fatalf("expected subnet ID to be \"public-subnet-1\", got %q", subnet) + } + if in.NetworkInterfaces[0].Groups == nil { + t.Fatalf("expected security groups to be set") + } + }). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, IamInstanceProfile: &ec2.IamInstanceProfile{ Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), }, @@ -2528,7 +2819,7 @@ func TestCreateInstance(t *testing.T) { }, }, { - name: "public IP true and no public subnet exists", + name: "public IP true and public subnet exists", machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"set": "node"}, @@ -2558,6 +2849,10 @@ func TestCreateInstance(t *testing.T) { ID: "private-subnet-1", IsPublic: false, }, + infrav1.SubnetSpec{ + ID: "public-subnet-1", + IsPublic: true, + }, }, }, }, @@ -2598,20 +2893,51 @@ func TestCreateInstance(t *testing.T) { }, }, }, nil) + m. + RunInstancesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + InstanceId: aws.String("two"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("public-subnet-1"), + ImageId: aws.String("ami-1"), + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + }, + }, + }, + }, nil) + m. + DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeNetworkInterfacesOutput{ + NetworkInterfaces: []*ec2.NetworkInterface{}, + NextToken: nil, + }, nil) }, check: func(instance *infrav1.Instance, err error) { - expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available" - if err == nil { - t.Fatalf("Expected error, but got nil") - } - - if !strings.Contains(err.Error(), expectedErrMsg) { - t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error()) + if err != nil { + t.Fatalf("did not expect error: %v", err) } }, }, { - name: "with multiple block device mappings", + name: "public IP true, public subnet exists and MapPublicIpOnLaunch is false", machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"set": "node"}, @@ -2627,22 +2953,23 @@ func TestCreateInstance(t *testing.T) { ID: aws.String("abc"), }, InstanceType: "m5.large", - NonRootVolumes: []infrav1.Volume{{ - DeviceName: "device-2", - Size: 8, - }}, + PublicIP: aws.Bool(true), }, awsCluster: &infrav1.AWSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-id", + }, Subnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "subnet-1", + ID: "private-subnet-1", IsPublic: false, }, infrav1.SubnetSpec{ - IsPublic: false, + ID: "public-subnet-1", + IsPublic: true, }, }, }, @@ -2684,8 +3011,22 @@ func TestCreateInstance(t *testing.T) { }, }, }, nil) - m. // TODO: Restore these parameters, but with the tags as well + m. RunInstancesWithContext(context.TODO(), gomock.Any()). + Do(func(_ context.Context, in *ec2.RunInstancesInput, _ ...request.Option) { + if len(in.NetworkInterfaces) == 0 { + t.Fatalf("expected a NetworkInterface to be defined") + } + if !aws.BoolValue(in.NetworkInterfaces[0].AssociatePublicIpAddress) { + t.Fatalf("expected AssociatePublicIpAddress to be set and true") + } + if subnet := aws.StringValue(in.NetworkInterfaces[0].SubnetId); subnet != "public-subnet-1" { + t.Fatalf("expected subnet ID to be \"public-subnet-1\", got %q", subnet) + } + if in.NetworkInterfaces[0].Groups == nil { + t.Fatalf("expected security groups to be set") + } + }). Return(&ec2.Reservation{ Instances: []*ec2.Instance{ { @@ -2697,7 +3038,7 @@ func TestCreateInstance(t *testing.T) { }, InstanceId: aws.String("two"), InstanceType: aws.String("m5.large"), - SubnetId: aws.String("subnet-1"), + SubnetId: aws.String("public-subnet-1"), ImageId: aws.String("ami-1"), RootDeviceName: aws.String("device-1"), BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ @@ -2707,16 +3048,637 @@ func TestCreateInstance(t *testing.T) { VolumeId: aws.String("volume-1"), }, }, - { - DeviceName: aws.String("device-2"), - Ebs: &ec2.EbsInstanceBlockDevice{ - VolumeId: aws.String("volume-2"), - }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + }, + }, + }, + }, nil) + m. + DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeNetworkInterfacesOutput{ + NetworkInterfaces: []*ec2.NetworkInterface{}, + NextToken: nil, + }, nil) + }, + check: func(instance *infrav1.Instance, err error) { + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + }, + }, + { + name: "public IP true and no public subnet exists", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"set": "node"}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To[string]("bootstrap-data"), + }, + }, + }, + machineConfig: &infrav1.AWSMachineSpec{ + AMI: infrav1.AMIReference{ + ID: aws.String("abc"), + }, + InstanceType: "m5.large", + PublicIP: aws.Bool(true), + }, + awsCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-id", + }, + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "private-subnet-1", + IsPublic: false, + }, + }, + }, + }, + Status: infrav1.AWSClusterStatus{ + Network: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "1", + }, + infrav1.SecurityGroupNode: { + ID: "2", + }, + infrav1.SecurityGroupLB: { + ID: "3", + }, + }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), + }, + }, + }, + }, + }, nil) + }, + check: func(instance *infrav1.Instance, err error) { + expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available" + if err == nil { + t.Fatalf("Expected error, but got nil") + } + + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error()) + } + }, + }, + { + name: "with multiple block device mappings", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"set": "node"}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To[string]("bootstrap-data"), + }, + }, + }, + machineConfig: &infrav1.AWSMachineSpec{ + AMI: infrav1.AMIReference{ + ID: aws.String("abc"), + }, + InstanceType: "m5.large", + NonRootVolumes: []infrav1.Volume{{ + DeviceName: "device-2", + Size: 8, + }}, + }, + awsCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "subnet-1", + IsPublic: false, + }, + infrav1.SubnetSpec{ + IsPublic: false, + }, + }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, + }, + }, + Status: infrav1.AWSClusterStatus{ + Network: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "1", + }, + infrav1.SecurityGroupNode: { + ID: "2", + }, + infrav1.SecurityGroupLB: { + ID: "3", + }, + }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. 
+ DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), + }, + }, + }, + }, + }, nil) + m. // TODO: Restore these parameters, but with the tags as well + RunInstancesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + InstanceId: aws.String("two"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("subnet-1"), + ImageId: aws.String("ami-1"), + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + { + DeviceName: aws.String("device-2"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-2"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + }, + }, + }, + }, nil) + m. + DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeNetworkInterfacesOutput{ + NetworkInterfaces: []*ec2.NetworkInterface{}, + NextToken: nil, + }, nil) + }, + check: func(instance *infrav1.Instance, err error) { + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + }, + }, + { + name: "with dedicated tenancy cloud-config", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"set": "node"}, + Namespace: "default", + Name: "machine-aws-test1", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To[string]("bootstrap-data"), + }, + }, + }, + machineConfig: &infrav1.AWSMachineSpec{ + AMI: infrav1.AMIReference{ + ID: aws.String("abc"), + }, + InstanceType: "m5.large", + Tenancy: "dedicated", + UncompressedUserData: &isUncompressedFalse, + }, + awsCluster: &infrav1.AWSCluster{ + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "subnet-1", + IsPublic: false, + }, + infrav1.SubnetSpec{ + IsPublic: false, + }, + }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, + }, + }, + Status: infrav1.AWSClusterStatus{ + Network: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "1", + }, + infrav1.SecurityGroupNode: { + ID: "2", + }, + infrav1.SecurityGroupLB: { + ID: "3", + }, + }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. 
// TODO: Restore these parameters, but with the tags as well + RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{ + ImageId: aws.String("abc"), + InstanceType: aws.String("m5.large"), + KeyName: aws.String("default"), + MaxCount: aws.Int64(1), + MinCount: aws.Int64(1), + Placement: &ec2.Placement{ + Tenancy: &tenancy, + }, + SecurityGroupIds: []*string{aws.String("2"), aws.String("3")}, + SubnetId: aws.String("subnet-1"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("instance"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("volume"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("network-interface"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + }, + UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)), + })). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + InstanceId: aws.String("two"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("subnet-1"), + ImageId: aws.String("ami-1"), + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + Tenancy: &tenancy, + }, + }, + }, + }, nil) + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), + }, + }, + }, + }, + }, nil) + m. + DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeNetworkInterfacesOutput{ + NetworkInterfaces: []*ec2.NetworkInterface{}, + NextToken: nil, + }, nil) + }, + check: func(instance *infrav1.Instance, err error) { + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + }, + }, + { + name: "with custom placement group cloud-config", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"set": "node"}, + Namespace: "default", + Name: "machine-aws-test1", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To[string]("bootstrap-data"), + }, + }, + }, + machineConfig: &infrav1.AWSMachineSpec{ + AMI: infrav1.AMIReference{ + ID: aws.String("abc"), + }, + InstanceType: "m5.large", + PlacementGroupName: "placement-group1", + UncompressedUserData: &isUncompressedFalse, + }, + awsCluster: &infrav1.AWSCluster{ + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "subnet-1", + IsPublic: false, + }, + infrav1.SubnetSpec{ + IsPublic: false, + }, + }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, + }, + }, + Status: infrav1.AWSClusterStatus{ + Network: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "1", + }, + infrav1.SecurityGroupNode: { + ID: "2", + }, + infrav1.SecurityGroupLB: { + ID: "3", + }, + }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. // TODO: Restore these parameters, but with the tags as well + RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{ + ImageId: aws.String("abc"), + InstanceType: aws.String("m5.large"), + KeyName: aws.String("default"), + MaxCount: aws.Int64(1), + MinCount: aws.Int64(1), + Placement: &ec2.Placement{ + GroupName: aws.String("placement-group1"), + }, + SecurityGroupIds: []*string{aws.String("2"), aws.String("3")}, + SubnetId: aws.String("subnet-1"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("instance"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("volume"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("network-interface"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: 
aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + }, + UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)), + })). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + InstanceId: aws.String("two"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("subnet-1"), + ImageId: aws.String("ami-1"), + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + GroupName: aws.String("placement-group1"), + }, + }, + }, + }, nil) + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), }, }, - Placement: &ec2.Placement{ - AvailabilityZone: &az, - }, }, }, }, nil) @@ -2734,7 +3696,7 @@ func TestCreateInstance(t *testing.T) { }, }, { - name: "with dedicated tenancy cloud-config", + name: "with dedicated tenancy and placement group ignition", machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"set": "node"}, @@ -2753,9 +3715,12 @@ func TestCreateInstance(t *testing.T) { }, InstanceType: "m5.large", Tenancy: "dedicated", - UncompressedUserData: &isUncompressedFalse, + PlacementGroupName: "placement-group1", + UncompressedUserData: &isUncompressedTrue, + Ignition: &infrav1.Ignition{}, }, awsCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ Subnets: infrav1.Subnets{ @@ -2767,6 +3732,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, }, Status: infrav1.AWSClusterStatus{ @@ -2789,6 +3757,23 @@ func TestCreateInstance(t *testing.T) { }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), + }, + }, + }, + }, + }, nil) m. 
// TODO: Restore these parameters, but with the tags as well RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{ ImageId: aws.String("abc"), @@ -2797,7 +3782,8 @@ func TestCreateInstance(t *testing.T) { MaxCount: aws.Int64(1), MinCount: aws.Int64(1), Placement: &ec2.Placement{ - Tenancy: &tenancy, + Tenancy: &tenancy, + GroupName: aws.String("placement-group1"), }, SecurityGroupIds: []*string{aws.String("2"), aws.String("3")}, SubnetId: aws.String("subnet-1"), @@ -2827,8 +3813,58 @@ func TestCreateInstance(t *testing.T) { }, }, }, + { + ResourceType: aws.String("volume"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("network-interface"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, }, - UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)), + UserData: aws.String(base64.StdEncoding.EncodeToString(data)), })). Return(&ec2.Reservation{ Instances: []*ec2.Instance{ @@ -2859,23 +3895,6 @@ func TestCreateInstance(t *testing.T) { }, }, }, nil) - m. - DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ - InstanceTypes: []*string{ - aws.String("m5.large"), - }, - })). - Return(&ec2.DescribeInstanceTypesOutput{ - InstanceTypes: []*ec2.InstanceTypeInfo{ - { - ProcessorInfo: &ec2.ProcessorInfo{ - SupportedArchitectures: []*string{ - aws.String("x86_64"), - }, - }, - }, - }, - }, nil) m. DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). 
Return(&ec2.DescribeNetworkInterfacesOutput{ @@ -2890,7 +3909,7 @@ func TestCreateInstance(t *testing.T) { }, }, { - name: "with custom placement group cloud-config", + name: "with custom placement group and partition number", machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"set": "node"}, @@ -2907,9 +3926,10 @@ func TestCreateInstance(t *testing.T) { AMI: infrav1.AMIReference{ ID: aws.String("abc"), }, - InstanceType: "m5.large", - PlacementGroupName: "placement-group1", - UncompressedUserData: &isUncompressedFalse, + InstanceType: "m5.large", + PlacementGroupName: "placement-group1", + PlacementGroupPartition: 2, + UncompressedUserData: &isUncompressedFalse, }, awsCluster: &infrav1.AWSCluster{ Spec: infrav1.AWSClusterSpec{ @@ -2953,7 +3973,8 @@ func TestCreateInstance(t *testing.T) { MaxCount: aws.Int64(1), MinCount: aws.Int64(1), Placement: &ec2.Placement{ - GroupName: aws.String("placement-group1"), + GroupName: aws.String("placement-group1"), + PartitionNumber: aws.Int64(2), }, SecurityGroupIds: []*string{aws.String("2"), aws.String("3")}, SubnetId: aws.String("subnet-1"), @@ -2983,6 +4004,56 @@ func TestCreateInstance(t *testing.T) { }, }, }, + { + ResourceType: aws.String("volume"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, + { + ResourceType: aws.String("network-interface"), + Tags: []*ec2.Tag{ + { + Key: aws.String("MachineName"), + Value: aws.String("default/machine-aws-test1"), + }, + { + Key: aws.String("Name"), + Value: aws.String("aws-test1"), + }, + { + Key: aws.String("kubernetes.io/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + }, }, UserData: aws.String(base64.StdEncoding.EncodeToString(userDataCompressed)), })). 
@@ -3011,6 +4082,7 @@ func TestCreateInstance(t *testing.T) { Placement: &ec2.Placement{ AvailabilityZone: &az, GroupName: aws.String("placement-group1"), + PartitionNumber: aws.Int64(2), }, }, }, @@ -3046,7 +4118,7 @@ func TestCreateInstance(t *testing.T) { }, }, { - name: "with dedicated tenancy and placement group ignition", + name: "expect error when placementGroupPartition is set, but placementGroupName is empty", machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"set": "node"}, @@ -3063,14 +4135,11 @@ func TestCreateInstance(t *testing.T) { AMI: infrav1.AMIReference{ ID: aws.String("abc"), }, - InstanceType: "m5.large", - Tenancy: "dedicated", - PlacementGroupName: "placement-group1", - UncompressedUserData: &isUncompressedTrue, - Ignition: &infrav1.Ignition{}, + InstanceType: "m5.large", + PlacementGroupPartition: 2, + UncompressedUserData: &isUncompressedFalse, }, awsCluster: &infrav1.AWSCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ Subnets: infrav1.Subnets{ @@ -3097,111 +4166,38 @@ func TestCreateInstance(t *testing.T) { ID: "3", }, }, - APIServerELB: infrav1.LoadBalancer{ - DNSName: "test-apiserver.us-east-1.aws", - }, - }, - }, - }, - expect: func(m *mocks.MockEC2APIMockRecorder) { - m. - DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ - InstanceTypes: []*string{ - aws.String("m5.large"), - }, - })). - Return(&ec2.DescribeInstanceTypesOutput{ - InstanceTypes: []*ec2.InstanceTypeInfo{ - { - ProcessorInfo: &ec2.ProcessorInfo{ - SupportedArchitectures: []*string{ - aws.String("x86_64"), - }, - }, - }, - }, - }, nil) - m. // TODO: Restore these parameters, but with the tags as well - RunInstancesWithContext(context.TODO(), gomock.Eq(&ec2.RunInstancesInput{ - ImageId: aws.String("abc"), - InstanceType: aws.String("m5.large"), - KeyName: aws.String("default"), - MaxCount: aws.Int64(1), - MinCount: aws.Int64(1), - Placement: &ec2.Placement{ - Tenancy: &tenancy, - GroupName: aws.String("placement-group1"), - }, - SecurityGroupIds: []*string{aws.String("2"), aws.String("3")}, - SubnetId: aws.String("subnet-1"), - TagSpecifications: []*ec2.TagSpecification{ - { - ResourceType: aws.String("instance"), - Tags: []*ec2.Tag{ - { - Key: aws.String("MachineName"), - Value: aws.String("default/machine-aws-test1"), - }, - { - Key: aws.String("Name"), - Value: aws.String("aws-test1"), - }, - { - Key: aws.String("kubernetes.io/cluster/test1"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("node"), - }, - }, - }, - }, - UserData: aws.String(base64.StdEncoding.EncodeToString(data)), - })). 
- Return(&ec2.Reservation{ - Instances: []*ec2.Instance{ - { - State: &ec2.InstanceState{ - Name: aws.String(ec2.InstanceStateNamePending), - }, - IamInstanceProfile: &ec2.IamInstanceProfile{ - Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), - }, - InstanceId: aws.String("two"), - InstanceType: aws.String("m5.large"), - SubnetId: aws.String("subnet-1"), - ImageId: aws.String("ami-1"), - RootDeviceName: aws.String("device-1"), - BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ - { - DeviceName: aws.String("device-1"), - Ebs: &ec2.EbsInstanceBlockDevice{ - VolumeId: aws.String("volume-1"), - }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), }, }, - Placement: &ec2.Placement{ - AvailabilityZone: &az, - Tenancy: &tenancy, - }, }, }, }, nil) - m. - DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). - Return(&ec2.DescribeNetworkInterfacesOutput{ - NetworkInterfaces: []*ec2.NetworkInterface{}, - NextToken: nil, - }, nil) }, check: func(instance *infrav1.Instance, err error) { - if err != nil { - t.Fatalf("did not expect error: %v", err) + expectedErrMsg := "placementGroupPartition is set but placementGroupName is empty" + if err == nil { + t.Fatalf("Expected error, but got nil") + } + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("Expected error: %s\nInstead got: `%s", expectedErrMsg, err.Error()) } }, }, @@ -3234,6 +4230,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, }, Status: infrav1.AWSClusterStatus{ @@ -3363,6 +4362,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, SSHKeyName: aws.String("specific-cluster-key-name"), }, @@ -3494,6 +4496,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, SSHKeyName: aws.String("specific-cluster-key-name"), }, @@ -3625,6 +4630,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, SSHKeyName: aws.String(""), }, @@ -3753,6 +4761,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, SSHKeyName: aws.String(""), }, @@ -3881,6 +4892,9 @@ func TestCreateInstance(t *testing.T) { IsPublic: false, }, }, + VPC: infrav1.VPCSpec{ + ID: "vpc-test", + }, }, SSHKeyName: nil, }, @@ -3979,8 +4993,173 @@ func TestCreateInstance(t *testing.T) { } }, }, - } + { + name: "expect instace PrivateDNSName to be different when DHCP Option has domain name is set in the VPC", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"set": "node"}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To[string]("bootstrap-data"), + }, + }, + }, + machineConfig: &infrav1.AWSMachineSpec{ + AMI: infrav1.AMIReference{ + ID: aws.String("abc"), + }, + InstanceType: "m5.large", + }, + awsCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: 
infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "subnet-1", + IsPublic: false, + }, + infrav1.SubnetSpec{ + IsPublic: false, + }, + }, + VPC: infrav1.VPCSpec{ + ID: "vpc-exists", + }, + }, + }, + Status: infrav1.AWSClusterStatus{ + Network: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "1", + }, + infrav1.SecurityGroupNode: { + ID: "2", + }, + infrav1.SecurityGroupLB: { + ID: "3", + }, + }, + APIServerELB: infrav1.LoadBalancer{ + DNSName: "test-apiserver.us-east-1.aws", + }, + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m. // TODO: Restore these parameters, but with the tags as well + RunInstancesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.Reservation{ + Instances: []*ec2.Instance{ + { + State: &ec2.InstanceState{ + Name: aws.String(ec2.InstanceStateNamePending), + }, + IamInstanceProfile: &ec2.IamInstanceProfile{ + Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), + }, + InstanceId: aws.String("two"), + InstanceType: aws.String("m5.large"), + SubnetId: aws.String("subnet-1"), + ImageId: aws.String("ami-1"), + RootDeviceName: aws.String("device-1"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("device-1"), + Ebs: &ec2.EbsInstanceBlockDevice{ + VolumeId: aws.String("volume-1"), + }, + }, + }, + Placement: &ec2.Placement{ + AvailabilityZone: &az, + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterface{ + { + NetworkInterfaceId: aws.String("eni-1"), + PrivateIpAddress: aws.String("192.168.1.10"), + PrivateDnsName: aws.String("ip-192-168-1-10.ec2.internal"), + }, + }, + VpcId: aws.String("vpc-exists"), + }, + }, + }, nil) + m. + DescribeInstanceTypesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ + InstanceTypes: []*string{ + aws.String("m5.large"), + }, + })). + Return(&ec2.DescribeInstanceTypesOutput{ + InstanceTypes: []*ec2.InstanceTypeInfo{ + { + ProcessorInfo: &ec2.ProcessorInfo{ + SupportedArchitectures: []*string{ + aws.String("x86_64"), + }, + }, + }, + }, + }, nil) + m. + DescribeNetworkInterfacesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeNetworkInterfacesOutput{ + NetworkInterfaces: []*ec2.NetworkInterface{}, + }, nil) + m. + DescribeVpcs(&ec2.DescribeVpcsInput{ + VpcIds: []*string{aws.String("vpc-exists")}, + }).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + VpcId: aws.String("vpc-exists"), + CidrBlock: aws.String("192.168.1.0/24"), + IsDefault: aws.Bool(false), + State: aws.String("available"), + DhcpOptionsId: aws.String("dopt-12345678"), + }, + }, + }, nil) + m. 
+ DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{aws.String("dopt-12345678")}, + }).Return(&ec2.DescribeDhcpOptionsOutput{ + DhcpOptions: []*ec2.DhcpOptions{ + { + DhcpConfigurations: []*ec2.DhcpConfiguration{ + { + Key: aws.String("domain-name"), + Values: []*ec2.AttributeValue{ + { + Value: aws.String("example.com"), + }, + }, + }, + }, + }, + }, + }, nil) + }, + check: func(instance *infrav1.Instance, err error) { + g := NewWithT(t) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(len(instance.Addresses)).To(Equal(3)) + for _, address := range instance.Addresses { + if address.Type == clusterv1.MachineInternalIP { + g.Expect(address.Address).To(Equal("192.168.1.10")) + } + + if address.Type == clusterv1.MachineInternalDNS { + g.Expect(address.Address).To(Or(Equal("ip-192-168-1-10.ec2.internal"), Equal("ip-192-168-1-10.example.com"))) + } + } + }, + }, + } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { mockCtrl := gomock.NewController(t) @@ -4036,7 +5215,6 @@ func TestCreateInstance(t *testing.T) { machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: client, Cluster: cluster, - ControlPlane: &unstructured.Unstructured{}, Machine: machine, AWSMachine: awsMachine, InfraCluster: clusterScope, @@ -4297,3 +5475,181 @@ func TestGetFilteredSecurityGroupID(t *testing.T) { }) } } + +func TestGetDHCPOptionSetDomainName(t *testing.T) { + testsCases := []struct { + name string + vpcID string + dhcpOpt *ec2.DhcpOptions + expectedPrivateDNSName *string + mockCalls func(m *mocks.MockEC2APIMockRecorder) + }{ + { + name: "dhcpOptions with domain-name", + vpcID: "vpc-exists", + dhcpOpt: &ec2.DhcpOptions{ + DhcpConfigurations: []*ec2.DhcpConfiguration{ + { + Key: aws.String("domain-name"), + Values: []*ec2.AttributeValue{ + { + Value: aws.String("example.com"), + }, + }, + }, + }, + }, + expectedPrivateDNSName: aws.String("example.com"), + mockCalls: mockedGetPrivateDNSDomainNameFromDHCPOptionsCalls, + }, + { + name: "dhcpOptions without domain-name", + vpcID: "vpc-empty-domain-name", + dhcpOpt: &ec2.DhcpOptions{ + DhcpConfigurations: []*ec2.DhcpConfiguration{ + { + Key: aws.String("domain-name"), + Values: []*ec2.AttributeValue{}, + }, + }, + }, + expectedPrivateDNSName: nil, + mockCalls: mockedGetPrivateDNSDomainNameFromDHCPOptionsEmptyCalls, + }, + } + for _, tc := range testsCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + ec2Mock := mocks.NewMockEC2API(mockCtrl) + scheme, err := setupScheme() + g.Expect(err).ToNot(HaveOccurred()) + expect := func(m *mocks.MockEC2APIMockRecorder) { + tc.mockCalls(m) + } + expect(ec2Mock.EXPECT()) + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + cs, err := scope.NewClusterScope( + scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{}, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: tc.vpcID, + }, + }, + }, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + + ec2Svc := NewService(cs) + ec2Svc.EC2Client = ec2Mock + dhcpOptsDomainName := ec2Svc.GetDHCPOptionSetDomainName(ec2Svc.EC2Client, &cs.VPC().ID) + g.Expect(dhcpOptsDomainName).To(Equal(tc.expectedPrivateDNSName)) + }) + } +} + +func mockedGetPrivateDNSDomainNameFromDHCPOptionsCalls(m *mocks.MockEC2APIMockRecorder) { + m.DescribeVpcs(&ec2.DescribeVpcsInput{ + VpcIds: 
[]*string{aws.String("vpc-exists")}, + }).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + VpcId: aws.String("vpc-exists"), + CidrBlock: aws.String("10.0.0.0/16"), + IsDefault: aws.Bool(false), + State: aws.String("available"), + DhcpOptionsId: aws.String("dopt-12345678"), + }, + }, + }, nil) + m.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{aws.String("dopt-12345678")}, + }).Return(&ec2.DescribeDhcpOptionsOutput{ + DhcpOptions: []*ec2.DhcpOptions{ + { + DhcpConfigurations: []*ec2.DhcpConfiguration{ + { + Key: aws.String("domain-name"), + Values: []*ec2.AttributeValue{ + { + Value: aws.String("example.com"), + }, + }, + }, + }, + }, + }, + }, nil) +} + +func mockedGetPrivateDNSDomainNameFromDHCPOptionsEmptyCalls(m *mocks.MockEC2APIMockRecorder) { + m.DescribeVpcs(&ec2.DescribeVpcsInput{ + VpcIds: []*string{aws.String("vpc-empty-domain-name")}, + }).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + VpcId: aws.String("vpc-exists"), + CidrBlock: aws.String("10.0.0.0/16"), + IsDefault: aws.Bool(false), + State: aws.String("available"), + DhcpOptionsId: aws.String("dopt-empty"), + }, + }, + }, nil) + m.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{aws.String("dopt-empty")}, + }).Return(&ec2.DescribeDhcpOptionsOutput{ + DhcpOptions: []*ec2.DhcpOptions{ + { + DhcpConfigurations: []*ec2.DhcpConfiguration{ + { + Key: aws.String("domain-name"), + Values: []*ec2.AttributeValue{}, + }, + }, + }, + }, + }, nil) +} + +func TestGetCapacityReservationSpecification(t *testing.T) { + mockCapacityReservationID := "cr-123" + mockCapacityReservationIDPtr := &mockCapacityReservationID + testCases := []struct { + name string + capacityReservationID *string + expectedRequest *ec2.CapacityReservationSpecification + }{ + { + name: "with no CapacityReservationID options specified", + capacityReservationID: nil, + expectedRequest: nil, + }, + { + name: "with a valid CapacityReservationID specified", + capacityReservationID: mockCapacityReservationIDPtr, + expectedRequest: &ec2.CapacityReservationSpecification{ + CapacityReservationTarget: &ec2.CapacityReservationTarget{ + CapacityReservationId: aws.String(mockCapacityReservationID), + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + request := getCapacityReservationSpecification(tc.capacityReservationID) + if !cmp.Equal(request, tc.expectedRequest) { + t.Errorf("Case: %s. Got: %v, expected: %v", tc.name, request, tc.expectedRequest) + } + }) + } +} diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go index 356433ed91..5da57f2521 100644 --- a/pkg/cloud/services/ec2/launchtemplate.go +++ b/pkg/cloud/services/ec2/launchtemplate.go @@ -114,7 +114,9 @@ func (s *Service) ReconcileLaunchTemplate( return err } scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion) - return scope.PatchObject() + if err := scope.PatchObject(); err != nil { + return err + } } annotation, err := MachinePoolAnnotationJSON(scope, TagsLastAppliedAnnotation) @@ -187,6 +189,7 @@ func (s *Service) ReconcileLaunchTemplate( return nil } +// ReconcileTags reconciles the tags for the AWSMachinePool instances. 
func (s *Service) ReconcileTags(scope scope.LaunchTemplateScope, resourceServicesToUpdate []scope.ResourceServiceToUpdate) error { additionalTags := scope.AdditionalTags() @@ -226,6 +229,7 @@ func (s *Service) ensureTags(scope scope.LaunchTemplateScope, resourceServicesTo return changed, nil } +// MachinePoolAnnotationJSON returns the annotation's json value as a map. func MachinePoolAnnotationJSON(lts scope.LaunchTemplateScope, annotation string) (map[string]interface{}, error) { out := map[string]interface{}{} @@ -246,6 +250,7 @@ func machinePoolAnnotation(lts scope.LaunchTemplateScope, annotation string) str return lts.GetObjectMeta().GetAnnotations()[annotation] } +// UpdateMachinePoolAnnotationJSON updates the annotation with the given content. func UpdateMachinePoolAnnotationJSON(lts scope.LaunchTemplateScope, annotation string, content map[string]interface{}) error { b, err := json.Marshal(content) if err != nil { @@ -517,6 +522,8 @@ func (s *Service) createLaunchTemplateData(scope scope.LaunchTemplateScope, imag data.InstanceMarketOptions = getLaunchTemplateInstanceMarketOptionsRequest(scope.GetLaunchTemplate().SpotMarketOptions) data.PrivateDnsNameOptions = getLaunchTemplatePrivateDNSNameOptionsRequest(scope.GetLaunchTemplate().PrivateDNSName) + blockDeviceMappings := []*ec2.LaunchTemplateBlockDeviceMappingRequest{} + // Set up root volume if lt.RootVolume != nil { rootDeviceName, err := s.checkRootVolume(lt.RootVolume, *data.ImageId) @@ -527,9 +534,18 @@ func (s *Service) createLaunchTemplateData(scope scope.LaunchTemplateScope, imag lt.RootVolume.DeviceName = aws.StringValue(rootDeviceName) req := volumeToLaunchTemplateBlockDeviceMappingRequest(lt.RootVolume) - data.BlockDeviceMappings = []*ec2.LaunchTemplateBlockDeviceMappingRequest{ - req, - } + blockDeviceMappings = append(blockDeviceMappings, req) + } + + for vi := range lt.NonRootVolumes { + nonRootVolume := lt.NonRootVolumes[vi] + + blockDeviceMapping := volumeToLaunchTemplateBlockDeviceMappingRequest(&nonRootVolume) + blockDeviceMappings = append(blockDeviceMappings, blockDeviceMapping) + } + + if len(blockDeviceMappings) > 0 { + data.BlockDeviceMappings = blockDeviceMappings } data.TagSpecifications = s.buildLaunchTemplateTagSpecificationRequest(scope, userDataSecretKey) @@ -618,6 +634,7 @@ func (s *Service) PruneLaunchTemplateVersions(id string) error { return s.deleteLaunchTemplateVersion(id, versionToPrune) } +// GetLaunchTemplateLatestVersion returns the latest version of a launch template. func (s *Service) GetLaunchTemplateLatestVersion(id string) (string, error) { input := &ec2.DescribeLaunchTemplateVersionsInput{ LaunchTemplateId: aws.String(id), @@ -854,6 +871,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(scope scope.LaunchTemplateScope) (*s return aws.String(lookupAMI), nil } +// GetAdditionalSecurityGroupsIDs returns the security group IDs for the additional security groups. func (s *Service) GetAdditionalSecurityGroupsIDs(securityGroups []infrav1.AWSResourceReference) ([]string, error) { var additionalSecurityGroupsIDs []string diff --git a/pkg/cloud/services/ec2/service.go b/pkg/cloud/services/ec2/service.go index f303f1a095..03e08b1203 100644 --- a/pkg/cloud/services/ec2/service.go +++ b/pkg/cloud/services/ec2/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package ec2 provides a way to interact with the AWS EC2 API. 
package ec2 import ( @@ -21,14 +22,16 @@ import ( "github.com/aws/aws-sdk-go/service/ssm/ssmiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" ) // Service holds a collection of interfaces. // The interfaces are broken down like this to group functions together. // One alternative is to have a large list of functions from the ec2 client. type Service struct { - scope scope.EC2Scope - EC2Client ec2iface.EC2API + scope scope.EC2Scope + EC2Client ec2iface.EC2API + netService *network.Service // SSMClient is used to look up the official EKS AMI ID SSMClient ssmiface.SSMAPI @@ -37,8 +40,9 @@ type Service struct { // NewService returns a new service given the ec2 api client. func NewService(clusterScope scope.EC2Scope) *Service { return &Service{ - scope: clusterScope, - EC2Client: scope.NewEC2Client(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()), - SSMClient: scope.NewSSMClient(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()), + scope: clusterScope, + EC2Client: scope.NewEC2Client(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()), + SSMClient: scope.NewSSMClient(clusterScope, clusterScope, clusterScope, clusterScope.InfraCluster()), + netService: network.NewService(clusterScope.(scope.NetworkScope)), } }
diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go index c7d786a690..62c990bd36 100644 --- a/pkg/cloud/services/eks/cluster.go +++ b/pkg/cloud/services/eks/cluster.go
@@ -121,6 +121,10 @@ func (s *Service) reconcileCluster(ctx context.Context) error { return errors.Wrap(err, "failed reconciling cluster config") } + if err := s.reconcileLogging(cluster.Logging); err != nil { + return errors.Wrap(err, "failed reconciling logging") + } + if err := s.reconcileEKSEncryptionConfig(cluster.EncryptionConfig); err != nil { return errors.Wrap(err, "failed reconciling eks encryption config") }
@@ -230,7 +234,7 @@ func makeEksEncryptionConfigs(encryptionConfig *ekscontrolplanev1.EncryptionConf if encryptionConfig == nil { return cfg } - //TODO: change EncryptionConfig so that provider and resources are required if encruptionConfig is specified + // TODO: change EncryptionConfig so that provider and resources are required if encryptionConfig is specified if encryptionConfig.Provider == nil || len(*encryptionConfig.Provider) == 0 { return cfg }
@@ -275,11 +279,11 @@ func makeVpcConfig(subnets infrav1.Subnets, endpointAccess ekscontrolplanev1.End return nil, awserrors.NewFailedDependency("subnets in at least 2 different az's are required") } - subnetIds := make([]*string, 0) + subnetIDs := make([]*string, 0) for i := range subnets { subnet := subnets[i] subnetID := subnet.GetResourceID() - subnetIds = append(subnetIds, &subnetID) + subnetIDs = append(subnetIDs, &subnetID) } cidrs := make([]*string, 0)
@@ -295,7 +299,7 @@ func makeVpcConfig(subnets infrav1.Subnets, endpointAccess ekscontrolplanev1.End vpcConfig := &eks.VpcConfigRequest{ EndpointPublicAccess: endpointAccess.Public, EndpointPrivateAccess: endpointAccess.Private, - SubnetIds: subnetIds, + SubnetIds: subnetIDs, } if len(cidrs) > 0 {
@@ -312,8 +316,8 @@ func makeEksLogging(loggingSpec *ekscontrolplanev1.ControlPlaneLoggingSpec) *eks if loggingSpec == nil { return nil } - var on = true - var off = false + on := true + off := false var enabledTypes []string var disabledTypes []string @@ -355,9 +359,18 @@ func makeEksLogging(loggingSpec
*ekscontrolplanev1.ControlPlaneLoggingSpec) *eks } func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) { + var ( + vpcConfig *eks.VpcConfigRequest + err error + ) logging := makeEksLogging(s.scope.ControlPlane.Spec.Logging) encryptionConfigs := makeEksEncryptionConfigs(s.scope.ControlPlane.Spec.EncryptionConfig) - vpcConfig, err := makeVpcConfig(s.scope.Subnets(), s.scope.ControlPlane.Spec.EndpointAccess, s.scope.SecurityGroups()) + if s.scope.ControlPlane.Spec.RestrictPrivateSubnets { + s.scope.Info("Filtering private subnets") + vpcConfig, err = makeVpcConfig(s.scope.Subnets().FilterPrivate(), s.scope.ControlPlane.Spec.EndpointAccess, s.scope.SecurityGroups()) + } else { + vpcConfig, err = makeVpcConfig(s.scope.Subnets(), s.scope.ControlPlane.Spec.EndpointAccess, s.scope.SecurityGroups()) + } if err != nil { return nil, errors.Wrap(err, "couldn't create vpc config for cluster") } @@ -390,10 +403,18 @@ func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) { return nil, errors.Wrapf(err, "error getting control plane iam role: %s", *s.scope.ControlPlane.Spec.RoleName) } - v := versionToEKS(parseEKSVersion(*s.scope.ControlPlane.Spec.Version)) + var eksVersion *string + if s.scope.ControlPlane.Spec.Version != nil { + specVersion, err := parseEKSVersion(*s.scope.ControlPlane.Spec.Version) + if err != nil { + return nil, fmt.Errorf("parsing EKS version from spec: %w", err) + } + v := versionToEKS(specVersion) + eksVersion = &v + } input := &eks.CreateClusterInput{ Name: aws.String(eksClusterName), - Version: aws.String(v), + Version: eksVersion, Logging: logging, EncryptionConfig: encryptionConfigs, ResourcesVpcConfig: vpcConfig, @@ -413,7 +434,7 @@ func (s *Service) createCluster(eksClusterName string) (*eks.Cluster, error) { conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedCreateEKSControlPlane", "Initiated creation of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil - }, awserrors.ResourceNotFound); err != nil { //TODO: change the error that can be retried + }, awserrors.ResourceNotFound); err != nil { // TODO: change the error that can be retried record.Warnf(s.scope.ControlPlane, "FailedCreateEKSControlPlane", "Failed to initiate creation of a new EKS control plane: %v", err) return nil, errors.Wrapf(err, "failed to create EKS cluster") } @@ -449,11 +470,6 @@ func (s *Service) reconcileClusterConfig(cluster *eks.Cluster) error { var needsUpdate bool input := eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())} - if updateLogging := s.reconcileLogging(cluster.Logging); updateLogging != nil { - needsUpdate = true - input.Logging = updateLogging - } - updateVpcConfig, err := s.reconcileVpcConfig(cluster.ResourcesVpcConfig) if err != nil { return errors.Wrap(err, "couldn't create vpc config for cluster") @@ -485,15 +501,39 @@ func (s *Service) reconcileClusterConfig(cluster *eks.Cluster) error { return nil } -func (s *Service) reconcileLogging(logging *eks.Logging) *eks.Logging { +func (s *Service) reconcileLogging(logging *eks.Logging) error { + input := eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())} + for _, logSetup := range logging.ClusterLogging { for _, l := range logSetup.Types { enabled := s.scope.ControlPlane.Spec.Logging.IsLogEnabled(*l) if enabled != *logSetup.Enabled { - return makeEksLogging(s.scope.ControlPlane.Spec.Logging) + input.Logging = 
makeEksLogging(s.scope.ControlPlane.Spec.Logging) + } + } + } + + if input.Logging != nil { + if err := input.Validate(); err != nil { + return errors.Wrap(err, "created invalid UpdateClusterConfigInput") + } + + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + if _, err := s.EKSClient.UpdateClusterConfig(&input); err != nil { + if aerr, ok := err.(awserr.Error); ok { + return false, aerr + } + return false, err } + conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated logging update for EKS control plane %s", s.scope.KubernetesClusterName()) + return true, nil + }); err != nil { + record.Warnf(s.scope.ControlPlane, "FailedUpdateEKSControlPlane", "Failed to update EKS control plane logging: %v", err) + return errors.Wrapf(err, "failed to update EKS cluster") } } + return nil } @@ -511,8 +551,16 @@ func publicAccessCIDRsEqual(as []*string, bs []*string) bool { } func (s *Service) reconcileVpcConfig(vpcConfig *eks.VpcConfigResponse) (*eks.VpcConfigRequest, error) { + var ( + updatedVpcConfig *eks.VpcConfigRequest + err error + ) endpointAccess := s.scope.ControlPlane.Spec.EndpointAccess - updatedVpcConfig, err := makeVpcConfig(s.scope.Subnets(), endpointAccess, s.scope.SecurityGroups()) + if s.scope.ControlPlane.Spec.RestrictPrivateSubnets { + updatedVpcConfig, err = makeVpcConfig(s.scope.Subnets().FilterPrivate(), endpointAccess, s.scope.SecurityGroups()) + } else { + updatedVpcConfig, err = makeVpcConfig(s.scope.Subnets(), endpointAccess, s.scope.SecurityGroups()) + } if err != nil { return nil, err } @@ -557,9 +605,12 @@ func (s *Service) reconcileEKSEncryptionConfig(currentClusterConfig []*eks.Encry return errors.Errorf("failed to update the EKS control plane: disabling EKS encryption is not allowed after it has been enabled") } -func parseEKSVersion(raw string) *version.Version { - v := version.MustParseGeneric(raw) - return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor())) +func parseEKSVersion(raw string) (*version.Version, error) { + v, err := version.ParseGeneric(raw) + if err != nil { + return nil, err + } + return version.MustParseGeneric(fmt.Sprintf("%d.%d", v.Major(), v.Minor())), nil } func versionToEKS(v *version.Version) string { @@ -567,10 +618,18 @@ func versionToEKS(v *version.Version) string { } func (s *Service) reconcileClusterVersion(cluster *eks.Cluster) error { - specVersion := parseEKSVersion(*s.scope.ControlPlane.Spec.Version) + var specVersion *version.Version + if s.scope.ControlPlane.Spec.Version != nil { + var err error + specVersion, err = parseEKSVersion(*s.scope.ControlPlane.Spec.Version) + if err != nil { + return fmt.Errorf("parsing EKS version from spec: %w", err) + } + } + clusterVersion := version.MustParseGeneric(*cluster.Version) - if clusterVersion.LessThan(specVersion) { + if specVersion != nil && clusterVersion.LessThan(specVersion) { // NOTE: you can only upgrade increments of minor versions. If you want to upgrade 1.14 to 1.16 we // need to go 1.14-> 1.15 and then 1.15 -> 1.16. 
nextVersionString := versionToEKS(clusterVersion.WithMinor(clusterVersion.Minor() + 1)) diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go index eeb92bbac0..7079c62de5 100644 --- a/pkg/cloud/services/eks/cluster_test.go +++ b/pkg/cloud/services/eks/cluster_test.go @@ -98,7 +98,9 @@ func TestParseEKSVersion(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(*parseEKSVersion(tc.input)).To(Equal(tc.expect)) + v, err := parseEKSVersion(tc.input) + g.Expect(err).To(BeNil()) + g.Expect(*v).To(Equal(tc.expect)) }) } } @@ -524,10 +526,10 @@ func TestCreateCluster(t *testing.T) { }, }, }) - subnetIds := make([]*string, 0) + subnetIDs := make([]*string, 0) for i := range tc.subnets { subnet := tc.subnets[i] - subnetIds = append(subnetIds, &subnet.ID) + subnetIDs = append(subnetIDs, &subnet.ID) } if !tc.expectError { @@ -537,7 +539,7 @@ func TestCreateCluster(t *testing.T) { Name: aws.String(clusterName), EncryptionConfig: []*eks.EncryptionConfig{}, ResourcesVpcConfig: &eks.VpcConfigRequest{ - SubnetIds: subnetIds, + SubnetIds: subnetIDs, }, RoleArn: tc.role, Tags: tc.tags, diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go index 8559c2fa7f..a894f18557 100644 --- a/pkg/cloud/services/eks/config.go +++ b/pkg/cloud/services/eks/config.go @@ -31,9 +31,12 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/controller-runtime/pkg/client" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" ) @@ -42,6 +45,9 @@ const ( tokenPrefix = "k8s-aws-v1." 
//nolint:gosec clusterNameHeader = "x-k8s-aws-id" tokenAgeMins = 15 + + relativeKubeconfigKey = "relative" + relativeTokenFileKey = "token-file" ) func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster) error { @@ -110,28 +116,44 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C clusterName := s.scope.KubernetesClusterName() userName := s.getKubeConfigUserName(clusterName, false) - cfg, err := s.createBaseKubeConfig(cluster, userName) + config, err := s.createBaseKubeConfig(cluster, userName) if err != nil { return fmt.Errorf("creating base kubeconfig: %w", err) } + clusterConfig := config.DeepCopy() token, err := s.generateToken() if err != nil { return fmt.Errorf("generating presigned token: %w", err) } - cfg.AuthInfos = map[string]*api.AuthInfo{ + clusterConfig.AuthInfos = map[string]*api.AuthInfo{ userName: { Token: token, }, } - out, err := clientcmd.Write(*cfg) + out, err := clientcmd.Write(*clusterConfig) if err != nil { return errors.Wrap(err, "failed to serialize config to yaml") } - kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef) + secretData := make(map[string][]byte) + secretData[secret.KubeconfigDataName] = out + + config.AuthInfos = map[string]*api.AuthInfo{ + userName: { + TokenFile: "./" + relativeTokenFileKey, + }, + } + out, err = clientcmd.Write(*config) + if err != nil { + return errors.Wrap(err, "failed to serialize config to yaml") + } + secretData[relativeKubeconfigKey] = out + secretData[relativeTokenFileKey] = []byte(token) + + kubeconfigSecret := generateSecretWithOwner(*clusterRef, secretData, controllerOwnerRef) if err := s.scope.Client.Create(ctx, kubeconfigSecret); err != nil { return errors.Wrap(err, "failed to create kubeconfig secret") } @@ -142,32 +164,49 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C func (s *Service) updateCAPIKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret, cluster *eks.Cluster) error { s.scope.Debug("Updating EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName()) + controllerOwnerRef := *metav1.NewControllerRef(s.scope.ControlPlane, ekscontrolplanev1.GroupVersion.WithKind("AWSManagedControlPlane")) - data, ok := configSecret.Data[secret.KubeconfigDataName] - if !ok { - return errors.Errorf("missing key %q in secret data", secret.KubeconfigDataName) + if !util.HasOwnerRef(configSecret.OwnerReferences, controllerOwnerRef) { + return fmt.Errorf("EKS kubeconfig %s/%s missing expected AWSManagedControlPlane ownership", configSecret.Namespace, configSecret.Name) } - config, err := clientcmd.Load(data) + clusterName := s.scope.KubernetesClusterName() + userName := s.getKubeConfigUserName(clusterName, false) + config, err := s.createBaseKubeConfig(cluster, userName) if err != nil { - return errors.Wrap(err, "failed to convert kubeconfig Secret into a clientcmdapi.Config") + return fmt.Errorf("creating base kubeconfig: %w", err) } + clusterConfig := config.DeepCopy() token, err := s.generateToken() if err != nil { return fmt.Errorf("generating presigned token: %w", err) } - userName := s.getKubeConfigUserName(*cluster.Name, false) - config.AuthInfos[userName].Token = token + clusterConfig.AuthInfos = map[string]*api.AuthInfo{ + userName: { + Token: token, + }, + } - out, err := clientcmd.Write(*config) + out, err := clientcmd.Write(*clusterConfig) if err != nil { return errors.Wrap(err, "failed to serialize config to yaml") } - 
configSecret.Data[secret.KubeconfigDataName] = out + config.AuthInfos = map[string]*api.AuthInfo{ + userName: { + TokenFile: "./" + relativeTokenFileKey, + }, + } + out, err = clientcmd.Write(*config) + if err != nil { + return errors.Wrap(err, "failed to serialize config to yaml") + } + configSecret.Data[relativeKubeconfigKey] = out + configSecret.Data[relativeTokenFileKey] = []byte(token) + err = s.scope.Client.Update(ctx, configSecret) if err != nil { return fmt.Errorf("updating kubeconfig secret: %w", err) @@ -283,3 +322,21 @@ func (s *Service) getKubeConfigUserName(clusterName string, isUser bool) string return fmt.Sprintf("%s-capi-admin", clusterName) } + +// generateSecretWithOwner returns a Kubernetes secret for the given Cluster name, namespace, kubeconfig data, and ownerReference. +func generateSecretWithOwner(clusterName client.ObjectKey, data map[string][]byte, owner metav1.OwnerReference) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret.Name(clusterName.Name, secret.Kubeconfig), + Namespace: clusterName.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterName.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + owner, + }, + }, + Data: data, + Type: clusterv1.ClusterSecretType, + } +} diff --git a/pkg/cloud/services/eks/config_test.go b/pkg/cloud/services/eks/config_test.go new file mode 100644 index 0000000000..6d6f4ce2ec --- /dev/null +++ b/pkg/cloud/services/eks/config_test.go @@ -0,0 +1,268 @@ +package eks + +import ( + "context" + "net/http" + "net/url" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/golang/mock/gomock" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" +) + +func Test_createCAPIKubeconfigSecret(t *testing.T) { + testCases := []struct { + name string + input *eks.Cluster + serviceFunc func() *Service + wantErr bool + }{ + { + name: "create kubeconfig secret", + input: &eks.Cluster{ + CertificateAuthority: &eks.Certificate{Data: aws.String("")}, + Endpoint: aws.String("https://F00BA4.gr4.us-east-2.eks.amazonaws.com"), + }, + serviceFunc: func() *Service { + mockCtrl := gomock.NewController(t) + stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl) + op := request.Request{ + Operation: &request.Operation{Name: "GetCallerIdentity", + HTTPMethod: "POST", + HTTPPath: "/", + }, + HTTPRequest: &http.Request{ + Header: make(http.Header), + URL: &url.URL{ + Scheme: "https", + Host: "F00BA4.gr4.us-east-2.eks.amazonaws.com", + }, + }, + } + stsMock.EXPECT().GetCallerIdentityRequest(gomock.Any()).Return(&op, &sts.GetCallerIdentityOutput{}) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + managedScope, _ := 
scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo", + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo", + UID: types.UID("1"), + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: "cluster-foo", + }, + }, + }) + + service := NewService(managedScope) + service.STSClient = stsMock + return service + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + service := tc.serviceFunc() + clusterRef := types.NamespacedName{ + Namespace: service.scope.Namespace(), + Name: service.scope.Name(), + } + err := service.createCAPIKubeconfigSecret(context.TODO(), tc.input, &clusterRef) + if tc.wantErr { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + var kubeconfigSecret corev1.Secret + g.Expect(service.scope.Client.Get(context.TODO(), types.NamespacedName{Namespace: "ns", Name: "capi-cluster-foo-kubeconfig"}, &kubeconfigSecret)).To(BeNil()) + g.Expect(kubeconfigSecret.Data).ToNot(BeNil()) + g.Expect(len(kubeconfigSecret.Data)).To(BeIdenticalTo(3)) + g.Expect(kubeconfigSecret.Data[secret.KubeconfigDataName]).ToNot(BeEmpty()) + g.Expect(kubeconfigSecret.Data[relativeKubeconfigKey]).ToNot(BeEmpty()) + g.Expect(kubeconfigSecret.Data[relativeTokenFileKey]).ToNot(BeEmpty()) + } + }) + } +} + +func Test_updateCAPIKubeconfigSecret(t *testing.T) { + type testCase struct { + name string + input *eks.Cluster + secret *corev1.Secret + serviceFunc func(tc testCase) *Service + wantErr bool + } + testCases := []testCase{ + { + name: "update kubeconfig secret", + input: &eks.Cluster{ + Name: aws.String("cluster-foo"), + CertificateAuthority: &eks.Certificate{Data: aws.String("")}, + Endpoint: aws.String("https://F00BA4.gr4.us-east-2.eks.amazonaws.com"), + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo-kubeconfig", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "controlplane.cluster.x-k8s.io/v1beta2", + Kind: "AWSManagedControlPlane", + Name: "capi-cluster-foo", + UID: "1", + Controller: aws.Bool(true), + }, + }, + }, + Data: make(map[string][]byte), + }, + serviceFunc: func(tc testCase) *Service { + mockCtrl := gomock.NewController(t) + stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl) + op := request.Request{ + Operation: &request.Operation{Name: "GetCallerIdentity", + HTTPMethod: "POST", + HTTPPath: "/", + }, + HTTPRequest: &http.Request{ + Header: make(http.Header), + URL: &url.URL{ + Scheme: "https", + Host: "F00BA4.gr4.us-east-2.eks.amazonaws.com", + }, + }, + } + stsMock.EXPECT().GetCallerIdentityRequest(gomock.Any()).Return(&op, &sts.GetCallerIdentityOutput{}) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.secret).Build() + managedScope, _ := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo", + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo", + UID: "1", + }, + Spec: 
ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: "cluster-foo", + }, + }, + }) + + service := NewService(managedScope) + service.STSClient = stsMock + return service + }, + }, + { + name: "detect incorrect ownership on the kubeconfig secret", + input: &eks.Cluster{ + Name: aws.String("cluster-foo"), + CertificateAuthority: &eks.Certificate{Data: aws.String("")}, + Endpoint: aws.String("https://F00BA4.gr4.us-east-2.eks.amazonaws.com"), + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo-kubeconfig", + }, + Data: make(map[string][]byte), + }, + serviceFunc: func(tc testCase) *Service { + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.secret).Build() + managedScope, _ := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo", + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-cluster-foo", + UID: "1", + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: "cluster-foo", + }, + }, + }) + + service := NewService(managedScope) + return service + }, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + service := tc.serviceFunc(tc) + err := service.updateCAPIKubeconfigSecret(context.TODO(), tc.secret, tc.input) + if tc.wantErr { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + var kubeconfigSecret corev1.Secret + g.Expect(service.scope.Client.Get(context.TODO(), types.NamespacedName{Namespace: "ns", Name: "capi-cluster-foo-kubeconfig"}, &kubeconfigSecret)).To(BeNil()) + g.Expect(kubeconfigSecret.Data).ToNot(BeNil()) + g.Expect(len(kubeconfigSecret.Data)).To(BeIdenticalTo(3)) + g.Expect(kubeconfigSecret.Data[secret.KubeconfigDataName]).ToNot(BeEmpty()) + g.Expect(kubeconfigSecret.Data[relativeKubeconfigKey]).ToNot(BeEmpty()) + g.Expect(kubeconfigSecret.Data[relativeTokenFileKey]).ToNot(BeEmpty()) + } + }) + } +} diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index 7b0c81a374..958230bccd 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package eks provides a service to reconcile EKS control plane and nodegroups. package eks import ( diff --git a/pkg/cloud/services/eks/iam/iam.go b/pkg/cloud/services/eks/iam/iam.go index e8b13e4747..bb4db97670 100644 --- a/pkg/cloud/services/eks/iam/iam.go +++ b/pkg/cloud/services/eks/iam/iam.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package iam provides a service for managing IAM roles and policies. package iam import ( @@ -483,7 +484,7 @@ func (s *IAMService) FindAndVerifyOIDCProvider(cluster *eks.Cluster) (string, er func fetchRootCAThumbprint(issuerURL string, client *http.Client) (string, error) { // needed to appease noctx. 
- req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, issuerURL, nil) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, issuerURL, http.NoBody) if err != nil { return "", err } diff --git a/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go b/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go index b37aa06c6a..d89f61dfe5 100644 --- a/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go +++ b/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go @@ -52,6 +52,56 @@ func (m *MockEKSAPI) EXPECT() *MockEKSAPIMockRecorder { return m.recorder } +// AssociateAccessPolicy mocks base method. +func (m *MockEKSAPI) AssociateAccessPolicy(arg0 *eks.AssociateAccessPolicyInput) (*eks.AssociateAccessPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssociateAccessPolicy", arg0) + ret0, _ := ret[0].(*eks.AssociateAccessPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AssociateAccessPolicy indicates an expected call of AssociateAccessPolicy. +func (mr *MockEKSAPIMockRecorder) AssociateAccessPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateAccessPolicy", reflect.TypeOf((*MockEKSAPI)(nil).AssociateAccessPolicy), arg0) +} + +// AssociateAccessPolicyRequest mocks base method. +func (m *MockEKSAPI) AssociateAccessPolicyRequest(arg0 *eks.AssociateAccessPolicyInput) (*request.Request, *eks.AssociateAccessPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssociateAccessPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.AssociateAccessPolicyOutput) + return ret0, ret1 +} + +// AssociateAccessPolicyRequest indicates an expected call of AssociateAccessPolicyRequest. +func (mr *MockEKSAPIMockRecorder) AssociateAccessPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateAccessPolicyRequest", reflect.TypeOf((*MockEKSAPI)(nil).AssociateAccessPolicyRequest), arg0) +} + +// AssociateAccessPolicyWithContext mocks base method. +func (m *MockEKSAPI) AssociateAccessPolicyWithContext(arg0 context.Context, arg1 *eks.AssociateAccessPolicyInput, arg2 ...request.Option) (*eks.AssociateAccessPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AssociateAccessPolicyWithContext", varargs...) + ret0, _ := ret[0].(*eks.AssociateAccessPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AssociateAccessPolicyWithContext indicates an expected call of AssociateAccessPolicyWithContext. +func (mr *MockEKSAPIMockRecorder) AssociateAccessPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateAccessPolicyWithContext", reflect.TypeOf((*MockEKSAPI)(nil).AssociateAccessPolicyWithContext), varargs...) +} + // AssociateEncryptionConfig mocks base method. 
func (m *MockEKSAPI) AssociateEncryptionConfig(arg0 *eks.AssociateEncryptionConfigInput) (*eks.AssociateEncryptionConfigOutput, error) { m.ctrl.T.Helper() @@ -152,6 +202,56 @@ func (mr *MockEKSAPIMockRecorder) AssociateIdentityProviderConfigWithContext(arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateIdentityProviderConfigWithContext", reflect.TypeOf((*MockEKSAPI)(nil).AssociateIdentityProviderConfigWithContext), varargs...) } +// CreateAccessEntry mocks base method. +func (m *MockEKSAPI) CreateAccessEntry(arg0 *eks.CreateAccessEntryInput) (*eks.CreateAccessEntryOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAccessEntry", arg0) + ret0, _ := ret[0].(*eks.CreateAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAccessEntry indicates an expected call of CreateAccessEntry. +func (mr *MockEKSAPIMockRecorder) CreateAccessEntry(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).CreateAccessEntry), arg0) +} + +// CreateAccessEntryRequest mocks base method. +func (m *MockEKSAPI) CreateAccessEntryRequest(arg0 *eks.CreateAccessEntryInput) (*request.Request, *eks.CreateAccessEntryOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAccessEntryRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.CreateAccessEntryOutput) + return ret0, ret1 +} + +// CreateAccessEntryRequest indicates an expected call of CreateAccessEntryRequest. +func (mr *MockEKSAPIMockRecorder) CreateAccessEntryRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessEntryRequest", reflect.TypeOf((*MockEKSAPI)(nil).CreateAccessEntryRequest), arg0) +} + +// CreateAccessEntryWithContext mocks base method. +func (m *MockEKSAPI) CreateAccessEntryWithContext(arg0 context.Context, arg1 *eks.CreateAccessEntryInput, arg2 ...request.Option) (*eks.CreateAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAccessEntryWithContext", varargs...) + ret0, _ := ret[0].(*eks.CreateAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAccessEntryWithContext indicates an expected call of CreateAccessEntryWithContext. +func (mr *MockEKSAPIMockRecorder) CreateAccessEntryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessEntryWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateAccessEntryWithContext), varargs...) +} + // CreateAddon mocks base method. func (m *MockEKSAPI) CreateAddon(arg0 *eks.CreateAddonInput) (*eks.CreateAddonOutput, error) { m.ctrl.T.Helper() @@ -252,6 +352,56 @@ func (mr *MockEKSAPIMockRecorder) CreateClusterWithContext(arg0, arg1 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateClusterWithContext), varargs...) } +// CreateEksAnywhereSubscription mocks base method. 
+func (m *MockEKSAPI) CreateEksAnywhereSubscription(arg0 *eks.CreateEksAnywhereSubscriptionInput) (*eks.CreateEksAnywhereSubscriptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateEksAnywhereSubscription", arg0) + ret0, _ := ret[0].(*eks.CreateEksAnywhereSubscriptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateEksAnywhereSubscription indicates an expected call of CreateEksAnywhereSubscription. +func (mr *MockEKSAPIMockRecorder) CreateEksAnywhereSubscription(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEksAnywhereSubscription", reflect.TypeOf((*MockEKSAPI)(nil).CreateEksAnywhereSubscription), arg0) +} + +// CreateEksAnywhereSubscriptionRequest mocks base method. +func (m *MockEKSAPI) CreateEksAnywhereSubscriptionRequest(arg0 *eks.CreateEksAnywhereSubscriptionInput) (*request.Request, *eks.CreateEksAnywhereSubscriptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateEksAnywhereSubscriptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.CreateEksAnywhereSubscriptionOutput) + return ret0, ret1 +} + +// CreateEksAnywhereSubscriptionRequest indicates an expected call of CreateEksAnywhereSubscriptionRequest. +func (mr *MockEKSAPIMockRecorder) CreateEksAnywhereSubscriptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEksAnywhereSubscriptionRequest", reflect.TypeOf((*MockEKSAPI)(nil).CreateEksAnywhereSubscriptionRequest), arg0) +} + +// CreateEksAnywhereSubscriptionWithContext mocks base method. +func (m *MockEKSAPI) CreateEksAnywhereSubscriptionWithContext(arg0 context.Context, arg1 *eks.CreateEksAnywhereSubscriptionInput, arg2 ...request.Option) (*eks.CreateEksAnywhereSubscriptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateEksAnywhereSubscriptionWithContext", varargs...) + ret0, _ := ret[0].(*eks.CreateEksAnywhereSubscriptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateEksAnywhereSubscriptionWithContext indicates an expected call of CreateEksAnywhereSubscriptionWithContext. +func (mr *MockEKSAPIMockRecorder) CreateEksAnywhereSubscriptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEksAnywhereSubscriptionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateEksAnywhereSubscriptionWithContext), varargs...) +} + // CreateFargateProfile mocks base method. func (m *MockEKSAPI) CreateFargateProfile(arg0 *eks.CreateFargateProfileInput) (*eks.CreateFargateProfileOutput, error) { m.ctrl.T.Helper() @@ -352,6 +502,106 @@ func (mr *MockEKSAPIMockRecorder) CreateNodegroupWithContext(arg0, arg1 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodegroupWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreateNodegroupWithContext), varargs...) } +// CreatePodIdentityAssociation mocks base method. 
+func (m *MockEKSAPI) CreatePodIdentityAssociation(arg0 *eks.CreatePodIdentityAssociationInput) (*eks.CreatePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePodIdentityAssociation", arg0) + ret0, _ := ret[0].(*eks.CreatePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePodIdentityAssociation indicates an expected call of CreatePodIdentityAssociation. +func (mr *MockEKSAPIMockRecorder) CreatePodIdentityAssociation(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePodIdentityAssociation", reflect.TypeOf((*MockEKSAPI)(nil).CreatePodIdentityAssociation), arg0) +} + +// CreatePodIdentityAssociationRequest mocks base method. +func (m *MockEKSAPI) CreatePodIdentityAssociationRequest(arg0 *eks.CreatePodIdentityAssociationInput) (*request.Request, *eks.CreatePodIdentityAssociationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePodIdentityAssociationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.CreatePodIdentityAssociationOutput) + return ret0, ret1 +} + +// CreatePodIdentityAssociationRequest indicates an expected call of CreatePodIdentityAssociationRequest. +func (mr *MockEKSAPIMockRecorder) CreatePodIdentityAssociationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePodIdentityAssociationRequest", reflect.TypeOf((*MockEKSAPI)(nil).CreatePodIdentityAssociationRequest), arg0) +} + +// CreatePodIdentityAssociationWithContext mocks base method. +func (m *MockEKSAPI) CreatePodIdentityAssociationWithContext(arg0 context.Context, arg1 *eks.CreatePodIdentityAssociationInput, arg2 ...request.Option) (*eks.CreatePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreatePodIdentityAssociationWithContext", varargs...) + ret0, _ := ret[0].(*eks.CreatePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePodIdentityAssociationWithContext indicates an expected call of CreatePodIdentityAssociationWithContext. +func (mr *MockEKSAPIMockRecorder) CreatePodIdentityAssociationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePodIdentityAssociationWithContext", reflect.TypeOf((*MockEKSAPI)(nil).CreatePodIdentityAssociationWithContext), varargs...) +} + +// DeleteAccessEntry mocks base method. +func (m *MockEKSAPI) DeleteAccessEntry(arg0 *eks.DeleteAccessEntryInput) (*eks.DeleteAccessEntryOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAccessEntry", arg0) + ret0, _ := ret[0].(*eks.DeleteAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAccessEntry indicates an expected call of DeleteAccessEntry. +func (mr *MockEKSAPIMockRecorder) DeleteAccessEntry(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).DeleteAccessEntry), arg0) +} + +// DeleteAccessEntryRequest mocks base method. 
+func (m *MockEKSAPI) DeleteAccessEntryRequest(arg0 *eks.DeleteAccessEntryInput) (*request.Request, *eks.DeleteAccessEntryOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAccessEntryRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DeleteAccessEntryOutput) + return ret0, ret1 +} + +// DeleteAccessEntryRequest indicates an expected call of DeleteAccessEntryRequest. +func (mr *MockEKSAPIMockRecorder) DeleteAccessEntryRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessEntryRequest", reflect.TypeOf((*MockEKSAPI)(nil).DeleteAccessEntryRequest), arg0) +} + +// DeleteAccessEntryWithContext mocks base method. +func (m *MockEKSAPI) DeleteAccessEntryWithContext(arg0 context.Context, arg1 *eks.DeleteAccessEntryInput, arg2 ...request.Option) (*eks.DeleteAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAccessEntryWithContext", varargs...) + ret0, _ := ret[0].(*eks.DeleteAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAccessEntryWithContext indicates an expected call of DeleteAccessEntryWithContext. +func (mr *MockEKSAPIMockRecorder) DeleteAccessEntryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessEntryWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteAccessEntryWithContext), varargs...) +} + // DeleteAddon mocks base method. func (m *MockEKSAPI) DeleteAddon(arg0 *eks.DeleteAddonInput) (*eks.DeleteAddonOutput, error) { m.ctrl.T.Helper() @@ -452,6 +702,56 @@ func (mr *MockEKSAPIMockRecorder) DeleteClusterWithContext(arg0, arg1 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteClusterWithContext), varargs...) } +// DeleteEksAnywhereSubscription mocks base method. +func (m *MockEKSAPI) DeleteEksAnywhereSubscription(arg0 *eks.DeleteEksAnywhereSubscriptionInput) (*eks.DeleteEksAnywhereSubscriptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteEksAnywhereSubscription", arg0) + ret0, _ := ret[0].(*eks.DeleteEksAnywhereSubscriptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteEksAnywhereSubscription indicates an expected call of DeleteEksAnywhereSubscription. +func (mr *MockEKSAPIMockRecorder) DeleteEksAnywhereSubscription(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksAnywhereSubscription", reflect.TypeOf((*MockEKSAPI)(nil).DeleteEksAnywhereSubscription), arg0) +} + +// DeleteEksAnywhereSubscriptionRequest mocks base method. +func (m *MockEKSAPI) DeleteEksAnywhereSubscriptionRequest(arg0 *eks.DeleteEksAnywhereSubscriptionInput) (*request.Request, *eks.DeleteEksAnywhereSubscriptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteEksAnywhereSubscriptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DeleteEksAnywhereSubscriptionOutput) + return ret0, ret1 +} + +// DeleteEksAnywhereSubscriptionRequest indicates an expected call of DeleteEksAnywhereSubscriptionRequest. 
+func (mr *MockEKSAPIMockRecorder) DeleteEksAnywhereSubscriptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksAnywhereSubscriptionRequest", reflect.TypeOf((*MockEKSAPI)(nil).DeleteEksAnywhereSubscriptionRequest), arg0) +} + +// DeleteEksAnywhereSubscriptionWithContext mocks base method. +func (m *MockEKSAPI) DeleteEksAnywhereSubscriptionWithContext(arg0 context.Context, arg1 *eks.DeleteEksAnywhereSubscriptionInput, arg2 ...request.Option) (*eks.DeleteEksAnywhereSubscriptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteEksAnywhereSubscriptionWithContext", varargs...) + ret0, _ := ret[0].(*eks.DeleteEksAnywhereSubscriptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteEksAnywhereSubscriptionWithContext indicates an expected call of DeleteEksAnywhereSubscriptionWithContext. +func (mr *MockEKSAPIMockRecorder) DeleteEksAnywhereSubscriptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksAnywhereSubscriptionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteEksAnywhereSubscriptionWithContext), varargs...) +} + // DeleteFargateProfile mocks base method. func (m *MockEKSAPI) DeleteFargateProfile(arg0 *eks.DeleteFargateProfileInput) (*eks.DeleteFargateProfileOutput, error) { m.ctrl.T.Helper() @@ -552,6 +852,56 @@ func (mr *MockEKSAPIMockRecorder) DeleteNodegroupWithContext(arg0, arg1 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodegroupWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeleteNodegroupWithContext), varargs...) } +// DeletePodIdentityAssociation mocks base method. +func (m *MockEKSAPI) DeletePodIdentityAssociation(arg0 *eks.DeletePodIdentityAssociationInput) (*eks.DeletePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePodIdentityAssociation", arg0) + ret0, _ := ret[0].(*eks.DeletePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePodIdentityAssociation indicates an expected call of DeletePodIdentityAssociation. +func (mr *MockEKSAPIMockRecorder) DeletePodIdentityAssociation(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePodIdentityAssociation", reflect.TypeOf((*MockEKSAPI)(nil).DeletePodIdentityAssociation), arg0) +} + +// DeletePodIdentityAssociationRequest mocks base method. +func (m *MockEKSAPI) DeletePodIdentityAssociationRequest(arg0 *eks.DeletePodIdentityAssociationInput) (*request.Request, *eks.DeletePodIdentityAssociationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePodIdentityAssociationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DeletePodIdentityAssociationOutput) + return ret0, ret1 +} + +// DeletePodIdentityAssociationRequest indicates an expected call of DeletePodIdentityAssociationRequest. 
+func (mr *MockEKSAPIMockRecorder) DeletePodIdentityAssociationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePodIdentityAssociationRequest", reflect.TypeOf((*MockEKSAPI)(nil).DeletePodIdentityAssociationRequest), arg0) +} + +// DeletePodIdentityAssociationWithContext mocks base method. +func (m *MockEKSAPI) DeletePodIdentityAssociationWithContext(arg0 context.Context, arg1 *eks.DeletePodIdentityAssociationInput, arg2 ...request.Option) (*eks.DeletePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeletePodIdentityAssociationWithContext", varargs...) + ret0, _ := ret[0].(*eks.DeletePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePodIdentityAssociationWithContext indicates an expected call of DeletePodIdentityAssociationWithContext. +func (mr *MockEKSAPIMockRecorder) DeletePodIdentityAssociationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePodIdentityAssociationWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeletePodIdentityAssociationWithContext), varargs...) +} + // DeregisterCluster mocks base method. func (m *MockEKSAPI) DeregisterCluster(arg0 *eks.DeregisterClusterInput) (*eks.DeregisterClusterOutput, error) { m.ctrl.T.Helper() @@ -602,6 +952,56 @@ func (mr *MockEKSAPIMockRecorder) DeregisterClusterWithContext(arg0, arg1 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeregisterClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DeregisterClusterWithContext), varargs...) } +// DescribeAccessEntry mocks base method. +func (m *MockEKSAPI) DescribeAccessEntry(arg0 *eks.DescribeAccessEntryInput) (*eks.DescribeAccessEntryOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeAccessEntry", arg0) + ret0, _ := ret[0].(*eks.DescribeAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeAccessEntry indicates an expected call of DescribeAccessEntry. +func (mr *MockEKSAPIMockRecorder) DescribeAccessEntry(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).DescribeAccessEntry), arg0) +} + +// DescribeAccessEntryRequest mocks base method. +func (m *MockEKSAPI) DescribeAccessEntryRequest(arg0 *eks.DescribeAccessEntryInput) (*request.Request, *eks.DescribeAccessEntryOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeAccessEntryRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeAccessEntryOutput) + return ret0, ret1 +} + +// DescribeAccessEntryRequest indicates an expected call of DescribeAccessEntryRequest. +func (mr *MockEKSAPIMockRecorder) DescribeAccessEntryRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeAccessEntryRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeAccessEntryRequest), arg0) +} + +// DescribeAccessEntryWithContext mocks base method. 
+func (m *MockEKSAPI) DescribeAccessEntryWithContext(arg0 context.Context, arg1 *eks.DescribeAccessEntryInput, arg2 ...request.Option) (*eks.DescribeAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeAccessEntryWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeAccessEntryWithContext indicates an expected call of DescribeAccessEntryWithContext. +func (mr *MockEKSAPIMockRecorder) DescribeAccessEntryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeAccessEntryWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeAccessEntryWithContext), varargs...) +} + // DescribeAddon mocks base method. func (m *MockEKSAPI) DescribeAddon(arg0 *eks.DescribeAddonInput) (*eks.DescribeAddonOutput, error) { m.ctrl.T.Helper() @@ -835,59 +1235,109 @@ func (mr *MockEKSAPIMockRecorder) DescribeClusterWithContext(arg0, arg1 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeClusterWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeClusterWithContext), varargs...) } -// DescribeFargateProfile mocks base method. -func (m *MockEKSAPI) DescribeFargateProfile(arg0 *eks.DescribeFargateProfileInput) (*eks.DescribeFargateProfileOutput, error) { +// DescribeEksAnywhereSubscription mocks base method. +func (m *MockEKSAPI) DescribeEksAnywhereSubscription(arg0 *eks.DescribeEksAnywhereSubscriptionInput) (*eks.DescribeEksAnywhereSubscriptionOutput, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DescribeFargateProfile", arg0) - ret0, _ := ret[0].(*eks.DescribeFargateProfileOutput) + ret := m.ctrl.Call(m, "DescribeEksAnywhereSubscription", arg0) + ret0, _ := ret[0].(*eks.DescribeEksAnywhereSubscriptionOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// DescribeFargateProfile indicates an expected call of DescribeFargateProfile. -func (mr *MockEKSAPIMockRecorder) DescribeFargateProfile(arg0 interface{}) *gomock.Call { +// DescribeEksAnywhereSubscription indicates an expected call of DescribeEksAnywhereSubscription. +func (mr *MockEKSAPIMockRecorder) DescribeEksAnywhereSubscription(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfile", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfile), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEksAnywhereSubscription", reflect.TypeOf((*MockEKSAPI)(nil).DescribeEksAnywhereSubscription), arg0) } -// DescribeFargateProfileRequest mocks base method. -func (m *MockEKSAPI) DescribeFargateProfileRequest(arg0 *eks.DescribeFargateProfileInput) (*request.Request, *eks.DescribeFargateProfileOutput) { +// DescribeEksAnywhereSubscriptionRequest mocks base method. 
+func (m *MockEKSAPI) DescribeEksAnywhereSubscriptionRequest(arg0 *eks.DescribeEksAnywhereSubscriptionInput) (*request.Request, *eks.DescribeEksAnywhereSubscriptionOutput) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DescribeFargateProfileRequest", arg0) + ret := m.ctrl.Call(m, "DescribeEksAnywhereSubscriptionRequest", arg0) ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*eks.DescribeFargateProfileOutput) + ret1, _ := ret[1].(*eks.DescribeEksAnywhereSubscriptionOutput) return ret0, ret1 } -// DescribeFargateProfileRequest indicates an expected call of DescribeFargateProfileRequest. -func (mr *MockEKSAPIMockRecorder) DescribeFargateProfileRequest(arg0 interface{}) *gomock.Call { +// DescribeEksAnywhereSubscriptionRequest indicates an expected call of DescribeEksAnywhereSubscriptionRequest. +func (mr *MockEKSAPIMockRecorder) DescribeEksAnywhereSubscriptionRequest(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfileRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfileRequest), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEksAnywhereSubscriptionRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeEksAnywhereSubscriptionRequest), arg0) } -// DescribeFargateProfileWithContext mocks base method. -func (m *MockEKSAPI) DescribeFargateProfileWithContext(arg0 context.Context, arg1 *eks.DescribeFargateProfileInput, arg2 ...request.Option) (*eks.DescribeFargateProfileOutput, error) { +// DescribeEksAnywhereSubscriptionWithContext mocks base method. +func (m *MockEKSAPI) DescribeEksAnywhereSubscriptionWithContext(arg0 context.Context, arg1 *eks.DescribeEksAnywhereSubscriptionInput, arg2 ...request.Option) (*eks.DescribeEksAnywhereSubscriptionOutput, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DescribeFargateProfileWithContext", varargs...) - ret0, _ := ret[0].(*eks.DescribeFargateProfileOutput) + ret := m.ctrl.Call(m, "DescribeEksAnywhereSubscriptionWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeEksAnywhereSubscriptionOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// DescribeFargateProfileWithContext indicates an expected call of DescribeFargateProfileWithContext. -func (mr *MockEKSAPIMockRecorder) DescribeFargateProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { +// DescribeEksAnywhereSubscriptionWithContext indicates an expected call of DescribeEksAnywhereSubscriptionWithContext. +func (mr *MockEKSAPIMockRecorder) DescribeEksAnywhereSubscriptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfileWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfileWithContext), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEksAnywhereSubscriptionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeEksAnywhereSubscriptionWithContext), varargs...) } -// DescribeIdentityProviderConfig mocks base method. -func (m *MockEKSAPI) DescribeIdentityProviderConfig(arg0 *eks.DescribeIdentityProviderConfigInput) (*eks.DescribeIdentityProviderConfigOutput, error) { - m.ctrl.T.Helper() +// DescribeFargateProfile mocks base method. 
+func (m *MockEKSAPI) DescribeFargateProfile(arg0 *eks.DescribeFargateProfileInput) (*eks.DescribeFargateProfileOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeFargateProfile", arg0) + ret0, _ := ret[0].(*eks.DescribeFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeFargateProfile indicates an expected call of DescribeFargateProfile. +func (mr *MockEKSAPIMockRecorder) DescribeFargateProfile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfile", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfile), arg0) +} + +// DescribeFargateProfileRequest mocks base method. +func (m *MockEKSAPI) DescribeFargateProfileRequest(arg0 *eks.DescribeFargateProfileInput) (*request.Request, *eks.DescribeFargateProfileOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeFargateProfileRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeFargateProfileOutput) + return ret0, ret1 +} + +// DescribeFargateProfileRequest indicates an expected call of DescribeFargateProfileRequest. +func (mr *MockEKSAPIMockRecorder) DescribeFargateProfileRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfileRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfileRequest), arg0) +} + +// DescribeFargateProfileWithContext mocks base method. +func (m *MockEKSAPI) DescribeFargateProfileWithContext(arg0 context.Context, arg1 *eks.DescribeFargateProfileInput, arg2 ...request.Option) (*eks.DescribeFargateProfileOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeFargateProfileWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeFargateProfileOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeFargateProfileWithContext indicates an expected call of DescribeFargateProfileWithContext. +func (mr *MockEKSAPIMockRecorder) DescribeFargateProfileWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFargateProfileWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeFargateProfileWithContext), varargs...) +} + +// DescribeIdentityProviderConfig mocks base method. +func (m *MockEKSAPI) DescribeIdentityProviderConfig(arg0 *eks.DescribeIdentityProviderConfigInput) (*eks.DescribeIdentityProviderConfigOutput, error) { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DescribeIdentityProviderConfig", arg0) ret0, _ := ret[0].(*eks.DescribeIdentityProviderConfigOutput) ret1, _ := ret[1].(error) @@ -935,6 +1385,56 @@ func (mr *MockEKSAPIMockRecorder) DescribeIdentityProviderConfigWithContext(arg0 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeIdentityProviderConfigWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeIdentityProviderConfigWithContext), varargs...) } +// DescribeInsight mocks base method. +func (m *MockEKSAPI) DescribeInsight(arg0 *eks.DescribeInsightInput) (*eks.DescribeInsightOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeInsight", arg0) + ret0, _ := ret[0].(*eks.DescribeInsightOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeInsight indicates an expected call of DescribeInsight. 
+func (mr *MockEKSAPIMockRecorder) DescribeInsight(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInsight", reflect.TypeOf((*MockEKSAPI)(nil).DescribeInsight), arg0) +} + +// DescribeInsightRequest mocks base method. +func (m *MockEKSAPI) DescribeInsightRequest(arg0 *eks.DescribeInsightInput) (*request.Request, *eks.DescribeInsightOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeInsightRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribeInsightOutput) + return ret0, ret1 +} + +// DescribeInsightRequest indicates an expected call of DescribeInsightRequest. +func (mr *MockEKSAPIMockRecorder) DescribeInsightRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInsightRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribeInsightRequest), arg0) +} + +// DescribeInsightWithContext mocks base method. +func (m *MockEKSAPI) DescribeInsightWithContext(arg0 context.Context, arg1 *eks.DescribeInsightInput, arg2 ...request.Option) (*eks.DescribeInsightOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeInsightWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribeInsightOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeInsightWithContext indicates an expected call of DescribeInsightWithContext. +func (mr *MockEKSAPIMockRecorder) DescribeInsightWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInsightWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeInsightWithContext), varargs...) +} + // DescribeNodegroup mocks base method. func (m *MockEKSAPI) DescribeNodegroup(arg0 *eks.DescribeNodegroupInput) (*eks.DescribeNodegroupOutput, error) { m.ctrl.T.Helper() @@ -985,6 +1485,56 @@ func (mr *MockEKSAPIMockRecorder) DescribeNodegroupWithContext(arg0, arg1 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeNodegroupWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeNodegroupWithContext), varargs...) } +// DescribePodIdentityAssociation mocks base method. +func (m *MockEKSAPI) DescribePodIdentityAssociation(arg0 *eks.DescribePodIdentityAssociationInput) (*eks.DescribePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribePodIdentityAssociation", arg0) + ret0, _ := ret[0].(*eks.DescribePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribePodIdentityAssociation indicates an expected call of DescribePodIdentityAssociation. +func (mr *MockEKSAPIMockRecorder) DescribePodIdentityAssociation(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribePodIdentityAssociation", reflect.TypeOf((*MockEKSAPI)(nil).DescribePodIdentityAssociation), arg0) +} + +// DescribePodIdentityAssociationRequest mocks base method. 
+func (m *MockEKSAPI) DescribePodIdentityAssociationRequest(arg0 *eks.DescribePodIdentityAssociationInput) (*request.Request, *eks.DescribePodIdentityAssociationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribePodIdentityAssociationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DescribePodIdentityAssociationOutput) + return ret0, ret1 +} + +// DescribePodIdentityAssociationRequest indicates an expected call of DescribePodIdentityAssociationRequest. +func (mr *MockEKSAPIMockRecorder) DescribePodIdentityAssociationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribePodIdentityAssociationRequest", reflect.TypeOf((*MockEKSAPI)(nil).DescribePodIdentityAssociationRequest), arg0) +} + +// DescribePodIdentityAssociationWithContext mocks base method. +func (m *MockEKSAPI) DescribePodIdentityAssociationWithContext(arg0 context.Context, arg1 *eks.DescribePodIdentityAssociationInput, arg2 ...request.Option) (*eks.DescribePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribePodIdentityAssociationWithContext", varargs...) + ret0, _ := ret[0].(*eks.DescribePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribePodIdentityAssociationWithContext indicates an expected call of DescribePodIdentityAssociationWithContext. +func (mr *MockEKSAPIMockRecorder) DescribePodIdentityAssociationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribePodIdentityAssociationWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribePodIdentityAssociationWithContext), varargs...) +} + // DescribeUpdate mocks base method. func (m *MockEKSAPI) DescribeUpdate(arg0 *eks.DescribeUpdateInput) (*eks.DescribeUpdateOutput, error) { m.ctrl.T.Helper() @@ -1035,6 +1585,56 @@ func (mr *MockEKSAPIMockRecorder) DescribeUpdateWithContext(arg0, arg1 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeUpdateWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DescribeUpdateWithContext), varargs...) } +// DisassociateAccessPolicy mocks base method. +func (m *MockEKSAPI) DisassociateAccessPolicy(arg0 *eks.DisassociateAccessPolicyInput) (*eks.DisassociateAccessPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisassociateAccessPolicy", arg0) + ret0, _ := ret[0].(*eks.DisassociateAccessPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisassociateAccessPolicy indicates an expected call of DisassociateAccessPolicy. +func (mr *MockEKSAPIMockRecorder) DisassociateAccessPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateAccessPolicy", reflect.TypeOf((*MockEKSAPI)(nil).DisassociateAccessPolicy), arg0) +} + +// DisassociateAccessPolicyRequest mocks base method. 
+func (m *MockEKSAPI) DisassociateAccessPolicyRequest(arg0 *eks.DisassociateAccessPolicyInput) (*request.Request, *eks.DisassociateAccessPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisassociateAccessPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.DisassociateAccessPolicyOutput) + return ret0, ret1 +} + +// DisassociateAccessPolicyRequest indicates an expected call of DisassociateAccessPolicyRequest. +func (mr *MockEKSAPIMockRecorder) DisassociateAccessPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateAccessPolicyRequest", reflect.TypeOf((*MockEKSAPI)(nil).DisassociateAccessPolicyRequest), arg0) +} + +// DisassociateAccessPolicyWithContext mocks base method. +func (m *MockEKSAPI) DisassociateAccessPolicyWithContext(arg0 context.Context, arg1 *eks.DisassociateAccessPolicyInput, arg2 ...request.Option) (*eks.DisassociateAccessPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisassociateAccessPolicyWithContext", varargs...) + ret0, _ := ret[0].(*eks.DisassociateAccessPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisassociateAccessPolicyWithContext indicates an expected call of DisassociateAccessPolicyWithContext. +func (mr *MockEKSAPIMockRecorder) DisassociateAccessPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateAccessPolicyWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DisassociateAccessPolicyWithContext), varargs...) +} + // DisassociateIdentityProviderConfig mocks base method. func (m *MockEKSAPI) DisassociateIdentityProviderConfig(arg0 *eks.DisassociateIdentityProviderConfigInput) (*eks.DisassociateIdentityProviderConfigOutput, error) { m.ctrl.T.Helper() @@ -1085,6 +1685,172 @@ func (mr *MockEKSAPIMockRecorder) DisassociateIdentityProviderConfigWithContext( return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateIdentityProviderConfigWithContext", reflect.TypeOf((*MockEKSAPI)(nil).DisassociateIdentityProviderConfigWithContext), varargs...) } +// ListAccessEntries mocks base method. +func (m *MockEKSAPI) ListAccessEntries(arg0 *eks.ListAccessEntriesInput) (*eks.ListAccessEntriesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccessEntries", arg0) + ret0, _ := ret[0].(*eks.ListAccessEntriesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAccessEntries indicates an expected call of ListAccessEntries. +func (mr *MockEKSAPIMockRecorder) ListAccessEntries(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessEntries", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessEntries), arg0) +} + +// ListAccessEntriesPages mocks base method. +func (m *MockEKSAPI) ListAccessEntriesPages(arg0 *eks.ListAccessEntriesInput, arg1 func(*eks.ListAccessEntriesOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccessEntriesPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListAccessEntriesPages indicates an expected call of ListAccessEntriesPages. 
+func (mr *MockEKSAPIMockRecorder) ListAccessEntriesPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessEntriesPages", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessEntriesPages), arg0, arg1) +} + +// ListAccessEntriesPagesWithContext mocks base method. +func (m *MockEKSAPI) ListAccessEntriesPagesWithContext(arg0 context.Context, arg1 *eks.ListAccessEntriesInput, arg2 func(*eks.ListAccessEntriesOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAccessEntriesPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListAccessEntriesPagesWithContext indicates an expected call of ListAccessEntriesPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAccessEntriesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessEntriesPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessEntriesPagesWithContext), varargs...) +} + +// ListAccessEntriesRequest mocks base method. +func (m *MockEKSAPI) ListAccessEntriesRequest(arg0 *eks.ListAccessEntriesInput) (*request.Request, *eks.ListAccessEntriesOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccessEntriesRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListAccessEntriesOutput) + return ret0, ret1 +} + +// ListAccessEntriesRequest indicates an expected call of ListAccessEntriesRequest. +func (mr *MockEKSAPIMockRecorder) ListAccessEntriesRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessEntriesRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessEntriesRequest), arg0) +} + +// ListAccessEntriesWithContext mocks base method. +func (m *MockEKSAPI) ListAccessEntriesWithContext(arg0 context.Context, arg1 *eks.ListAccessEntriesInput, arg2 ...request.Option) (*eks.ListAccessEntriesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAccessEntriesWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListAccessEntriesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAccessEntriesWithContext indicates an expected call of ListAccessEntriesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAccessEntriesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessEntriesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessEntriesWithContext), varargs...) +} + +// ListAccessPolicies mocks base method. +func (m *MockEKSAPI) ListAccessPolicies(arg0 *eks.ListAccessPoliciesInput) (*eks.ListAccessPoliciesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccessPolicies", arg0) + ret0, _ := ret[0].(*eks.ListAccessPoliciesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAccessPolicies indicates an expected call of ListAccessPolicies. 
+func (mr *MockEKSAPIMockRecorder) ListAccessPolicies(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessPolicies", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessPolicies), arg0) +} + +// ListAccessPoliciesPages mocks base method. +func (m *MockEKSAPI) ListAccessPoliciesPages(arg0 *eks.ListAccessPoliciesInput, arg1 func(*eks.ListAccessPoliciesOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccessPoliciesPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListAccessPoliciesPages indicates an expected call of ListAccessPoliciesPages. +func (mr *MockEKSAPIMockRecorder) ListAccessPoliciesPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessPoliciesPages", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessPoliciesPages), arg0, arg1) +} + +// ListAccessPoliciesPagesWithContext mocks base method. +func (m *MockEKSAPI) ListAccessPoliciesPagesWithContext(arg0 context.Context, arg1 *eks.ListAccessPoliciesInput, arg2 func(*eks.ListAccessPoliciesOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAccessPoliciesPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListAccessPoliciesPagesWithContext indicates an expected call of ListAccessPoliciesPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAccessPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessPoliciesPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessPoliciesPagesWithContext), varargs...) +} + +// ListAccessPoliciesRequest mocks base method. +func (m *MockEKSAPI) ListAccessPoliciesRequest(arg0 *eks.ListAccessPoliciesInput) (*request.Request, *eks.ListAccessPoliciesOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccessPoliciesRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListAccessPoliciesOutput) + return ret0, ret1 +} + +// ListAccessPoliciesRequest indicates an expected call of ListAccessPoliciesRequest. +func (mr *MockEKSAPIMockRecorder) ListAccessPoliciesRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessPoliciesRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessPoliciesRequest), arg0) +} + +// ListAccessPoliciesWithContext mocks base method. +func (m *MockEKSAPI) ListAccessPoliciesWithContext(arg0 context.Context, arg1 *eks.ListAccessPoliciesInput, arg2 ...request.Option) (*eks.ListAccessPoliciesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAccessPoliciesWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListAccessPoliciesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAccessPoliciesWithContext indicates an expected call of ListAccessPoliciesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAccessPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessPoliciesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessPoliciesWithContext), varargs...) +} + // ListAddons mocks base method. func (m *MockEKSAPI) ListAddons(arg0 *eks.ListAddonsInput) (*eks.ListAddonsOutput, error) { m.ctrl.T.Helper() @@ -1121,51 +1887,134 @@ func (m *MockEKSAPI) ListAddonsPagesWithContext(arg0 context.Context, arg1 *eks. for _, a := range arg3 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "ListAddonsPagesWithContext", varargs...) + ret := m.ctrl.Call(m, "ListAddonsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListAddonsPagesWithContext indicates an expected call of ListAddonsPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAddonsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddonsPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAddonsPagesWithContext), varargs...) +} + +// ListAddonsRequest mocks base method. +func (m *MockEKSAPI) ListAddonsRequest(arg0 *eks.ListAddonsInput) (*request.Request, *eks.ListAddonsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAddonsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListAddonsOutput) + return ret0, ret1 +} + +// ListAddonsRequest indicates an expected call of ListAddonsRequest. +func (mr *MockEKSAPIMockRecorder) ListAddonsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddonsRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListAddonsRequest), arg0) +} + +// ListAddonsWithContext mocks base method. +func (m *MockEKSAPI) ListAddonsWithContext(arg0 context.Context, arg1 *eks.ListAddonsInput, arg2 ...request.Option) (*eks.ListAddonsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAddonsWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListAddonsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAddonsWithContext indicates an expected call of ListAddonsWithContext. +func (mr *MockEKSAPIMockRecorder) ListAddonsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddonsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAddonsWithContext), varargs...) +} + +// ListAssociatedAccessPolicies mocks base method. +func (m *MockEKSAPI) ListAssociatedAccessPolicies(arg0 *eks.ListAssociatedAccessPoliciesInput) (*eks.ListAssociatedAccessPoliciesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAssociatedAccessPolicies", arg0) + ret0, _ := ret[0].(*eks.ListAssociatedAccessPoliciesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAssociatedAccessPolicies indicates an expected call of ListAssociatedAccessPolicies. +func (mr *MockEKSAPIMockRecorder) ListAssociatedAccessPolicies(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAssociatedAccessPolicies", reflect.TypeOf((*MockEKSAPI)(nil).ListAssociatedAccessPolicies), arg0) +} + +// ListAssociatedAccessPoliciesPages mocks base method. 
+func (m *MockEKSAPI) ListAssociatedAccessPoliciesPages(arg0 *eks.ListAssociatedAccessPoliciesInput, arg1 func(*eks.ListAssociatedAccessPoliciesOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAssociatedAccessPoliciesPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListAssociatedAccessPoliciesPages indicates an expected call of ListAssociatedAccessPoliciesPages. +func (mr *MockEKSAPIMockRecorder) ListAssociatedAccessPoliciesPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAssociatedAccessPoliciesPages", reflect.TypeOf((*MockEKSAPI)(nil).ListAssociatedAccessPoliciesPages), arg0, arg1) +} + +// ListAssociatedAccessPoliciesPagesWithContext mocks base method. +func (m *MockEKSAPI) ListAssociatedAccessPoliciesPagesWithContext(arg0 context.Context, arg1 *eks.ListAssociatedAccessPoliciesInput, arg2 func(*eks.ListAssociatedAccessPoliciesOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAssociatedAccessPoliciesPagesWithContext", varargs...) ret0, _ := ret[0].(error) return ret0 } -// ListAddonsPagesWithContext indicates an expected call of ListAddonsPagesWithContext. -func (mr *MockEKSAPIMockRecorder) ListAddonsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { +// ListAssociatedAccessPoliciesPagesWithContext indicates an expected call of ListAssociatedAccessPoliciesPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAssociatedAccessPoliciesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddonsPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAddonsPagesWithContext), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAssociatedAccessPoliciesPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAssociatedAccessPoliciesPagesWithContext), varargs...) } -// ListAddonsRequest mocks base method. -func (m *MockEKSAPI) ListAddonsRequest(arg0 *eks.ListAddonsInput) (*request.Request, *eks.ListAddonsOutput) { +// ListAssociatedAccessPoliciesRequest mocks base method. +func (m *MockEKSAPI) ListAssociatedAccessPoliciesRequest(arg0 *eks.ListAssociatedAccessPoliciesInput) (*request.Request, *eks.ListAssociatedAccessPoliciesOutput) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListAddonsRequest", arg0) + ret := m.ctrl.Call(m, "ListAssociatedAccessPoliciesRequest", arg0) ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*eks.ListAddonsOutput) + ret1, _ := ret[1].(*eks.ListAssociatedAccessPoliciesOutput) return ret0, ret1 } -// ListAddonsRequest indicates an expected call of ListAddonsRequest. -func (mr *MockEKSAPIMockRecorder) ListAddonsRequest(arg0 interface{}) *gomock.Call { +// ListAssociatedAccessPoliciesRequest indicates an expected call of ListAssociatedAccessPoliciesRequest. 
+func (mr *MockEKSAPIMockRecorder) ListAssociatedAccessPoliciesRequest(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddonsRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListAddonsRequest), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAssociatedAccessPoliciesRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListAssociatedAccessPoliciesRequest), arg0) } -// ListAddonsWithContext mocks base method. -func (m *MockEKSAPI) ListAddonsWithContext(arg0 context.Context, arg1 *eks.ListAddonsInput, arg2 ...request.Option) (*eks.ListAddonsOutput, error) { +// ListAssociatedAccessPoliciesWithContext mocks base method. +func (m *MockEKSAPI) ListAssociatedAccessPoliciesWithContext(arg0 context.Context, arg1 *eks.ListAssociatedAccessPoliciesInput, arg2 ...request.Option) (*eks.ListAssociatedAccessPoliciesOutput, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "ListAddonsWithContext", varargs...) - ret0, _ := ret[0].(*eks.ListAddonsOutput) + ret := m.ctrl.Call(m, "ListAssociatedAccessPoliciesWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListAssociatedAccessPoliciesOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListAddonsWithContext indicates an expected call of ListAddonsWithContext. -func (mr *MockEKSAPIMockRecorder) ListAddonsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { +// ListAssociatedAccessPoliciesWithContext indicates an expected call of ListAssociatedAccessPoliciesWithContext. +func (mr *MockEKSAPIMockRecorder) ListAssociatedAccessPoliciesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddonsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAddonsWithContext), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAssociatedAccessPoliciesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListAssociatedAccessPoliciesWithContext), varargs...) } // ListClusters mocks base method. @@ -1251,6 +2100,89 @@ func (mr *MockEKSAPIMockRecorder) ListClustersWithContext(arg0, arg1 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClustersWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListClustersWithContext), varargs...) } +// ListEksAnywhereSubscriptions mocks base method. +func (m *MockEKSAPI) ListEksAnywhereSubscriptions(arg0 *eks.ListEksAnywhereSubscriptionsInput) (*eks.ListEksAnywhereSubscriptionsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEksAnywhereSubscriptions", arg0) + ret0, _ := ret[0].(*eks.ListEksAnywhereSubscriptionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEksAnywhereSubscriptions indicates an expected call of ListEksAnywhereSubscriptions. +func (mr *MockEKSAPIMockRecorder) ListEksAnywhereSubscriptions(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEksAnywhereSubscriptions", reflect.TypeOf((*MockEKSAPI)(nil).ListEksAnywhereSubscriptions), arg0) +} + +// ListEksAnywhereSubscriptionsPages mocks base method. 
+func (m *MockEKSAPI) ListEksAnywhereSubscriptionsPages(arg0 *eks.ListEksAnywhereSubscriptionsInput, arg1 func(*eks.ListEksAnywhereSubscriptionsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEksAnywhereSubscriptionsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListEksAnywhereSubscriptionsPages indicates an expected call of ListEksAnywhereSubscriptionsPages. +func (mr *MockEKSAPIMockRecorder) ListEksAnywhereSubscriptionsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEksAnywhereSubscriptionsPages", reflect.TypeOf((*MockEKSAPI)(nil).ListEksAnywhereSubscriptionsPages), arg0, arg1) +} + +// ListEksAnywhereSubscriptionsPagesWithContext mocks base method. +func (m *MockEKSAPI) ListEksAnywhereSubscriptionsPagesWithContext(arg0 context.Context, arg1 *eks.ListEksAnywhereSubscriptionsInput, arg2 func(*eks.ListEksAnywhereSubscriptionsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListEksAnywhereSubscriptionsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListEksAnywhereSubscriptionsPagesWithContext indicates an expected call of ListEksAnywhereSubscriptionsPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListEksAnywhereSubscriptionsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEksAnywhereSubscriptionsPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListEksAnywhereSubscriptionsPagesWithContext), varargs...) +} + +// ListEksAnywhereSubscriptionsRequest mocks base method. +func (m *MockEKSAPI) ListEksAnywhereSubscriptionsRequest(arg0 *eks.ListEksAnywhereSubscriptionsInput) (*request.Request, *eks.ListEksAnywhereSubscriptionsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEksAnywhereSubscriptionsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListEksAnywhereSubscriptionsOutput) + return ret0, ret1 +} + +// ListEksAnywhereSubscriptionsRequest indicates an expected call of ListEksAnywhereSubscriptionsRequest. +func (mr *MockEKSAPIMockRecorder) ListEksAnywhereSubscriptionsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEksAnywhereSubscriptionsRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListEksAnywhereSubscriptionsRequest), arg0) +} + +// ListEksAnywhereSubscriptionsWithContext mocks base method. +func (m *MockEKSAPI) ListEksAnywhereSubscriptionsWithContext(arg0 context.Context, arg1 *eks.ListEksAnywhereSubscriptionsInput, arg2 ...request.Option) (*eks.ListEksAnywhereSubscriptionsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListEksAnywhereSubscriptionsWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListEksAnywhereSubscriptionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEksAnywhereSubscriptionsWithContext indicates an expected call of ListEksAnywhereSubscriptionsWithContext. 
+func (mr *MockEKSAPIMockRecorder) ListEksAnywhereSubscriptionsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEksAnywhereSubscriptionsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListEksAnywhereSubscriptionsWithContext), varargs...) +} + // ListFargateProfiles mocks base method. func (m *MockEKSAPI) ListFargateProfiles(arg0 *eks.ListFargateProfilesInput) (*eks.ListFargateProfilesOutput, error) { m.ctrl.T.Helper() @@ -1417,6 +2349,89 @@ func (mr *MockEKSAPIMockRecorder) ListIdentityProviderConfigsWithContext(arg0, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListIdentityProviderConfigsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListIdentityProviderConfigsWithContext), varargs...) } +// ListInsights mocks base method. +func (m *MockEKSAPI) ListInsights(arg0 *eks.ListInsightsInput) (*eks.ListInsightsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListInsights", arg0) + ret0, _ := ret[0].(*eks.ListInsightsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListInsights indicates an expected call of ListInsights. +func (mr *MockEKSAPIMockRecorder) ListInsights(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInsights", reflect.TypeOf((*MockEKSAPI)(nil).ListInsights), arg0) +} + +// ListInsightsPages mocks base method. +func (m *MockEKSAPI) ListInsightsPages(arg0 *eks.ListInsightsInput, arg1 func(*eks.ListInsightsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListInsightsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListInsightsPages indicates an expected call of ListInsightsPages. +func (mr *MockEKSAPIMockRecorder) ListInsightsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInsightsPages", reflect.TypeOf((*MockEKSAPI)(nil).ListInsightsPages), arg0, arg1) +} + +// ListInsightsPagesWithContext mocks base method. +func (m *MockEKSAPI) ListInsightsPagesWithContext(arg0 context.Context, arg1 *eks.ListInsightsInput, arg2 func(*eks.ListInsightsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListInsightsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListInsightsPagesWithContext indicates an expected call of ListInsightsPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListInsightsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInsightsPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListInsightsPagesWithContext), varargs...) +} + +// ListInsightsRequest mocks base method. +func (m *MockEKSAPI) ListInsightsRequest(arg0 *eks.ListInsightsInput) (*request.Request, *eks.ListInsightsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListInsightsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListInsightsOutput) + return ret0, ret1 +} + +// ListInsightsRequest indicates an expected call of ListInsightsRequest. 
+func (mr *MockEKSAPIMockRecorder) ListInsightsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInsightsRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListInsightsRequest), arg0) +} + +// ListInsightsWithContext mocks base method. +func (m *MockEKSAPI) ListInsightsWithContext(arg0 context.Context, arg1 *eks.ListInsightsInput, arg2 ...request.Option) (*eks.ListInsightsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListInsightsWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListInsightsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListInsightsWithContext indicates an expected call of ListInsightsWithContext. +func (mr *MockEKSAPIMockRecorder) ListInsightsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInsightsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListInsightsWithContext), varargs...) +} + // ListNodegroups mocks base method. func (m *MockEKSAPI) ListNodegroups(arg0 *eks.ListNodegroupsInput) (*eks.ListNodegroupsOutput, error) { m.ctrl.T.Helper() @@ -1500,6 +2515,89 @@ func (mr *MockEKSAPIMockRecorder) ListNodegroupsWithContext(arg0, arg1 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodegroupsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListNodegroupsWithContext), varargs...) } +// ListPodIdentityAssociations mocks base method. +func (m *MockEKSAPI) ListPodIdentityAssociations(arg0 *eks.ListPodIdentityAssociationsInput) (*eks.ListPodIdentityAssociationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPodIdentityAssociations", arg0) + ret0, _ := ret[0].(*eks.ListPodIdentityAssociationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPodIdentityAssociations indicates an expected call of ListPodIdentityAssociations. +func (mr *MockEKSAPIMockRecorder) ListPodIdentityAssociations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodIdentityAssociations", reflect.TypeOf((*MockEKSAPI)(nil).ListPodIdentityAssociations), arg0) +} + +// ListPodIdentityAssociationsPages mocks base method. +func (m *MockEKSAPI) ListPodIdentityAssociationsPages(arg0 *eks.ListPodIdentityAssociationsInput, arg1 func(*eks.ListPodIdentityAssociationsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPodIdentityAssociationsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListPodIdentityAssociationsPages indicates an expected call of ListPodIdentityAssociationsPages. +func (mr *MockEKSAPIMockRecorder) ListPodIdentityAssociationsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodIdentityAssociationsPages", reflect.TypeOf((*MockEKSAPI)(nil).ListPodIdentityAssociationsPages), arg0, arg1) +} + +// ListPodIdentityAssociationsPagesWithContext mocks base method. 
+func (m *MockEKSAPI) ListPodIdentityAssociationsPagesWithContext(arg0 context.Context, arg1 *eks.ListPodIdentityAssociationsInput, arg2 func(*eks.ListPodIdentityAssociationsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPodIdentityAssociationsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListPodIdentityAssociationsPagesWithContext indicates an expected call of ListPodIdentityAssociationsPagesWithContext. +func (mr *MockEKSAPIMockRecorder) ListPodIdentityAssociationsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodIdentityAssociationsPagesWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListPodIdentityAssociationsPagesWithContext), varargs...) +} + +// ListPodIdentityAssociationsRequest mocks base method. +func (m *MockEKSAPI) ListPodIdentityAssociationsRequest(arg0 *eks.ListPodIdentityAssociationsInput) (*request.Request, *eks.ListPodIdentityAssociationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPodIdentityAssociationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.ListPodIdentityAssociationsOutput) + return ret0, ret1 +} + +// ListPodIdentityAssociationsRequest indicates an expected call of ListPodIdentityAssociationsRequest. +func (mr *MockEKSAPIMockRecorder) ListPodIdentityAssociationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodIdentityAssociationsRequest", reflect.TypeOf((*MockEKSAPI)(nil).ListPodIdentityAssociationsRequest), arg0) +} + +// ListPodIdentityAssociationsWithContext mocks base method. +func (m *MockEKSAPI) ListPodIdentityAssociationsWithContext(arg0 context.Context, arg1 *eks.ListPodIdentityAssociationsInput, arg2 ...request.Option) (*eks.ListPodIdentityAssociationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPodIdentityAssociationsWithContext", varargs...) + ret0, _ := ret[0].(*eks.ListPodIdentityAssociationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPodIdentityAssociationsWithContext indicates an expected call of ListPodIdentityAssociationsWithContext. +func (mr *MockEKSAPIMockRecorder) ListPodIdentityAssociationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodIdentityAssociationsWithContext", reflect.TypeOf((*MockEKSAPI)(nil).ListPodIdentityAssociationsWithContext), varargs...) +} + // ListTagsForResource mocks base method. func (m *MockEKSAPI) ListTagsForResource(arg0 *eks.ListTagsForResourceInput) (*eks.ListTagsForResourceOutput, error) { m.ctrl.T.Helper() @@ -1783,6 +2881,56 @@ func (mr *MockEKSAPIMockRecorder) UntagResourceWithContext(arg0, arg1 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UntagResourceWithContext), varargs...) } +// UpdateAccessEntry mocks base method. 
+func (m *MockEKSAPI) UpdateAccessEntry(arg0 *eks.UpdateAccessEntryInput) (*eks.UpdateAccessEntryOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccessEntry", arg0) + ret0, _ := ret[0].(*eks.UpdateAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAccessEntry indicates an expected call of UpdateAccessEntry. +func (mr *MockEKSAPIMockRecorder) UpdateAccessEntry(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).UpdateAccessEntry), arg0) +} + +// UpdateAccessEntryRequest mocks base method. +func (m *MockEKSAPI) UpdateAccessEntryRequest(arg0 *eks.UpdateAccessEntryInput) (*request.Request, *eks.UpdateAccessEntryOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccessEntryRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdateAccessEntryOutput) + return ret0, ret1 +} + +// UpdateAccessEntryRequest indicates an expected call of UpdateAccessEntryRequest. +func (mr *MockEKSAPIMockRecorder) UpdateAccessEntryRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessEntryRequest", reflect.TypeOf((*MockEKSAPI)(nil).UpdateAccessEntryRequest), arg0) +} + +// UpdateAccessEntryWithContext mocks base method. +func (m *MockEKSAPI) UpdateAccessEntryWithContext(arg0 context.Context, arg1 *eks.UpdateAccessEntryInput, arg2 ...request.Option) (*eks.UpdateAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateAccessEntryWithContext", varargs...) + ret0, _ := ret[0].(*eks.UpdateAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAccessEntryWithContext indicates an expected call of UpdateAccessEntryWithContext. +func (mr *MockEKSAPIMockRecorder) UpdateAccessEntryWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessEntryWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateAccessEntryWithContext), varargs...) +} + // UpdateAddon mocks base method. func (m *MockEKSAPI) UpdateAddon(arg0 *eks.UpdateAddonInput) (*eks.UpdateAddonOutput, error) { m.ctrl.T.Helper() @@ -1933,6 +3081,56 @@ func (mr *MockEKSAPIMockRecorder) UpdateClusterVersionWithContext(arg0, arg1 int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateClusterVersionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateClusterVersionWithContext), varargs...) } +// UpdateEksAnywhereSubscription mocks base method. +func (m *MockEKSAPI) UpdateEksAnywhereSubscription(arg0 *eks.UpdateEksAnywhereSubscriptionInput) (*eks.UpdateEksAnywhereSubscriptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEksAnywhereSubscription", arg0) + ret0, _ := ret[0].(*eks.UpdateEksAnywhereSubscriptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateEksAnywhereSubscription indicates an expected call of UpdateEksAnywhereSubscription. 
+func (mr *MockEKSAPIMockRecorder) UpdateEksAnywhereSubscription(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEksAnywhereSubscription", reflect.TypeOf((*MockEKSAPI)(nil).UpdateEksAnywhereSubscription), arg0) +} + +// UpdateEksAnywhereSubscriptionRequest mocks base method. +func (m *MockEKSAPI) UpdateEksAnywhereSubscriptionRequest(arg0 *eks.UpdateEksAnywhereSubscriptionInput) (*request.Request, *eks.UpdateEksAnywhereSubscriptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEksAnywhereSubscriptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdateEksAnywhereSubscriptionOutput) + return ret0, ret1 +} + +// UpdateEksAnywhereSubscriptionRequest indicates an expected call of UpdateEksAnywhereSubscriptionRequest. +func (mr *MockEKSAPIMockRecorder) UpdateEksAnywhereSubscriptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEksAnywhereSubscriptionRequest", reflect.TypeOf((*MockEKSAPI)(nil).UpdateEksAnywhereSubscriptionRequest), arg0) +} + +// UpdateEksAnywhereSubscriptionWithContext mocks base method. +func (m *MockEKSAPI) UpdateEksAnywhereSubscriptionWithContext(arg0 context.Context, arg1 *eks.UpdateEksAnywhereSubscriptionInput, arg2 ...request.Option) (*eks.UpdateEksAnywhereSubscriptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateEksAnywhereSubscriptionWithContext", varargs...) + ret0, _ := ret[0].(*eks.UpdateEksAnywhereSubscriptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateEksAnywhereSubscriptionWithContext indicates an expected call of UpdateEksAnywhereSubscriptionWithContext. +func (mr *MockEKSAPIMockRecorder) UpdateEksAnywhereSubscriptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEksAnywhereSubscriptionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateEksAnywhereSubscriptionWithContext), varargs...) +} + // UpdateNodegroupConfig mocks base method. func (m *MockEKSAPI) UpdateNodegroupConfig(arg0 *eks.UpdateNodegroupConfigInput) (*eks.UpdateNodegroupConfigOutput, error) { m.ctrl.T.Helper() @@ -2033,6 +3231,56 @@ func (mr *MockEKSAPIMockRecorder) UpdateNodegroupVersionWithContext(arg0, arg1 i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodegroupVersionWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdateNodegroupVersionWithContext), varargs...) } +// UpdatePodIdentityAssociation mocks base method. +func (m *MockEKSAPI) UpdatePodIdentityAssociation(arg0 *eks.UpdatePodIdentityAssociationInput) (*eks.UpdatePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePodIdentityAssociation", arg0) + ret0, _ := ret[0].(*eks.UpdatePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePodIdentityAssociation indicates an expected call of UpdatePodIdentityAssociation. 
+func (mr *MockEKSAPIMockRecorder) UpdatePodIdentityAssociation(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodIdentityAssociation", reflect.TypeOf((*MockEKSAPI)(nil).UpdatePodIdentityAssociation), arg0) +} + +// UpdatePodIdentityAssociationRequest mocks base method. +func (m *MockEKSAPI) UpdatePodIdentityAssociationRequest(arg0 *eks.UpdatePodIdentityAssociationInput) (*request.Request, *eks.UpdatePodIdentityAssociationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePodIdentityAssociationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*eks.UpdatePodIdentityAssociationOutput) + return ret0, ret1 +} + +// UpdatePodIdentityAssociationRequest indicates an expected call of UpdatePodIdentityAssociationRequest. +func (mr *MockEKSAPIMockRecorder) UpdatePodIdentityAssociationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodIdentityAssociationRequest", reflect.TypeOf((*MockEKSAPI)(nil).UpdatePodIdentityAssociationRequest), arg0) +} + +// UpdatePodIdentityAssociationWithContext mocks base method. +func (m *MockEKSAPI) UpdatePodIdentityAssociationWithContext(arg0 context.Context, arg1 *eks.UpdatePodIdentityAssociationInput, arg2 ...request.Option) (*eks.UpdatePodIdentityAssociationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdatePodIdentityAssociationWithContext", varargs...) + ret0, _ := ret[0].(*eks.UpdatePodIdentityAssociationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePodIdentityAssociationWithContext indicates an expected call of UpdatePodIdentityAssociationWithContext. +func (mr *MockEKSAPIMockRecorder) UpdatePodIdentityAssociationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodIdentityAssociationWithContext", reflect.TypeOf((*MockEKSAPI)(nil).UpdatePodIdentityAssociationWithContext), varargs...) +} + // WaitUntilAddonActive mocks base method. 
func (m *MockEKSAPI) WaitUntilAddonActive(arg0 *eks.DescribeAddonInput) error { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/eks/nodegroup.go b/pkg/cloud/services/eks/nodegroup.go index acffe3cdd3..a196db11ac 100644 --- a/pkg/cloud/services/eks/nodegroup.go +++ b/pkg/cloud/services/eks/nodegroup.go @@ -260,7 +260,7 @@ func (s *NodegroupService) createNodegroup() (*eks.Nodegroup, error) { if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - // TODO + // TODO: handle other errors case eks.ErrCodeResourceNotFoundException: return nil, nil default: @@ -304,7 +304,7 @@ func (s *NodegroupService) deleteNodegroupAndWait() (reterr error) { if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - // TODO + // TODO handle other errors case eks.ErrCodeResourceNotFoundException: return nil default: @@ -330,7 +330,11 @@ func (s *NodegroupService) deleteNodegroupAndWait() (reterr error) { func (s *NodegroupService) reconcileNodegroupVersion(ng *eks.Nodegroup) error { var specVersion *version.Version if s.scope.Version() != nil { - specVersion = parseEKSVersion(*s.scope.Version()) + var err error + specVersion, err = parseEKSVersion(*s.scope.Version()) + if err != nil { + return fmt.Errorf("parsing EKS version from spec: %w", err) + } } ngVersion := version.MustParseGeneric(*ng.Version) specAMI := s.scope.ManagedMachinePool.Spec.AMIVersion @@ -351,6 +355,12 @@ func (s *NodegroupService) reconcileNodegroupVersion(ng *eks.Nodegroup) error { var updateMsg string // Either update k8s version or AMI version switch { + case statusLaunchTemplateVersion != nil && *statusLaunchTemplateVersion != *ngLaunchTemplateVersion: + input.LaunchTemplate = &eks.LaunchTemplateSpecification{ + Id: s.scope.ManagedMachinePool.Status.LaunchTemplateID, + Version: statusLaunchTemplateVersion, + } + updateMsg = fmt.Sprintf("to launch template version %s", *statusLaunchTemplateVersion) case specVersion != nil && ngVersion.LessThan(specVersion): // NOTE: you can only upgrade increments of minor versions. If you want to upgrade 1.14 to 1.16 we // need to go 1.14-> 1.15 and then 1.15 -> 1.16. @@ -359,12 +369,6 @@ func (s *NodegroupService) reconcileNodegroupVersion(ng *eks.Nodegroup) error { case specAMI != nil && *specAMI != ngAMI: input.ReleaseVersion = specAMI updateMsg = fmt.Sprintf("to AMI version %s", *input.ReleaseVersion) - case statusLaunchTemplateVersion != nil && *statusLaunchTemplateVersion != *ngLaunchTemplateVersion: - input.LaunchTemplate = &eks.LaunchTemplateSpecification{ - Id: s.scope.ManagedMachinePool.Status.LaunchTemplateID, - Version: statusLaunchTemplateVersion, - } - updateMsg = fmt.Sprintf("to launch template version %s", *statusLaunchTemplateVersion) } if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { diff --git a/pkg/cloud/services/eks/service.go b/pkg/cloud/services/eks/service.go index 22684defdf..9160a398a1 100644 --- a/pkg/cloud/services/eks/service.go +++ b/pkg/cloud/services/eks/service.go @@ -52,6 +52,7 @@ type Service struct { STSClient stsiface.STSAPI } +// ServiceOpts defines the functional arguments for the service. type ServiceOpts func(s *Service) // WithIAMClient creates an access spec with a custom http client. 
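The reordered switch in reconcileNodegroupVersion above makes launch template drift take precedence over a Kubernetes version upgrade, which in turn takes precedence over an AMI release change, and the NOTE explains that Kubernetes upgrades may only advance one minor version per update. The following standalone sketch illustrates that selection rule only; it is not CAPA code, and the struct and helper names are invented for the example.

```go
package main

import "fmt"

// nodegroupState is a simplified stand-in for the real scope/EKS structures,
// carrying just the fields the precedence rule looks at.
type nodegroupState struct {
	statusLTVersion *string // launch template version recorded in status
	ngLTVersion     *string // launch template version currently on the nodegroup
	specMinor       int     // desired Kubernetes minor version (0 = unset)
	ngMinor         int     // current Kubernetes minor version
	specAMI         string  // desired AMI release version ("" = unset)
	ngAMI           string  // current AMI release version
}

// nextUpdate describes the single update that would be requested for this reconcile:
// launch template drift first, then a Kubernetes minor upgrade, then an AMI change.
func nextUpdate(s nodegroupState) string {
	switch {
	case s.statusLTVersion != nil && s.ngLTVersion != nil && *s.statusLTVersion != *s.ngLTVersion:
		return "update to launch template version " + *s.statusLTVersion
	case s.specMinor != 0 && s.specMinor > s.ngMinor:
		// Only one minor step per update, e.g. 1.14 -> 1.16 is done as 1.14 -> 1.15 -> 1.16.
		return fmt.Sprintf("upgrade Kubernetes version to 1.%d", s.ngMinor+1)
	case s.specAMI != "" && s.specAMI != s.ngAMI:
		return "update to AMI version " + s.specAMI
	default:
		return "no update required"
	}
}

func main() {
	v3, v4 := "3", "4"
	// Launch template drift wins even though a version upgrade is also pending.
	fmt.Println(nextUpdate(nodegroupState{statusLTVersion: &v4, ngLTVersion: &v3, specMinor: 29, ngMinor: 28}))
	// A two-minor-version gap is closed one step at a time.
	fmt.Println(nextUpdate(nodegroupState{specMinor: 29, ngMinor: 27}))
}
```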
diff --git a/pkg/cloud/services/elb/eip.go b/pkg/cloud/services/elb/eip.go
new file mode 100644
index 0000000000..27c9ebee70
--- /dev/null
+++ b/pkg/cloud/services/elb/eip.go
@@ -0,0 +1,55 @@
+package elb
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/elbv2"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+)
+
+func getElasticIPRoleName() string {
+	return fmt.Sprintf("lb-%s", infrav1.APIServerRoleTagValue)
+}
+
+// allocatePublicIpv4AddressFromByoIPPool claims Elastic IPs from a user-defined public IPv4 pool and assigns
+// them to the subnet mappings of a Network Load Balancer.
+func (s *Service) allocatePublicIpv4AddressFromByoIPPool(input *elbv2.CreateLoadBalancerInput) error {
+	// Custom Public IPv4 Pool isn't set.
+	if s.scope.VPC().GetPublicIpv4Pool() == nil {
+		return nil
+	}
+
+	// Only NLB is supported.
+	if input.Type == nil {
+		return fmt.Errorf("PublicIpv4Pool is supported only when the Load Balancer type is %q", elbv2.LoadBalancerTypeEnumNetwork)
+	}
+	if *input.Type != string(elbv2.LoadBalancerTypeEnumNetwork) {
+		return fmt.Errorf("PublicIpv4Pool is not supported with Load Balancer type %s. Use Network Load Balancer instead", *input.Type)
+	}
+
+	// User-defined SubnetMappings must not be set; they are built below from the allocated Elastic IPs.
+	if len(input.SubnetMappings) > 0 {
+		return fmt.Errorf("PublicIpv4Pool is mutually exclusive with SubnetMappings")
+	}
+
+	eips, err := s.netService.GetOrAllocateAddresses(s.scope.VPC().GetElasticIPPool(), len(input.Subnets), getElasticIPRoleName())
+	if err != nil {
+		return fmt.Errorf("failed to allocate address from Public IPv4 Pool %q to role %s: %w", *s.scope.VPC().GetPublicIpv4Pool(), getElasticIPRoleName(), err)
+	}
+	if len(eips) != len(input.Subnets) {
+		return fmt.Errorf("number of allocated EIP addresses (%d) from pool %q must match with the subnet count (%d)", len(eips), *s.scope.VPC().GetPublicIpv4Pool(), len(input.Subnets))
+	}
+	for cnt, sb := range input.Subnets {
+		input.SubnetMappings = append(input.SubnetMappings, &elbv2.SubnetMapping{
+			SubnetId:     aws.String(*sb),
+			AllocationId: aws.String(eips[cnt]),
+		})
+	}
+	// Subnets and SubnetMappings are mutually exclusive. Clear Subnets now that the BYO Elastic IPs
+	// have been assigned through SubnetMappings.
+	input.Subnets = []*string{}
+
+	return nil
+}
diff --git a/pkg/cloud/services/elb/errors.go b/pkg/cloud/services/elb/errors.go
index c1e7a2bc44..28a7c4a0ba 100644
--- a/pkg/cloud/services/elb/errors.go
+++ b/pkg/cloud/services/elb/errors.go
@@ -56,6 +56,15 @@ func NewConflict(msg string) error {
 	}
 }
 
+// NewInstanceNotRunning returns an error which indicates that the request cannot be processed due to the instance not
+// being in a running state.
+func NewInstanceNotRunning(msg string) error {
+	return &ELBError{
+		msg:  msg,
+		Code: http.StatusTooEarly,
+	}
+}
+
 // IsNotFound returns true if the error was created by NewNotFound.
 func IsNotFound(err error) bool {
 	if ReasonForError(err) == http.StatusNotFound {
@@ -90,6 +99,11 @@ func IsSDKError(err error) (ok bool) {
 	return
 }
 
+// IsInstanceNotRunning returns true if the error was created by NewInstanceNotRunning.
+func IsInstanceNotRunning(err error) (ok bool) {
+	return ReasonForError(err) == http.StatusTooEarly
+}
+
 // ReasonForError returns the HTTP status for a particular error.
func ReasonForError(err error) int { if t, ok := errors.Cause(err).(*ELBError); ok { diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 0d03adbfcc..ce9fecd36e 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -34,6 +34,8 @@ import ( "github.com/pkg/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -54,6 +56,14 @@ const elbResourceType = "elasticloadbalancing:loadbalancer" // see: https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeTags.html const maxELBsDescribeTagsRequest = 20 +// apiServerTargetGroupPrefix is the target group name prefix used when creating a target group for the API server +// listener. +const apiServerTargetGroupPrefix = "apiserver-target-" + +// additionalTargetGroupPrefix is the target group name prefix used when creating target groups for additional +// listeners. +const additionalTargetGroupPrefix = "additional-listener-" + // ReconcileLoadbalancers reconciles the load balancers for the given cluster. func (s *Service) ReconcileLoadbalancers() error { s.scope.Debug("Reconciling load balancers") @@ -86,7 +96,7 @@ func (s *Service) reconcileV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) error { } // Get default api server spec. - spec, err := s.getAPIServerLBSpec(name, lbSpec) + desiredLB, err := s.getAPIServerLBSpec(name, lbSpec) if err != nil { return err } @@ -96,7 +106,7 @@ func (s *Service) reconcileV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) error { // if elb is not found and owner cluster ControlPlaneEndpoint is already populated, then we should not recreate the elb. return errors.Wrapf(err, "no loadbalancer exists for the AWSCluster %s, the cluster has become unrecoverable and should be deleted manually", s.scope.InfraClusterName()) case IsNotFound(err): - lb, err = s.createLB(spec, lbSpec) + lb, err = s.createLB(desiredLB, lbSpec) if err != nil { s.scope.Error(err, "failed to create LB") return err @@ -111,36 +121,43 @@ func (s *Service) reconcileV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) error { // set up the type for later processing lb.LoadBalancerType = lbSpec.LoadBalancerType if lb.IsManaged(s.scope.Name()) { - if !cmp.Equal(spec.ELBAttributes, lb.ELBAttributes) { - if err := s.configureLBAttributes(lb.ARN, spec.ELBAttributes); err != nil { + // Reconcile the target groups and listeners from the spec and the ones currently attached to the load balancer. + // Pass in the ARN that AWS gave us, as well as the rest of the desired specification. + _, _, err := s.reconcileTargetGroupsAndListeners(lb.ARN, desiredLB, lbSpec) + if err != nil { + return errors.Wrapf(err, "failed to create target groups/listeners for load balancer %q", lb.Name) + } + + if !cmp.Equal(desiredLB.ELBAttributes, lb.ELBAttributes) { + if err := s.configureLBAttributes(lb.ARN, desiredLB.ELBAttributes); err != nil { return err } } - if err := s.reconcileV2LBTags(lb, spec.Tags); err != nil { + if err := s.reconcileV2LBTags(lb, desiredLB.Tags); err != nil { return errors.Wrapf(err, "failed to reconcile tags for apiserver load balancer %q", lb.Name) } - // Reconcile the subnets and availability zones from the spec + // Reconcile the subnets and availability zones from the desiredLB // and the ones currently attached to the load balancer. 
- if len(lb.SubnetIDs) != len(spec.SubnetIDs) { + if len(lb.SubnetIDs) != len(desiredLB.SubnetIDs) { _, err := s.ELBV2Client.SetSubnets(&elbv2.SetSubnetsInput{ LoadBalancerArn: &lb.ARN, - Subnets: aws.StringSlice(spec.SubnetIDs), + Subnets: aws.StringSlice(desiredLB.SubnetIDs), }) if err != nil { return errors.Wrapf(err, "failed to set subnets for apiserver load balancer '%s'", lb.Name) } } - if len(lb.AvailabilityZones) != len(spec.AvailabilityZones) { - lb.AvailabilityZones = spec.AvailabilityZones + if len(lb.AvailabilityZones) != len(desiredLB.AvailabilityZones) { + lb.AvailabilityZones = desiredLB.AvailabilityZones } - // Reconcile the security groups from the spec and the ones currently attached to the load balancer - if shouldReconcileSGs(s.scope, lb, spec.SecurityGroupIDs) { + // Reconcile the security groups from the desiredLB and the ones currently attached to the load balancer + if shouldReconcileSGs(s.scope, lb, desiredLB.SecurityGroupIDs) { _, err := s.ELBV2Client.SetSecurityGroups(&elbv2.SetSecurityGroupsInput{ LoadBalancerArn: &lb.ARN, - SecurityGroups: aws.StringSlice(spec.SecurityGroupIDs), + SecurityGroups: aws.StringSlice(desiredLB.SecurityGroupIDs), }) if err != nil { return errors.Wrapf(err, "failed to apply security groups to load balancer %q", lb.Name) @@ -159,6 +176,86 @@ func (s *Service) reconcileV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) error { return nil } +// getAPITargetGroupHealthCheck creates the health check for the Kube apiserver target group, +// limiting the customization for the health check probe counters (skipping standarized/reserved +// fields: Protocol, Port or Path). To customize the health check protocol, use HealthCheckProtocol instead. +func (s *Service) getAPITargetGroupHealthCheck(lbSpec *infrav1.AWSLoadBalancerSpec) *infrav1.TargetGroupHealthCheck { + apiHealthCheckProtocol := infrav1.ELBProtocolTCP.String() + if lbSpec != nil && lbSpec.HealthCheckProtocol != nil { + s.scope.Trace("Found API health check protocol override in the Load Balancer spec, applying it to the API Target Group", "api-server-elb", lbSpec.HealthCheckProtocol.String()) + apiHealthCheckProtocol = lbSpec.HealthCheckProtocol.String() + } + apiHealthCheck := &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String(apiHealthCheckProtocol), + Port: aws.String(infrav1.DefaultAPIServerPortString), + Path: nil, + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + } + if apiHealthCheckProtocol == infrav1.ELBProtocolHTTP.String() || apiHealthCheckProtocol == infrav1.ELBProtocolHTTPS.String() { + apiHealthCheck.Path = aws.String(infrav1.DefaultAPIServerHealthCheckPath) + } + + if lbSpec != nil && lbSpec.HealthCheck != nil { + s.scope.Trace("Found API health check override in the Load Balancer spec, applying it to the API Target Group", "api-server-elb", lbSpec.HealthCheck) + if lbSpec.HealthCheck.IntervalSeconds != nil { + apiHealthCheck.IntervalSeconds = lbSpec.HealthCheck.IntervalSeconds + } + if lbSpec.HealthCheck.TimeoutSeconds != nil { + apiHealthCheck.TimeoutSeconds = lbSpec.HealthCheck.TimeoutSeconds + } + if lbSpec.HealthCheck.ThresholdCount != nil { + apiHealthCheck.ThresholdCount = lbSpec.HealthCheck.ThresholdCount + } + if lbSpec.HealthCheck.UnhealthyThresholdCount != nil { + apiHealthCheck.UnhealthyThresholdCount 
= lbSpec.HealthCheck.UnhealthyThresholdCount + } + } + return apiHealthCheck +} + +// getAdditionalTargetGroupHealthCheck creates the target group health check for additional listener. +// Additional listeners allows to set customized attributes for health check. +func (s *Service) getAdditionalTargetGroupHealthCheck(ln infrav1.AdditionalListenerSpec) *infrav1.TargetGroupHealthCheck { + healthCheck := &infrav1.TargetGroupHealthCheck{ + Port: aws.String(fmt.Sprintf("%d", ln.Port)), + Protocol: aws.String(ln.Protocol.String()), + Path: nil, + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + } + if ln.HealthCheck == nil { + return healthCheck + } + if ln.HealthCheck.Protocol != nil { + healthCheck.Protocol = aws.String(*ln.HealthCheck.Protocol) + } + if ln.HealthCheck.Port != nil { + healthCheck.Port = aws.String(*ln.HealthCheck.Port) + } + if ln.HealthCheck.Path != nil { + healthCheck.Path = aws.String(*ln.HealthCheck.Path) + } + if ln.HealthCheck.IntervalSeconds != nil { + healthCheck.IntervalSeconds = aws.Int64(*ln.HealthCheck.IntervalSeconds) + } + if ln.HealthCheck.TimeoutSeconds != nil { + healthCheck.TimeoutSeconds = aws.Int64(*ln.HealthCheck.TimeoutSeconds) + } + if ln.HealthCheck.ThresholdCount != nil { + healthCheck.ThresholdCount = aws.Int64(*ln.HealthCheck.ThresholdCount) + } + if ln.HealthCheck.UnhealthyThresholdCount != nil { + healthCheck.UnhealthyThresholdCount = aws.Int64(*ln.HealthCheck.UnhealthyThresholdCount) + } + + return healthCheck +} + func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBalancerSpec) (*infrav1.LoadBalancer, error) { var securityGroupIDs []string if lbSpec != nil { @@ -172,6 +269,8 @@ func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBala scheme = *lbSpec.Scheme } + // The default API health check is TCP, allowing customization to HTTP or HTTPS when HealthCheckProtocol is set. 
+ apiHealthCheck := s.getAPITargetGroupHealthCheck(lbSpec) res := &infrav1.LoadBalancer{ Name: elbName, Scheme: scheme, @@ -181,14 +280,11 @@ func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBala Protocol: infrav1.ELBProtocolTCP, Port: infrav1.DefaultAPIServerPort, TargetGroup: infrav1.TargetGroupSpec{ - Name: fmt.Sprintf("apiserver-target-%d", time.Now().Unix()), - Port: infrav1.DefaultAPIServerPort, - Protocol: infrav1.ELBProtocolTCP, - VpcID: s.scope.VPC().ID, - HealthCheck: &infrav1.TargetGroupHealthCheck{ - Protocol: aws.String(string(infrav1.ELBProtocolTCP)), - Port: aws.String(infrav1.DefaultAPIServerPortString), - }, + Name: names.SimpleNameGenerator.GenerateName(apiServerTargetGroupPrefix), + Port: infrav1.DefaultAPIServerPort, + Protocol: infrav1.ELBProtocolTCP, + VpcID: s.scope.VPC().ID, + HealthCheck: apiHealthCheck, }, }, }, @@ -196,19 +292,24 @@ func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBala } if lbSpec != nil { - for _, additionalListeners := range lbSpec.AdditionalListeners { + for _, listener := range lbSpec.AdditionalListeners { + lnHealthCheck := &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String(string(listener.Protocol)), + Port: aws.String(strconv.FormatInt(listener.Port, 10)), + } + if listener.HealthCheck != nil { + s.scope.Trace("Found health check override in the additional listener spec, applying it to the Target Group", listener.HealthCheck) + lnHealthCheck = s.getAdditionalTargetGroupHealthCheck(listener) + } res.ELBListeners = append(res.ELBListeners, infrav1.Listener{ - Protocol: additionalListeners.Protocol, - Port: additionalListeners.Port, + Protocol: listener.Protocol, + Port: listener.Port, TargetGroup: infrav1.TargetGroupSpec{ - Name: fmt.Sprintf("additional-listener-%d", time.Now().Unix()), - Port: additionalListeners.Port, - Protocol: additionalListeners.Protocol, - VpcID: s.scope.VPC().ID, - HealthCheck: &infrav1.TargetGroupHealthCheck{ - Protocol: aws.String(string(additionalListeners.Protocol)), - Port: aws.String(strconv.FormatInt(additionalListeners.Port, 10)), - }, + Name: names.SimpleNameGenerator.GenerateName(additionalTargetGroupPrefix), + Port: listener.Port, + Protocol: listener.Protocol, + VpcID: s.scope.VPC().ID, + HealthCheck: lnHealthCheck, }, }) } @@ -248,10 +349,10 @@ func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBala } } else { // The load balancer APIs require us to only attach one subnet for each AZ. - subnets := s.scope.Subnets().FilterPrivate() + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni() - if s.scope.ControlPlaneLoadBalancerScheme() == infrav1.ELBSchemeInternetFacing { - subnets = s.scope.Subnets().FilterPublic() + if scheme == infrav1.ELBSchemeInternetFacing { + subnets = s.scope.Subnets().FilterPublic().FilterNonCni() } subnetLoop: @@ -294,6 +395,22 @@ func (s *Service) createLB(spec *infrav1.LoadBalancer, lbSpec *infrav1.AWSLoadBa input.IpAddressType = aws.String("dualstack") } + // Allocate custom addresses (Elastic IP) to internet-facing Load Balancers, when defined. + // Custom, or BYO, Public IPv4 Pool need to be created prior install, and the Pool ID must be + // set in the VpcSpec.ElasticIPPool.PublicIPv4Pool to allow Elastic IP be consumed from + // public ip address of user-provided CIDR blocks. 
+ if spec.Scheme == infrav1.ELBSchemeInternetFacing { + if err := s.allocatePublicIpv4AddressFromByoIPPool(input); err != nil { + return nil, fmt.Errorf("failed to allocate addresses to load balancer: %w", err) + } + } + + // Subnets and SubnetMappings are mutually exclusive. SubnetMappings is set by users or when + // BYO Public IPv4 Pool is set. + if len(input.SubnetMappings) == 0 { + input.Subnets = aws.StringSlice(spec.SubnetIDs) + } + out, err := s.ELBV2Client.CreateLoadBalancer(input) if err != nil { return nil, errors.Wrapf(err, "failed to create load balancer: %v", spec) @@ -303,76 +420,14 @@ func (s *Service) createLB(spec *infrav1.LoadBalancer, lbSpec *infrav1.AWSLoadBa return nil, errors.New("no new network load balancer was created; the returned list is empty") } - // TODO(Skarlso): Add options to set up SSL. - // https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/3899 - for _, ln := range spec.ELBListeners { - // create the target group first - targetGroupInput := &elbv2.CreateTargetGroupInput{ - Name: aws.String(ln.TargetGroup.Name), - Port: aws.Int64(ln.TargetGroup.Port), - Protocol: aws.String(ln.TargetGroup.Protocol.String()), - VpcId: aws.String(ln.TargetGroup.VpcID), - Tags: input.Tags, - } - if s.scope.VPC().IsIPv6Enabled() { - targetGroupInput.IpAddressType = aws.String("ipv6") - } - if ln.TargetGroup.HealthCheck != nil { - targetGroupInput.HealthCheckEnabled = aws.Bool(true) - targetGroupInput.HealthCheckProtocol = ln.TargetGroup.HealthCheck.Protocol - targetGroupInput.HealthCheckPort = ln.TargetGroup.HealthCheck.Port - } - s.scope.Debug("creating target group", "group", targetGroupInput, "listener", ln) - group, err := s.ELBV2Client.CreateTargetGroup(targetGroupInput) - if err != nil { - return nil, errors.Wrapf(err, "failed to create target group for load balancer") - } - if len(group.TargetGroups) == 0 { - return nil, errors.New("no target group was created; the returned list is empty") - } - - if !lbSpec.PreserveClientIP { - targetGroupAttributeInput := &elbv2.ModifyTargetGroupAttributesInput{ - TargetGroupArn: group.TargetGroups[0].TargetGroupArn, - Attributes: []*elbv2.TargetGroupAttribute{ - { - Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), - Value: aws.String("false"), - }, - }, - } - if _, err := s.ELBV2Client.ModifyTargetGroupAttributes(targetGroupAttributeInput); err != nil { - return nil, errors.Wrapf(err, "failed to modify target group attribute") - } - } - - listenerInput := &elbv2.CreateListenerInput{ - DefaultActions: []*elbv2.Action{ - { - TargetGroupArn: group.TargetGroups[0].TargetGroupArn, - Type: aws.String(elbv2.ActionTypeEnumForward), - }, - }, - LoadBalancerArn: out.LoadBalancers[0].LoadBalancerArn, - Port: aws.Int64(ln.Port), - Protocol: aws.String(string(ln.Protocol)), - Tags: converters.MapToV2Tags(spec.Tags), - } - // Create ClassicELBListeners - listener, err := s.ELBV2Client.CreateListener(listenerInput) - if err != nil { - return nil, errors.Wrap(err, "failed to create listener") - } - if len(listener.Listeners) == 0 { - return nil, errors.New("no listener was created; the returned list is empty") - } - } + // Target Groups and listeners will be reconciled separately s.scope.Info("Created network load balancer", "dns-name", *out.LoadBalancers[0].DNSName) res := spec.DeepCopy() s.scope.Debug("applying load balancer DNS to result", "dns", *out.LoadBalancers[0].DNSName) res.DNSName = *out.LoadBalancers[0].DNSName + res.ARN = *out.LoadBalancers[0].LoadBalancerArn return res, nil } @@ -812,7 +867,7 @@ 
func (s *Service) RegisterInstanceWithAPIServerLB(instance *infrav1.Instance, lb return errors.Wrapf(err, "error describing ELB's target groups %q", name) } if len(targetGroups.TargetGroups) == 0 { - return errors.New(fmt.Sprintf("no target groups found for load balancer with arn '%s'", out.ARN)) + return fmt.Errorf("no target groups found for load balancer with arn '%s'", out.ARN) } // Since TargetGroups and Listeners don't care, or are not aware, of subnets before registration, we ignore that check. // Also, registering with AZ is not supported using the an InstanceID. @@ -989,9 +1044,14 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.LoadBalan } securityGroupIDs = append(securityGroupIDs, s.scope.SecurityGroups()[infrav1.SecurityGroupAPIServerLB].ID) + scheme := infrav1.ELBSchemeInternetFacing + if controlPlaneLoadBalancer != nil && controlPlaneLoadBalancer.Scheme != nil { + scheme = *controlPlaneLoadBalancer.Scheme + } + res := &infrav1.LoadBalancer{ Name: elbName, - Scheme: s.scope.ControlPlaneLoadBalancerScheme(), + Scheme: scheme, ClassicELBListeners: []infrav1.ClassicELBListener{ { Protocol: infrav1.ELBProtocolTCP, @@ -1002,10 +1062,10 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.LoadBalan }, HealthCheck: &infrav1.ClassicELBHealthCheck{ Target: s.getHealthCheckTarget(), - Interval: 10 * time.Second, - Timeout: 5 * time.Second, - HealthyThreshold: 5, - UnhealthyThreshold: 3, + Interval: infrav1.DefaultAPIServerHealthCheckIntervalSec * time.Second, + Timeout: infrav1.DefaultAPIServerHealthCheckTimeoutSec * time.Second, + HealthyThreshold: infrav1.DefaultAPIServerHealthThresholdCount, + UnhealthyThreshold: infrav1.DefaultAPIServerUnhealthThresholdCount, }, SecurityGroupIDs: securityGroupIDs, ClassicElbAttributes: infrav1.ClassicELBAttributes{ @@ -1042,10 +1102,10 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.LoadBalan } } else { // The load balancer APIs require us to only attach one subnet for each AZ. 
- subnets := s.scope.Subnets().FilterPrivate() + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni() - if s.scope.ControlPlaneLoadBalancerScheme() == infrav1.ELBSchemeInternetFacing { - subnets = s.scope.Subnets().FilterPublic() + if scheme == infrav1.ELBSchemeInternetFacing { + subnets = s.scope.Subnets().FilterPublic().FilterNonCni() } subnetLoop: @@ -1248,27 +1308,28 @@ func (s *Service) listByTag(tag string) ([]string, error) { err := s.ResourceTaggingClient.GetResourcesPages(&input, func(r *rgapi.GetResourcesOutput, last bool) bool { for _, tagmapping := range r.ResourceTagMappingList { - if tagmapping.ResourceARN != nil { - parsedARN, err := arn.Parse(*tagmapping.ResourceARN) - if err != nil { - s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag) - continue - } - if strings.Contains(parsedARN.Resource, "loadbalancer/net/") { - s.scope.Info("ignoring nlb created by service, consider enabling garbage collection", "arn", *tagmapping.ResourceARN, "tag", tag) - continue - } - if strings.Contains(parsedARN.Resource, "loadbalancer/app/") { - s.scope.Info("ignoring alb created by service, consider enabling garbage collection", "arn", *tagmapping.ResourceARN, "tag", tag) - continue - } - name := strings.ReplaceAll(parsedARN.Resource, "loadbalancer/", "") - if name == "" { - s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag) - continue - } - names = append(names, name) + if tagmapping.ResourceARN == nil { + continue + } + parsedARN, err := arn.Parse(*tagmapping.ResourceARN) + if err != nil { + s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag) + continue + } + if strings.Contains(parsedARN.Resource, "loadbalancer/net/") { + s.scope.Info("ignoring nlb created by service, consider enabling garbage collection", "arn", *tagmapping.ResourceARN, "tag", tag) + continue + } + if strings.Contains(parsedARN.Resource, "loadbalancer/app/") { + s.scope.Info("ignoring alb created by service, consider enabling garbage collection", "arn", *tagmapping.ResourceARN, "tag", tag) + continue } + name := strings.ReplaceAll(parsedARN.Resource, "loadbalancer/", "") + if name == "" { + s.scope.Info("failed to parse ARN", "arn", *tagmapping.ResourceARN, "tag", tag) + continue + } + names = append(names, name) } return true }) @@ -1494,13 +1555,169 @@ func (s *Service) reconcileV2LBTags(lb *infrav1.LoadBalancer, desiredTags map[st return nil } +// reconcileTargetGroupsAndListeners reconciles a Load Balancer's defined listeners with corresponding AWS Target Groups and Listeners. +// These are combined into a single function since they are tightly integrated. 
+func (s *Service) reconcileTargetGroupsAndListeners(lbARN string, spec *infrav1.LoadBalancer, lbSpec *infrav1.AWSLoadBalancerSpec) ([]*elbv2.TargetGroup, []*elbv2.Listener, error) { + existingTargetGroups, err := s.ELBV2Client.DescribeTargetGroups( + &elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(lbARN), + }) + if err != nil { + s.scope.Error(err, "could not describe target groups for load balancer", "arn", lbARN) + return nil, nil, err + } + + existingListeners, err := s.ELBV2Client.DescribeListeners( + &elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(lbARN), + }) + if err != nil { + s.scope.Error(err, "could not describe listeners for load balancer", "arn", lbARN) + } + + createdTargetGroups := make([]*elbv2.TargetGroup, 0, len(spec.ELBListeners)) + createdListeners := make([]*elbv2.Listener, 0, len(spec.ELBListeners)) + + // TODO(Skarlso): Add options to set up SSL. + // https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/3899 + for _, ln := range spec.ELBListeners { + var group *elbv2.TargetGroup + tgSpec := ln.TargetGroup + for _, g := range existingTargetGroups.TargetGroups { + if isSDKTargetGroupEqualToTargetGroup(g, &tgSpec) { + group = g + break + } + } + // create the target group first + if group == nil { + group, err = s.createTargetGroup(ln, spec.Tags) + if err != nil { + return nil, nil, err + } + createdTargetGroups = append(createdTargetGroups, group) + + if !lbSpec.PreserveClientIP { + targetGroupAttributeInput := &elbv2.ModifyTargetGroupAttributesInput{ + TargetGroupArn: group.TargetGroupArn, + Attributes: []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), + Value: aws.String("false"), + }, + }, + } + if _, err := s.ELBV2Client.ModifyTargetGroupAttributes(targetGroupAttributeInput); err != nil { + return nil, nil, errors.Wrapf(err, "failed to modify target group attribute") + } + } + } + + var listener *elbv2.Listener + for _, l := range existingListeners.Listeners { + if l.DefaultActions != nil && len(l.DefaultActions) > 0 && *l.DefaultActions[0].TargetGroupArn == *group.TargetGroupArn { + listener = l + break + } + } + + if listener == nil { + listener, err = s.createListener(ln, group, lbARN, spec.Tags) + if err != nil { + return nil, nil, err + } + createdListeners = append(createdListeners, listener) + } + } + + return createdTargetGroups, createdListeners, nil +} + +// createListener creates a single Listener. +func (s *Service) createListener(ln infrav1.Listener, group *elbv2.TargetGroup, lbARN string, tags map[string]string) (*elbv2.Listener, error) { + listenerInput := &elbv2.CreateListenerInput{ + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: group.TargetGroupArn, + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + LoadBalancerArn: aws.String(lbARN), + Port: aws.Int64(ln.Port), + Protocol: aws.String(string(ln.Protocol)), + Tags: converters.MapToV2Tags(tags), + } + // Create ClassicELBListeners + listener, err := s.ELBV2Client.CreateListener(listenerInput) + if err != nil { + return nil, errors.Wrap(err, "failed to create listener") + } + if len(listener.Listeners) == 0 { + return nil, errors.New("no listener was created; the returned list is empty") + } + if len(listener.Listeners) > 1 { + return nil, errors.New("more than one listener created; expected only one") + } + return listener.Listeners[0], nil +} + +// createTargetGroup creates a single Target Group. 
+func (s *Service) createTargetGroup(ln infrav1.Listener, tags map[string]string) (*elbv2.TargetGroup, error) { + targetGroupInput := &elbv2.CreateTargetGroupInput{ + Name: aws.String(ln.TargetGroup.Name), + Port: aws.Int64(ln.TargetGroup.Port), + Protocol: aws.String(ln.TargetGroup.Protocol.String()), + VpcId: aws.String(ln.TargetGroup.VpcID), + Tags: converters.MapToV2Tags(tags), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + } + if s.scope.VPC().IsIPv6Enabled() { + targetGroupInput.IpAddressType = aws.String("ipv6") + } + if ln.TargetGroup.HealthCheck != nil { + targetGroupInput.HealthCheckEnabled = aws.Bool(true) + targetGroupInput.HealthCheckProtocol = ln.TargetGroup.HealthCheck.Protocol + targetGroupInput.HealthCheckPort = ln.TargetGroup.HealthCheck.Port + if ln.TargetGroup.HealthCheck.Path != nil { + targetGroupInput.HealthCheckPath = ln.TargetGroup.HealthCheck.Path + } + if ln.TargetGroup.HealthCheck.IntervalSeconds != nil { + targetGroupInput.HealthCheckIntervalSeconds = ln.TargetGroup.HealthCheck.IntervalSeconds + } + if ln.TargetGroup.HealthCheck.TimeoutSeconds != nil { + targetGroupInput.HealthCheckTimeoutSeconds = ln.TargetGroup.HealthCheck.TimeoutSeconds + } + if ln.TargetGroup.HealthCheck.ThresholdCount != nil { + targetGroupInput.HealthyThresholdCount = ln.TargetGroup.HealthCheck.ThresholdCount + } + if ln.TargetGroup.HealthCheck.UnhealthyThresholdCount != nil { + targetGroupInput.UnhealthyThresholdCount = ln.TargetGroup.HealthCheck.UnhealthyThresholdCount + } + } + s.scope.Debug("creating target group", "group", targetGroupInput, "listener", ln) + group, err := s.ELBV2Client.CreateTargetGroup(targetGroupInput) + if err != nil { + return nil, errors.Wrapf(err, "failed to create target group for load balancer") + } + if len(group.TargetGroups) == 0 { + return nil, errors.New("no target group was created; the returned list is empty") + } + if len(group.TargetGroups) > 1 { + return nil, errors.New("more than one target group created; expected only one") + } + return group.TargetGroups[0], nil +} + func (s *Service) getHealthCheckTarget() string { controlPlaneELB := s.scope.ControlPlaneLoadBalancer() protocol := &infrav1.ELBProtocolSSL if controlPlaneELB != nil && controlPlaneELB.HealthCheckProtocol != nil { protocol = controlPlaneELB.HealthCheckProtocol if protocol.String() == infrav1.ELBProtocolHTTP.String() || protocol.String() == infrav1.ELBProtocolHTTPS.String() { - return fmt.Sprintf("%v:%d/readyz", protocol, infrav1.DefaultAPIServerPort) + return fmt.Sprintf("%v:%d%s", protocol, infrav1.DefaultAPIServerPort, infrav1.DefaultAPIServerHealthCheckPath) } } return fmt.Sprintf("%v:%d", protocol, infrav1.DefaultAPIServerPort) @@ -1527,17 +1744,17 @@ func fromSDKTypeToClassicELB(v *elb.LoadBalancerDescription, attrs *elb.LoadBala } func fromSDKTypeToLB(v *elbv2.LoadBalancer, attrs []*elbv2.LoadBalancerAttribute, tags []*elbv2.Tag) *infrav1.LoadBalancer { - subnetIds := make([]*string, len(v.AvailabilityZones)) + subnetIDs := make([]*string, len(v.AvailabilityZones)) availabilityZones := make([]*string, len(v.AvailabilityZones)) for i, az := range v.AvailabilityZones { - subnetIds[i] = az.SubnetId + subnetIDs[i] = az.SubnetId availabilityZones[i] = az.ZoneName } res := 
&infrav1.LoadBalancer{ ARN: aws.StringValue(v.LoadBalancerArn), Name: aws.StringValue(v.LoadBalancerName), Scheme: infrav1.ELBScheme(aws.StringValue(v.Scheme)), - SubnetIDs: aws.StringValueSlice(subnetIds), + SubnetIDs: aws.StringValueSlice(subnetIDs), SecurityGroupIDs: aws.StringValueSlice(v.SecurityGroups), AvailabilityZones: aws.StringValueSlice(availabilityZones), DNSName: aws.StringValue(v.DNSName), @@ -1579,3 +1796,23 @@ func shouldReconcileSGs(scope scope.ELBScope, lb *infrav1.LoadBalancer, specSGs } return true } + +// isSDKTargetGroupEqualToTargetGroup checks if a given AWS SDK target group matches a target group spec. +func isSDKTargetGroupEqualToTargetGroup(elbTG *elbv2.TargetGroup, spec *infrav1.TargetGroupSpec) bool { + // We can't check only the target group's name because it's randomly generated every time we get a spec + // But CAPA-created target groups are guaranteed to have the "apiserver-target-" or "additional-listener-" prefix. + switch { + case strings.HasPrefix(*elbTG.TargetGroupName, apiServerTargetGroupPrefix): + if !strings.HasPrefix(spec.Name, apiServerTargetGroupPrefix) { + return false + } + case strings.HasPrefix(*elbTG.TargetGroupName, additionalTargetGroupPrefix): + if !strings.HasPrefix(spec.Name, additionalTargetGroupPrefix) { + return false + } + default: + // Not created by CAPA + return false + } + return ptr.Deref(elbTG.Port, 0) == spec.Port && strings.EqualFold(*elbTG.Protocol, spec.Protocol.String()) +} diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go index 4762edc251..f2b4b1dbbe 100644 --- a/pkg/cloud/services/elb/loadbalancer_test.go +++ b/pkg/cloud/services/elb/loadbalancer_test.go @@ -19,6 +19,7 @@ package elb import ( "context" "fmt" + "reflect" "strings" "testing" @@ -29,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go/service/elbv2" rgapi "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi" "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,11 +40,24 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) +var stubInfraV1TargetGroupSpecAPI = infrav1.TargetGroupSpec{ + Name: "name", + Port: infrav1.DefaultAPIServerPort, + Protocol: "TCP", + HealthCheck: &infrav1.TargetGroupHealthCheck{ + IntervalSeconds: aws.Int64(10), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(5), + UnhealthyThresholdCount: aws.Int64(3), + }, +} + func TestELBName(t *testing.T) { tests := []struct { name string @@ -766,6 +781,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { elbName = "bar-apiserver" elbArn = "arn::apiserver" elbSubnetID = "elb-subnet" + tgArn = "arn::target-group" instanceID = "test-instance" az = "us-west-1a" differentAZ = "us-east-2c" @@ -842,19 +858,19 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { TargetGroups: []*elbv2.TargetGroup{ { HealthCheckEnabled: aws.Bool(true), - HealthCheckPort: aws.String("infrav1.DefaultAPIServerPort"), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), HealthCheckProtocol: aws.String("TCP"), LoadBalancerArns: aws.StringSlice([]string{elbArn}), Port: aws.Int64(infrav1.DefaultAPIServerPort), Protocol: aws.String("TCP"), - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), TargetGroupName: aws.String("something-generated"), VpcId: aws.String("vpc-id"), }, }, }, nil) m.RegisterTargets(gomock.Eq(&elbv2.RegisterTargetsInput{ - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Targets: []*elbv2.TargetDescription{ { Id: aws.String(instanceID), @@ -945,12 +961,12 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { TargetGroups: []*elbv2.TargetGroup{ { HealthCheckEnabled: aws.Bool(true), - HealthCheckPort: aws.String("infrav1.DefaultAPIServerPort"), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), HealthCheckProtocol: aws.String("TCP"), LoadBalancerArns: aws.StringSlice([]string{elbArn}), Port: aws.Int64(infrav1.DefaultAPIServerPort), Protocol: aws.String("TCP"), - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), TargetGroupName: aws.String("something-generated"), VpcId: aws.String("vpc-id"), }, @@ -979,7 +995,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { }, }, nil) m.RegisterTargets(gomock.Eq(&elbv2.RegisterTargetsInput{ - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Targets: []*elbv2.TargetDescription{ { Id: aws.String(instanceID), @@ -1188,58 +1204,126 @@ func TestCreateNLB(t *testing.T) { }, }, }, nil) - m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ - HealthCheckEnabled: aws.Bool(true), - HealthCheckPort: aws.String("infrav1.DefaultAPIServerPort"), - HealthCheckProtocol: aws.String("tcp"), - Name: aws.String("name"), - Port: aws.Int64(infrav1.DefaultAPIServerPort), - Protocol: aws.String("TCP"), - VpcId: aws.String(vpcID), + }, + check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + t.Helper() + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + if lb.DNSName != dns { + t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + } + }, + 
}, + { + name: "created with ipv6 vpc", + spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + return spec + }, + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + acl.Spec.NetworkSpec.VPC.IPv6 = &infrav1.IPv6{ + CidrBlock: "2022:1234::/64", + PoolID: "pool-id", + } + return acl + }, + elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { + m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ + Name: aws.String(elbName), + IpAddressType: aws.String("dualstack"), + Scheme: aws.String("internet-facing"), + SecurityGroups: aws.StringSlice([]string{}), + Type: aws.String("network"), + Subnets: aws.StringSlice([]string{clusterSubnetID}), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, - })).Return(&elbv2.CreateTargetGroupOutput{ - TargetGroups: []*elbv2.TargetGroup{ + })).Return(&elbv2.CreateLoadBalancerOutput{ + LoadBalancers: []*elbv2.LoadBalancer{ { - TargetGroupArn: aws.String("target-group::arn"), - TargetGroupName: aws.String("name"), - VpcId: aws.String(vpcID), + LoadBalancerArn: aws.String(elbArn), + LoadBalancerName: aws.String(elbName), + Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), + DNSName: aws.String(dns), }, }, }, nil) - m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ - TargetGroupArn: aws.String("target-group::arn"), - Attributes: []*elbv2.TargetGroupAttribute{ - { - Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), - Value: aws.String("false"), - }, - }, - })).Return(nil, nil) - m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ - DefaultActions: []*elbv2.Action{ + }, + check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + t.Helper() + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + if lb.DNSName != dns { + t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + } + }, + }, + { + name: "creating a load balancer fails", + spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + return spec + }, + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + return acl + }, + elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { + m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ + Name: aws.String(elbName), + Scheme: aws.String("internet-facing"), + SecurityGroups: []*string{}, + Type: aws.String("network"), + Subnets: aws.StringSlice([]string{clusterSubnetID}), + Tags: []*elbv2.Tag{ { - TargetGroupArn: aws.String("target-group::arn"), - Type: aws.String(elbv2.ActionTypeEnumForward), + Key: aws.String("test"), + Value: aws.String("tag"), }, }, - LoadBalancerArn: aws.String(elbArn), - Port: aws.Int64(infrav1.DefaultAPIServerPort), - Protocol: aws.String("TCP"), + })).Return(nil, errors.New("nope")) + }, + check: func(t *testing.T, _ *infrav1.LoadBalancer, err error) { + t.Helper() + if err == nil { + t.Fatal("expected error, got nothing") + } + if !strings.Contains(err.Error(), "nope") { + t.Fatalf("expected error to contain 'nope' was instead: %s", err) + } + }, + }, + { + name: "PreserveClientIP is enabled", + spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + return spec + }, + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + acl.Spec.ControlPlaneLoadBalancer.PreserveClientIP = true + return acl + }, + elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { + m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ + Name: aws.String(elbName), + Scheme: aws.String("internet-facing"), + SecurityGroups: aws.StringSlice([]string{}), + 
Type: aws.String("network"), + Subnets: aws.StringSlice([]string{clusterSubnetID}), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, - })).Return(&elbv2.CreateListenerOutput{ - Listeners: []*elbv2.Listener{ + })).Return(&elbv2.CreateLoadBalancerOutput{ + LoadBalancers: []*elbv2.LoadBalancer{ { - ListenerArn: aws.String("listener::arn"), + LoadBalancerArn: aws.String(elbArn), + LoadBalancerName: aws.String(elbName), + Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), + DNSName: aws.String(dns), }, }, }, nil) @@ -1255,31 +1339,28 @@ func TestCreateNLB(t *testing.T) { }, }, { - name: "created with ipv6 vpc", + name: "load balancer is not an NLB scope security groups will be added", spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + spec.SecurityGroupIDs = []string{"sg-id"} return spec }, awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { - acl.Spec.NetworkSpec.VPC.IPv6 = &infrav1.IPv6{ - CidrBlock: "2022:1234::/64", - PoolID: "pool-id", - } + acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeALB return acl }, elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ - Name: aws.String(elbName), - IpAddressType: aws.String("dualstack"), - Scheme: aws.String("internet-facing"), - SecurityGroups: aws.StringSlice([]string{}), - Type: aws.String("network"), - Subnets: aws.StringSlice([]string{clusterSubnetID}), + Name: aws.String(elbName), + Scheme: aws.String("internet-facing"), + Type: aws.String("application"), + Subnets: aws.StringSlice([]string{clusterSubnetID}), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, + SecurityGroups: aws.StringSlice([]string{"sg-id"}), })).Return(&elbv2.CreateLoadBalancerOutput{ LoadBalancers: []*elbv2.LoadBalancer{ { @@ -1290,32 +1371,166 @@ func TestCreateNLB(t *testing.T) { }, }, }, nil) + }, + check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + t.Helper() + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + if lb.DNSName != dns { + t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + elbV2APIMocks := mocks.NewMockELBV2API(mockCtrl) + + scheme, err := setupScheme() + if err != nil { + t.Fatal(err) + } + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: clusterName}, + Spec: infrav1.AWSClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{ + Name: aws.String(elbName), + LoadBalancerType: infrav1.LoadBalancerTypeNLB, + }, + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: vpcID, + }, + }, + }, + } + client := fake.NewClientBuilder().WithScheme(scheme).Build() + cluster := tc.awsCluster(*awsCluster) + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: clusterName, + }, + }, + AWSCluster: &cluster, + }) + if err != nil { + t.Fatal(err) + } + + tc.elbV2APIMocks(elbV2APIMocks.EXPECT()) + + s := &Service{ + scope: clusterScope, + ELBV2Client: elbV2APIMocks, + } + + loadBalancerSpec := &infrav1.LoadBalancer{ + ARN: elbArn, + Name: elbName, + Scheme: infrav1.ELBSchemeInternetFacing, + Tags: map[string]string{ + "test": "tag", + }, + ELBListeners: []infrav1.Listener{ + { + Protocol: "TCP", + Port: 
infrav1.DefaultAPIServerPort, + TargetGroup: infrav1.TargetGroupSpec{ + Name: "name", + Port: infrav1.DefaultAPIServerPort, + Protocol: "TCP", + VpcID: vpcID, + HealthCheck: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("tcp"), + Port: aws.String(infrav1.DefaultAPIServerPortString), + }, + }, + }, + }, + LoadBalancerType: infrav1.LoadBalancerTypeNLB, + SubnetIDs: []string{clusterSubnetID}, + } + + spec := tc.spec(*loadBalancerSpec) + lb, err := s.createLB(&spec, clusterScope.ControlPlaneLoadBalancer()) + tc.check(t, lb, err) + }) + } +} + +func TestReconcileTargetGroupsAndListeners(t *testing.T) { + const ( + namespace = "foo" + clusterName = "bar" + clusterSubnetID = "subnet-1" + elbName = "bar-apiserver" + elbArn = "arn::apiserver" + tgArn = "arn::target-group" + vpcID = "vpc-id" + dns = "asdf:9999/asdf" + ) + + tests := []struct { + name string + elbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder) + check func(t *testing.T, tgs []*elbv2.TargetGroup, listeners []*elbv2.Listener, err error) + awsCluster func(acl infrav1.AWSCluster) infrav1.AWSCluster + spec func(spec infrav1.LoadBalancer) infrav1.LoadBalancer + }{ + { + name: "main create flow", + spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + return spec + }, + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + return acl + }, + elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeTargetGroupsOutput{ + TargetGroups: []*elbv2.TargetGroup{}, + }, nil) m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ - HealthCheckEnabled: aws.Bool(true), - HealthCheckPort: aws.String("infrav1.DefaultAPIServerPort"), - HealthCheckProtocol: aws.String("tcp"), - Name: aws.String("name"), - Port: aws.Int64(infrav1.DefaultAPIServerPort), - Protocol: aws.String("TCP"), - VpcId: aws.String(vpcID), - IpAddressType: aws.String("ipv6"), + Name: aws.String("name"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + VpcId: aws.String(vpcID), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, + HealthCheckEnabled: aws.Bool(true), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("tcp"), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), })).Return(&elbv2.CreateTargetGroupOutput{ TargetGroups: []*elbv2.TargetGroup{ { - TargetGroupArn: aws.String("target-group::arn"), - TargetGroupName: aws.String("name"), - VpcId: aws.String(vpcID), + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("name"), + VpcId: aws.String(vpcID), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), }, }, }, nil) m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Attributes: 
[]*elbv2.TargetGroupAttribute{ { Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), @@ -1323,10 +1538,15 @@ func TestCreateNLB(t *testing.T) { }, }, })).Return(nil, nil) + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{}, + }, nil) m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ DefaultActions: []*elbv2.Action{ { - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Type: aws.String(elbv2.ActionTypeEnumForward), }, }, @@ -1342,51 +1562,138 @@ func TestCreateNLB(t *testing.T) { })).Return(&elbv2.CreateListenerOutput{ Listeners: []*elbv2.Listener{ { + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: aws.String(tgArn), + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, ListenerArn: aws.String("listener::arn"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), }, - }, - }, nil) + }}, nil) }, - check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + check: func(t *testing.T, tgs []*elbv2.TargetGroup, listeners []*elbv2.Listener, err error) { t.Helper() if err != nil { t.Fatalf("did not expect error: %v", err) } - if lb.DNSName != dns { - t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + if len(tgs) != 1 { + t.Fatalf("no target groups created") + } + if len(listeners) != 1 { + t.Fatalf("no listeners created") + } + + if len(listeners[0].DefaultActions) != 1 { + t.Fatalf("no default actions created") + } + + if *tgs[0].TargetGroupArn != *listeners[0].DefaultActions[0].TargetGroupArn { + t.Fatalf("target group and listener did not have matching arns. target group ARN: %q. listener's target group ARN: %q", *tgs[0].TargetGroupArn, *listeners[0].DefaultActions[0].TargetGroupArn) } }, }, { - name: "creating a load balancer fails", + name: "created with ipv6 vpc", spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { return spec }, awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + acl.Spec.NetworkSpec.VPC.IPv6 = &infrav1.IPv6{ + CidrBlock: "2022:1234::/64", + PoolID: "pool-id", + } return acl }, elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { - m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ - Name: aws.String(elbName), - Scheme: aws.String("internet-facing"), - SecurityGroups: []*string{}, - Type: aws.String("network"), - Subnets: aws.StringSlice([]string{clusterSubnetID}), + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeTargetGroupsOutput{ + TargetGroups: []*elbv2.TargetGroup{}, + }, nil) + m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ + Name: aws.String("name"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + VpcId: aws.String(vpcID), + IpAddressType: aws.String("ipv6"), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, - })).Return(nil, errors.New("nope")) + HealthCheckEnabled: aws.Bool(true), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("tcp"), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: 
aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + })).Return(&elbv2.CreateTargetGroupOutput{ + TargetGroups: []*elbv2.TargetGroup{ + { + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("name"), + VpcId: aws.String(vpcID), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + IpAddressType: aws.String("ipv6"), + }, + }, + }, nil) + m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ + TargetGroupArn: aws.String(tgArn), + Attributes: []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), + Value: aws.String("false"), + }, + }, + })).Return(nil, nil) + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{}, + }, nil) + m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: aws.String(tgArn), + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + LoadBalancerArn: aws.String(elbArn), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + Tags: []*elbv2.Tag{ + { + Key: aws.String("test"), + Value: aws.String("tag"), + }, + }, + })).Return(&elbv2.CreateListenerOutput{ + Listeners: []*elbv2.Listener{ + { + ListenerArn: aws.String("listener::arn"), + }, + }, + }, nil) }, - check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + check: func(t *testing.T, tgs []*elbv2.TargetGroup, _ []*elbv2.Listener, err error) { t.Helper() - if err == nil { - t.Fatal("expected error, got nothing") + if err != nil { + t.Fatalf("did not expect error: %v", err) } - if !strings.Contains(err.Error(), "nope") { - t.Fatalf("expected error to contain 'nope' was instead: %s", err) + tg := tgs[0] + got := *tg.IpAddressType + want := "ipv6" + if got != want { + t.Fatalf("did not set ip address type to ipv6") } }, }, @@ -1411,27 +1718,10 @@ func TestCreateNLB(t *testing.T) { return acl }, elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { - m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ - Name: aws.String(elbName), - Scheme: aws.String("internet-facing"), - SecurityGroups: aws.StringSlice([]string{}), - Type: aws.String("network"), - Subnets: aws.StringSlice([]string{clusterSubnetID}), - Tags: []*elbv2.Tag{ - { - Key: aws.String("test"), - Value: aws.String("tag"), - }, - }, - })).Return(&elbv2.CreateLoadBalancerOutput{ - LoadBalancers: []*elbv2.LoadBalancer{ - { - LoadBalancerArn: aws.String(elbArn), - LoadBalancerName: aws.String(elbName), - Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), - DNSName: aws.String(dns), - }, - }, + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeTargetGroupsOutput{ + TargetGroups: []*elbv2.TargetGroup{}, }, nil) m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ Name: aws.String("name"), @@ -1444,17 +1734,26 @@ func TestCreateNLB(t *testing.T) { Value: aws.String("tag"), }, }, + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + 
HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), })).Return(&elbv2.CreateTargetGroupOutput{ TargetGroups: []*elbv2.TargetGroup{ { - TargetGroupArn: aws.String("target-group::arn"), - TargetGroupName: aws.String("name"), - VpcId: aws.String(vpcID), + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("name"), + VpcId: aws.String(vpcID), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + HealthCheckEnabled: aws.Bool(false), }, }, }, nil) m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Attributes: []*elbv2.TargetGroupAttribute{ { Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), @@ -1462,10 +1761,15 @@ func TestCreateNLB(t *testing.T) { }, }, })).Return(nil, nil) + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{}, + }, nil) m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ DefaultActions: []*elbv2.Action{ { - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Type: aws.String(elbv2.ActionTypeEnumForward), }, }, @@ -1486,13 +1790,15 @@ func TestCreateNLB(t *testing.T) { }, }, nil) }, - check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + check: func(t *testing.T, tgs []*elbv2.TargetGroup, _ []*elbv2.Listener, err error) { t.Helper() if err != nil { t.Fatalf("did not expect error: %v", err) } - if lb.DNSName != dns { - t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + got := *tgs[0].HealthCheckEnabled + want := false + if got != want { + t.Fatalf("health check not disabled on target group") } }, }, @@ -1506,31 +1812,14 @@ func TestCreateNLB(t *testing.T) { return acl }, elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { - m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ - Name: aws.String(elbName), - Scheme: aws.String("internet-facing"), - SecurityGroups: aws.StringSlice([]string{}), - Type: aws.String("network"), - Subnets: aws.StringSlice([]string{clusterSubnetID}), - Tags: []*elbv2.Tag{ - { - Key: aws.String("test"), - Value: aws.String("tag"), - }, - }, - })).Return(&elbv2.CreateLoadBalancerOutput{ - LoadBalancers: []*elbv2.LoadBalancer{ - { - LoadBalancerArn: aws.String(elbArn), - LoadBalancerName: aws.String(elbName), - Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), - DNSName: aws.String(dns), - }, - }, + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeTargetGroupsOutput{ + TargetGroups: []*elbv2.TargetGroup{}, }, nil) m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ HealthCheckEnabled: aws.Bool(true), - HealthCheckPort: aws.String("infrav1.DefaultAPIServerPort"), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), HealthCheckProtocol: aws.String("tcp"), Name: aws.String("name"), Port: aws.Int64(infrav1.DefaultAPIServerPort), @@ -1542,19 +1831,32 @@ func TestCreateNLB(t 
*testing.T) { Value: aws.String("tag"), }, }, + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), })).Return(&elbv2.CreateTargetGroupOutput{ TargetGroups: []*elbv2.TargetGroup{ { - TargetGroupArn: aws.String("target-group::arn"), - TargetGroupName: aws.String("name"), - VpcId: aws.String(vpcID), + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("name"), + VpcId: aws.String(vpcID), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), }, }, }, nil) + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{}, + }, nil) m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ DefaultActions: []*elbv2.Action{ { - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Type: aws.String(elbv2.ActionTypeEnumForward), }, }, @@ -1575,74 +1877,116 @@ func TestCreateNLB(t *testing.T) { }, }, nil) }, - check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + check: func(t *testing.T, tgs []*elbv2.TargetGroup, listeners []*elbv2.Listener, err error) { t.Helper() if err != nil { t.Fatalf("did not expect error: %v", err) } - if lb.DNSName != dns { - t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + + if len(tgs) != 1 { + t.Fatalf("did not create target groups") + } + + if len(listeners) != 1 { + t.Fatalf("did not create any listeners") } }, }, { - name: "load balancer is not an NLB scope security groups will be added", - spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { - spec.SecurityGroupIDs = []string{"sg-id"} - return spec - }, + name: "NLB with HTTP health check", awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { - acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeALB + acl.Spec.ControlPlaneLoadBalancer.Scheme = &infrav1.ELBSchemeInternetFacing + acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeNLB + acl.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol = &infrav1.ELBProtocolHTTP return acl }, + spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + tg := stubInfraV1TargetGroupSpecAPI + tg.VpcID = vpcID + tg.HealthCheck.Protocol = aws.String("HTTP") + tg.HealthCheck.Port = aws.String(infrav1.DefaultAPIServerPortString) + tg.HealthCheck.Path = aws.String("/readyz") + spec.ELBListeners = []infrav1.Listener{ + { + Protocol: "TCP", + Port: infrav1.DefaultAPIServerPort, + TargetGroup: tg, + }, + } + return spec + }, elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { - m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ - Name: aws.String(elbName), - Scheme: aws.String("internet-facing"), - Type: aws.String("application"), - Subnets: aws.StringSlice([]string{clusterSubnetID}), + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeTargetGroupsOutput{ + 
TargetGroups: []*elbv2.TargetGroup{}, + }, nil) + m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ + Name: aws.String("name"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + VpcId: aws.String(vpcID), + HealthCheckEnabled: aws.Bool(true), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("HTTP"), + HealthCheckPath: aws.String("/readyz"), + HealthCheckIntervalSeconds: aws.Int64(10), + HealthCheckTimeoutSeconds: aws.Int64(5), + HealthyThresholdCount: aws.Int64(5), + UnhealthyThresholdCount: aws.Int64(3), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, - SecurityGroups: aws.StringSlice([]string{"sg-id"}), - })).Return(&elbv2.CreateLoadBalancerOutput{ - LoadBalancers: []*elbv2.LoadBalancer{ + })).Return(&elbv2.CreateTargetGroupOutput{ + TargetGroups: []*elbv2.TargetGroup{ { - LoadBalancerArn: aws.String(elbArn), - LoadBalancerName: aws.String(elbName), - Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), - DNSName: aws.String(dns), + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("name"), + VpcId: aws.String(vpcID), + HealthCheckEnabled: aws.Bool(true), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("HTTP"), + HealthCheckPath: aws.String("/readyz"), + HealthCheckIntervalSeconds: aws.Int64(10), + HealthCheckTimeoutSeconds: aws.Int64(5), + HealthyThresholdCount: aws.Int64(5), + UnhealthyThresholdCount: aws.Int64(3), }, }, }, nil) - m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ - HealthCheckEnabled: aws.Bool(true), - HealthCheckPort: aws.String("infrav1.DefaultAPIServerPort"), - HealthCheckProtocol: aws.String("tcp"), - Name: aws.String("name"), - Port: aws.Int64(infrav1.DefaultAPIServerPort), - Protocol: aws.String("TCP"), - VpcId: aws.String(vpcID), + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{}, + }, nil) + m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: aws.String(tgArn), + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + LoadBalancerArn: aws.String(elbArn), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), Tags: []*elbv2.Tag{ { Key: aws.String("test"), Value: aws.String("tag"), }, }, - })).Return(&elbv2.CreateTargetGroupOutput{ - TargetGroups: []*elbv2.TargetGroup{ + })).Return(&elbv2.CreateListenerOutput{ + Listeners: []*elbv2.Listener{ { - TargetGroupArn: aws.String("target-group::arn"), - TargetGroupName: aws.String("name"), - VpcId: aws.String(vpcID), + ListenerArn: aws.String("listener::arn"), }, }, }, nil) m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Attributes: []*elbv2.TargetGroupAttribute{ { Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), @@ -1650,10 +1994,93 @@ func TestCreateNLB(t *testing.T) { }, }, })).Return(nil, nil) + }, + check: func(t *testing.T, tgs []*elbv2.TargetGroup, _ []*elbv2.Listener, err error) { + t.Helper() + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + got := *tgs[0].HealthCheckProtocol + want := "HTTP" + if got != want { + t.Fatalf("Health Check protocol for the API Target group did not equal expected value: %s; was: '%s'", want, got) + } + }, + 
}, + { + name: "NLB with HTTPS health check", + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + acl.Spec.ControlPlaneLoadBalancer.Scheme = &infrav1.ELBSchemeInternetFacing + acl.Spec.ControlPlaneLoadBalancer.LoadBalancerType = infrav1.LoadBalancerTypeNLB + acl.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol = &infrav1.ELBProtocolHTTPS + return acl + }, + spec: func(spec infrav1.LoadBalancer) infrav1.LoadBalancer { + tg := stubInfraV1TargetGroupSpecAPI + tg.VpcID = vpcID + tg.HealthCheck.Protocol = aws.String("HTTPS") + tg.HealthCheck.Port = aws.String(infrav1.DefaultAPIServerPortString) + tg.HealthCheck.Path = aws.String("/readyz") + spec.ELBListeners = []infrav1.Listener{ + { + Protocol: "TCP", + Port: infrav1.DefaultAPIServerPort, + TargetGroup: tg, + }, + } + return spec + }, + elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeTargetGroupsOutput{ + TargetGroups: []*elbv2.TargetGroup{}, + }, nil) + m.CreateTargetGroup(gomock.Eq(&elbv2.CreateTargetGroupInput{ + Name: aws.String("name"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + VpcId: aws.String(vpcID), + HealthCheckEnabled: aws.Bool(true), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("HTTPS"), + HealthCheckPath: aws.String("/readyz"), + HealthCheckIntervalSeconds: aws.Int64(10), + HealthCheckTimeoutSeconds: aws.Int64(5), + HealthyThresholdCount: aws.Int64(5), + UnhealthyThresholdCount: aws.Int64(3), + Tags: []*elbv2.Tag{ + { + Key: aws.String("test"), + Value: aws.String("tag"), + }, + }, + })).Return(&elbv2.CreateTargetGroupOutput{ + TargetGroups: []*elbv2.TargetGroup{ + { + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("name"), + VpcId: aws.String(vpcID), + HealthCheckEnabled: aws.Bool(true), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("HTTPS"), + HealthCheckPath: aws.String("/readyz"), + HealthCheckIntervalSeconds: aws.Int64(10), + HealthCheckTimeoutSeconds: aws.Int64(5), + HealthyThresholdCount: aws.Int64(5), + UnhealthyThresholdCount: aws.Int64(3), + }, + }, + }, nil) + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })).Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{}, + }, nil) m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ DefaultActions: []*elbv2.Action{ { - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), Type: aws.String(elbv2.ActionTypeEnumForward), }, }, @@ -1673,14 +2100,25 @@ func TestCreateNLB(t *testing.T) { }, }, }, nil) + m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ + TargetGroupArn: aws.String(tgArn), + Attributes: []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), + Value: aws.String("false"), + }, + }, + })).Return(nil, nil) }, - check: func(t *testing.T, lb *infrav1.LoadBalancer, err error) { + check: func(t *testing.T, tgs []*elbv2.TargetGroup, _ []*elbv2.Listener, err error) { t.Helper() if err != nil { t.Fatalf("did not expect error: %v", err) } - if lb.DNSName != dns { - t.Fatalf("DNSName did not equal expected value; was: '%s'", lb.DNSName) + got := *tgs[0].HealthCheckProtocol + want := "HTTPS" + if got != want { + t.Fatalf("Health Check protocol for the API Target group did 
not equal expected value: %s; was: '%s'", want, got) } }, }, @@ -1751,7 +2189,7 @@ func TestCreateNLB(t *testing.T) { VpcID: vpcID, HealthCheck: &infrav1.TargetGroupHealthCheck{ Protocol: aws.String("tcp"), - Port: aws.String("infrav1.DefaultAPIServerPort"), + Port: aws.String(infrav1.DefaultAPIServerPortString), }, }, }, @@ -1761,8 +2199,8 @@ func TestCreateNLB(t *testing.T) { } spec := tc.spec(*loadBalancerSpec) - lb, err := s.createLB(&spec, clusterScope.ControlPlaneLoadBalancer()) - tc.check(t, lb, err) + tgs, listeners, err := s.reconcileTargetGroupsAndListeners(spec.ARN, &spec, clusterScope.ControlPlaneLoadBalancer()) + tc.check(t, tgs, listeners, err) }) } } @@ -1774,6 +2212,7 @@ func TestReconcileV2LB(t *testing.T) { clusterSubnetID = "subnet-1" elbName = "bar-apiserver" elbArn = "arn::apiserver" + tgArn = "arn::target-group" vpcID = "vpc-id" az = "us-west-1a" ) @@ -1878,6 +2317,20 @@ func TestReconcileV2LB(t *testing.T) { }, }, }, nil) + m.DescribeTargetGroups(gomock.Eq(&elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: aws.String(elbArn), + })). + Return(&elbv2.DescribeTargetGroupsOutput{ + NextMarker: new(string), + TargetGroups: []*elbv2.TargetGroup{ + { + HealthCheckEnabled: aws.Bool(true), + LoadBalancerArns: []*string{aws.String(elbArn)}, + Matcher: &elbv2.Matcher{}, + TargetGroupArn: aws.String(tgArn), + TargetGroupName: aws.String("targetGroup"), + }}, + }, nil) m.ModifyLoadBalancerAttributes(&elbv2.ModifyLoadBalancerAttributesInput{ LoadBalancerArn: aws.String(elbArn), Attributes: []*elbv2.LoadBalancerAttribute{ @@ -1887,6 +2340,107 @@ func TestReconcileV2LB(t *testing.T) { }, }}). Return(&elbv2.ModifyLoadBalancerAttributesOutput{}, nil) + + m.CreateTargetGroup(helpers.PartialMatchCreateTargetGroupInput(t, &elbv2.CreateTargetGroupInput{ + HealthCheckEnabled: aws.Bool(true), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckPort: aws.String(infrav1.DefaultAPIServerPortString), + HealthCheckProtocol: aws.String("TCP"), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + // Note: this is treated as a prefix with the partial matcher. 
+ Name: aws.String("apiserver-target"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + Tags: []*elbv2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("bar-apiserver"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/bar"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("apiserver"), + }, + }, + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + VpcId: aws.String(vpcID), + })).Return(&elbv2.CreateTargetGroupOutput{ + TargetGroups: []*elbv2.TargetGroup{ + { + TargetGroupArn: aws.String(tgArn), + VpcId: aws.String(vpcID), + HealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + HealthCheckIntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + HealthCheckTimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + }, + }, + }, nil) + + m.ModifyTargetGroupAttributes(gomock.Eq(&elbv2.ModifyTargetGroupAttributesInput{ + TargetGroupArn: aws.String(tgArn), + Attributes: []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(infrav1.TargetGroupAttributeEnablePreserveClientIP), + Value: aws.String("false"), + }, + }, + })).Return(nil, nil) + + m.DescribeListeners(gomock.Eq(&elbv2.DescribeListenersInput{ + LoadBalancerArn: aws.String(elbArn), + })). + Return(&elbv2.DescribeListenersOutput{ + Listeners: []*elbv2.Listener{{ + DefaultActions: []*elbv2.Action{{ + TargetGroupArn: aws.String("arn::targetgroup"), + }}, + ListenerArn: aws.String("arn::listener"), + LoadBalancerArn: aws.String(elbArn), + }}, + }, nil) + m.CreateListener(gomock.Eq(&elbv2.CreateListenerInput{ + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: aws.String(tgArn), + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + LoadBalancerArn: aws.String(elbArn), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + Tags: []*elbv2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("bar-apiserver"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/bar"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("apiserver"), + }, + }, + })).Return(&elbv2.CreateListenerOutput{ + Listeners: []*elbv2.Listener{ + { + DefaultActions: []*elbv2.Action{ + { + TargetGroupArn: aws.String(tgArn), + Type: aws.String(elbv2.ActionTypeEnumForward), + }, + }, + ListenerArn: aws.String("listener::arn"), + Port: aws.Int64(infrav1.DefaultAPIServerPort), + Protocol: aws.String("TCP"), + }, + }}, nil) m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return( &elbv2.DescribeLoadBalancerAttributesOutput{ Attributes: []*elbv2.LoadBalancerAttribute{ @@ -1899,9 +2453,7 @@ func TestReconcileV2LB(t *testing.T) { Value: aws.String(string(infrav1.ResourceLifecycleOwned)), }, }, - }, - nil, - ) + }, nil) m.DescribeTags(&elbv2.DescribeTagsInput{ResourceArns: []*string{aws.String(elbArn)}}).Return( &elbv2.DescribeTagsOutput{ TagDescriptions: []*elbv2.TagDescription{ @@ -1915,9 +2467,7 @@ func TestReconcileV2LB(t *testing.T) { }, }, }, - }, - nil, - ) + }, nil) // Avoid the need to sort the AddTagsInput.Tags slice m.AddTags(gomock.AssignableToTypeOf(&elbv2.AddTagsInput{})).Return(&elbv2.AddTagsOutput{}, nil) @@ -2180,7 +2730,7 @@ func 
TestReconcileLoadbalancers(t *testing.T) { } func TestDeleteAPIServerELB(t *testing.T) { - clusterName := "bar" //nolint:goconst // does not need to be a package-level const + clusterName := "bar" elbName := "bar-apiserver" tests := []struct { name string @@ -2377,6 +2927,7 @@ func TestDeleteNLB(t *testing.T) { clusterName := "bar" elbName := "bar-apiserver" elbArn := "apiserver::arn" + tgArn := "arn::target-group" tests := []struct { name string elbv2ApiMock func(m *mocks.MockELBV2APIMockRecorder) @@ -2486,11 +3037,11 @@ func TestDeleteNLB(t *testing.T) { m.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{LoadBalancerArn: aws.String(elbArn)}).Return(&elbv2.DescribeTargetGroupsOutput{ TargetGroups: []*elbv2.TargetGroup{ { - TargetGroupArn: aws.String("target-group::arn"), + TargetGroupArn: aws.String(tgArn), }, }, }, nil) - m.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{TargetGroupArn: aws.String("target-group::arn")}).Return(&elbv2.DeleteTargetGroupOutput{}, nil) + m.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{TargetGroupArn: aws.String(tgArn)}).Return(&elbv2.DeleteTargetGroupOutput{}, nil) // delete the load balancer m.DeleteLoadBalancer(&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: aws.String(elbArn)}).Return( @@ -2988,3 +3539,257 @@ func setupScheme() (*runtime.Scheme, error) { } return scheme, nil } + +func stubGetBaseService(t *testing.T, clusterName string) *Service { + t.Helper() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + rgapiMock := mocks.NewMockResourceGroupsTaggingAPIAPI(mockCtrl) + elbV2ApiMock := mocks.NewMockELBV2API(mockCtrl) + + scheme, err := setupScheme() + if err != nil { + t.Fatal(err) + } + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: clusterName}, + Spec: infrav1.AWSClusterSpec{ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{ + Scheme: &infrav1.ELBSchemeInternetFacing, + LoadBalancerType: infrav1.LoadBalancerTypeNLB, + }}, + } + + client := fake.NewClientBuilder().WithScheme(scheme).Build() + ctx := context.TODO() + client.Create(ctx, awsCluster) + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: clusterName, + }, + }, + AWSCluster: awsCluster, + Client: client, + }) + if err != nil { + t.Fatal(err) + } + + return &Service{ + scope: clusterScope, + ResourceTaggingClient: rgapiMock, + ELBV2Client: elbV2ApiMock, + } +} + +func TestService_getAPITargetGroupHealthCheck(t *testing.T) { + tests := []struct { + name string + lbSpec *infrav1.AWSLoadBalancerSpec + want *infrav1.TargetGroupHealthCheck + }{ + { + name: "default config", + lbSpec: nil, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("TCP"), + Port: aws.String("6443"), + Path: nil, + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + { + name: "default attributes, API health check TCP", + lbSpec: &infrav1.AWSLoadBalancerSpec{}, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("TCP"), + Port: aws.String("6443"), + Path: nil, + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: 
aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + { + name: "default attributes, API health check HTTP", + lbSpec: &infrav1.AWSLoadBalancerSpec{ + HealthCheckProtocol: &infrav1.ELBProtocolHTTP, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("HTTP"), + Port: aws.String("6443"), + Path: aws.String("/readyz"), + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + { + name: "default attributes, API health check HTTPS", + lbSpec: &infrav1.AWSLoadBalancerSpec{ + HealthCheckProtocol: &infrav1.ELBProtocolHTTPS, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("HTTPS"), + Port: aws.String("6443"), + Path: aws.String("/readyz"), + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := stubGetBaseService(t, "foo") + if got := s.getAPITargetGroupHealthCheck(tt.lbSpec); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Service.getAPITargetGroupHealthCheck() Got unexpected result:\n%v", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestService_getAdditionalTargetGroupHealthCheck(t *testing.T) { + tests := []struct { + name string + listener infrav1.AdditionalListenerSpec + want *infrav1.TargetGroupHealthCheck + wantErr bool + }{ + { + name: "TCP defaults", + listener: infrav1.AdditionalListenerSpec{ + Protocol: "TCP", + Port: 22623, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("TCP"), + Port: aws.String("22623"), + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + { + name: "Listener TCP, Health check protocol TCP, probe defaults", + listener: infrav1.AdditionalListenerSpec{ + Port: 22623, + Protocol: infrav1.ELBProtocolTCP, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("TCP"), + Port: aws.String("22623"), + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + { + name: "Listener TCP, Health check protocol HTTP, probe defaults", + listener: infrav1.AdditionalListenerSpec{ + Port: 22623, + Protocol: infrav1.ELBProtocolTCP, + HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{ + Protocol: aws.String("HTTP"), + Path: aws.String("/healthz"), + }, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("HTTP"), + Path: aws.String("/healthz"), + Port: aws.String("22623"), + IntervalSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckIntervalSec), + 
TimeoutSeconds: aws.Int64(infrav1.DefaultAPIServerHealthCheckTimeoutSec), + ThresholdCount: aws.Int64(infrav1.DefaultAPIServerHealthThresholdCount), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + { + name: "Listener TCP, Health check protocol HTTP, probe customized", + listener: infrav1.AdditionalListenerSpec{ + Port: 22623, + Protocol: infrav1.ELBProtocolTCP, + HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{ + Protocol: aws.String("HTTP"), + Path: aws.String("/healthz"), + IntervalSeconds: aws.Int64(5), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(2), + UnhealthyThresholdCount: aws.Int64(2), + }, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("HTTP"), + Port: aws.String("22623"), + Path: aws.String("/healthz"), + IntervalSeconds: aws.Int64(5), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(2), + UnhealthyThresholdCount: aws.Int64(2), + }, + }, + { + name: "Listener TCP, Health check protocol HTTPS, custom health check port and probes", + listener: infrav1.AdditionalListenerSpec{ + Port: 22623, + Protocol: infrav1.ELBProtocolTCP, + HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{ + Protocol: aws.String("HTTPS"), + Port: aws.String("22624"), + Path: aws.String("/healthz"), + IntervalSeconds: aws.Int64(5), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(2), + UnhealthyThresholdCount: aws.Int64(2), + }, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("HTTPS"), + Port: aws.String("22624"), + Path: aws.String("/healthz"), + IntervalSeconds: aws.Int64(5), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(2), + UnhealthyThresholdCount: aws.Int64(2), + }, + }, + { + name: "Listener TCP, Health check protocol TCP, custom health check port and probes, missing UnhealthyThresholdCount, want default", + listener: infrav1.AdditionalListenerSpec{ + Port: 22623, + Protocol: infrav1.ELBProtocolTCP, + HealthCheck: &infrav1.TargetGroupHealthCheckAdditionalSpec{ + IntervalSeconds: aws.Int64(5), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(2), + }, + }, + want: &infrav1.TargetGroupHealthCheck{ + Protocol: aws.String("TCP"), + Port: aws.String("22623"), + IntervalSeconds: aws.Int64(5), + TimeoutSeconds: aws.Int64(5), + ThresholdCount: aws.Int64(2), + UnhealthyThresholdCount: aws.Int64(infrav1.DefaultAPIServerUnhealthThresholdCount), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := stubGetBaseService(t, "bar") + if got := s.getAdditionalTargetGroupHealthCheck(tt.listener); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Service.getAdditionalTargetGroupHealthCheck() Got unexpected result:\n %v", cmp.Diff(got, tt.want)) + } + }) + } +} diff --git a/pkg/cloud/services/elb/service.go b/pkg/cloud/services/elb/service.go index b1b78ca358..709329001b 100644 --- a/pkg/cloud/services/elb/service.go +++ b/pkg/cloud/services/elb/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package elb provides a service for managing AWS load balancers. package elb import ( @@ -23,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" ) // Service holds a collection of interfaces. 
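The health-check defaulting exercised by TestService_getAPITargetGroupHealthCheck above boils down to: probe the API server port (6443) over plain TCP unless HealthCheckProtocol is set to HTTP or HTTPS, in which case the probe targets /readyz on the same port. A minimal, hypothetical sketch of that rule follows; the type and function names are illustrative stand-ins rather than the provider's own code, and the 10s/5s/5/3 numbers assume the DefaultAPIServer* constants match the values used in the "NLB with HTTP health check" case above.

```go
package main

import "fmt"

// targetGroupHealthCheck is a hypothetical stand-in for the provider's health-check spec.
type targetGroupHealthCheck struct {
	Protocol                string
	Port                    string
	Path                    string // empty for plain TCP probes
	IntervalSeconds         int64
	TimeoutSeconds          int64
	ThresholdCount          int64
	UnhealthyThresholdCount int64
}

// apiHealthCheckFor sketches the defaulting rule the tests assert: TCP probes
// the API server port directly, while HTTP and HTTPS probe the kube-apiserver
// /readyz endpoint on the same port.
func apiHealthCheckFor(protocol string) targetGroupHealthCheck {
	hc := targetGroupHealthCheck{
		Protocol:                "TCP",
		Port:                    "6443",
		IntervalSeconds:         10,
		TimeoutSeconds:          5,
		ThresholdCount:          5,
		UnhealthyThresholdCount: 3,
	}
	if protocol == "HTTP" || protocol == "HTTPS" {
		hc.Protocol = protocol
		hc.Path = "/readyz"
	}
	return hc
}

func main() {
	for _, p := range []string{"TCP", "HTTP", "HTTPS"} {
		fmt.Printf("%s -> %+v\n", p, apiHealthCheckFor(p))
	}
}
```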
@@ -34,6 +36,7 @@ type Service struct { ELBClient elbiface.ELBAPI ELBV2Client elbv2iface.ELBV2API ResourceTaggingClient resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI + netService *network.Service } // NewService returns a new service given the api clients. @@ -44,5 +47,6 @@ func NewService(elbScope scope.ELBScope) *Service { ELBClient: scope.NewELBClient(elbScope, elbScope, elbScope, elbScope.InfraCluster()), ELBV2Client: scope.NewELBv2Client(elbScope, elbScope, elbScope, elbScope.InfraCluster()), ResourceTaggingClient: scope.NewResourgeTaggingClient(elbScope, elbScope, elbScope, elbScope.InfraCluster()), + netService: network.NewService(elbScope.(scope.NetworkScope)), } } diff --git a/pkg/cloud/services/gc/ec2.go b/pkg/cloud/services/gc/ec2.go index 817dbd78a1..823163dddc 100644 --- a/pkg/cloud/services/gc/ec2.go +++ b/pkg/cloud/services/gc/ec2.go @@ -72,7 +72,7 @@ func (s *Service) deleteSecurityGroup(ctx context.Context, securityGroupID strin } // getProviderOwnedSecurityGroups gets cloud provider created security groups of ELBs for this cluster, filtering by tag: kubernetes.io/cluster/:owned and VPC Id. -func (s *Service) getProviderOwnedSecurityGroups(ctx context.Context) ([]*AWSResource, error) { +func (s *Service) getProviderOwnedSecurityGroups(_ context.Context) ([]*AWSResource, error) { input := &ec2.DescribeSecurityGroupsInput{ Filters: []*ec2.Filter{ filter.EC2.ProviderOwned(s.scope.KubernetesClusterName()), diff --git a/pkg/cloud/services/gc/options.go b/pkg/cloud/services/gc/options.go index c2ebb49af7..445977bcd3 100644 --- a/pkg/cloud/services/gc/options.go +++ b/pkg/cloud/services/gc/options.go @@ -54,6 +54,7 @@ func withEC2Client(client ec2iface.EC2API) ServiceOption { } } +// WithGCStrategy is an option for specifying using the alternative GC strategy. func WithGCStrategy(alternativeGCStrategy bool) ServiceOption { if alternativeGCStrategy { return func(s *Service) { diff --git a/pkg/cloud/services/gc/service.go b/pkg/cloud/services/gc/service.go index 9eb9f789a6..27b48d653e 100644 --- a/pkg/cloud/services/gc/service.go +++ b/pkg/cloud/services/gc/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package gc provides a way to perform gc operations against a tenant/workload/child cluster. package gc import ( diff --git a/pkg/cloud/services/iamauth/mock_iamauth/doc.go b/pkg/cloud/services/iamauth/mock_iamauth/doc.go index 15669ccb8f..d33311cf0a 100644 --- a/pkg/cloud/services/iamauth/mock_iamauth/doc.go +++ b/pkg/cloud/services/iamauth/mock_iamauth/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_iamauth provides a mock implementation for the IAMAPI interface. // Run go generate to regenerate this mock. 
+// //go:generate ../../../../../hack/tools/bin/mockgen -destination iamauth_mock.go -package mock_iamauth github.com/aws/aws-sdk-go/service/iam/iamiface IAMAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt iamauth_mock.go > _iamauth_mock.go && mv _iamauth_mock.go iamauth_mock.go" - package mock_iamauth //nolint:stylecheck diff --git a/pkg/cloud/services/iamauth/service.go b/pkg/cloud/services/iamauth/service.go index 477e7c4928..27241b0c69 100644 --- a/pkg/cloud/services/iamauth/service.go +++ b/pkg/cloud/services/iamauth/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package iamauth provides a way to interact with AWS IAM. package iamauth import ( diff --git a/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go b/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go index 877fd9feb0..9d3af84e3b 100644 --- a/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go +++ b/pkg/cloud/services/instancestate/mock_eventbridgeiface/doc.go @@ -18,4 +18,5 @@ limitations under the License. //go:generate ../../../../../hack/tools/bin/mockgen -destination eventbridgeiface_mock.go -package mock_eventbridgeiface github.com/aws/aws-sdk-go/service/eventbridge/eventbridgeiface EventBridgeAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt eventbridgeiface_mock.go > _eventbridgeiface_mock.go && mv _eventbridgeiface_mock.go eventbridgeiface_mock.go" +// Package mock_eventbridgeiface provides a mock implementation for the EventBridgeAPI interface. package mock_eventbridgeiface //nolint:stylecheck diff --git a/pkg/cloud/services/instancestate/mock_sqsiface/doc.go b/pkg/cloud/services/instancestate/mock_sqsiface/doc.go index 356d813633..57fb6a9347 100644 --- a/pkg/cloud/services/instancestate/mock_sqsiface/doc.go +++ b/pkg/cloud/services/instancestate/mock_sqsiface/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_sqsiface provides a mock implementation for the SQSAPI interface. // Run go generate to regenerate this mock. +// //go:generate ../../../../../hack/tools/bin/mockgen -destination sqsiface_mock.go -package mock_sqsiface github.com/aws/aws-sdk-go/service/sqs/sqsiface SQSAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt sqsiface_mock.go > _sqsiface_mock.go && mv _sqsiface_mock.go sqsiface_mock.go" - package mock_sqsiface //nolint:stylecheck diff --git a/pkg/cloud/services/instancestate/service.go b/pkg/cloud/services/instancestate/service.go index 62ea5be2f1..b798967ffc 100644 --- a/pkg/cloud/services/instancestate/service.go +++ b/pkg/cloud/services/instancestate/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package instancestate provides a way to interact with the EC2 instance state. package instancestate import ( diff --git a/pkg/cloud/services/interfaces.go b/pkg/cloud/services/interfaces.go index 893c5ae278..f993d9bd84 100644 --- a/pkg/cloud/services/interfaces.go +++ b/pkg/cloud/services/interfaces.go @@ -14,9 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package services contains the interfaces for the AWS services. 
package services import ( + "context" + apimachinerytypes "k8s.io/apimachinery/pkg/types" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -78,6 +81,11 @@ type EC2Interface interface { LaunchTemplateNeedsUpdate(scope scope.LaunchTemplateScope, incoming *expinfrav1.AWSLaunchTemplate, existing *expinfrav1.AWSLaunchTemplate) (bool, error) DeleteBastion() error ReconcileBastion() error + // ReconcileElasticIPFromPublicPool reconciles the elastic IP from a custom Public IPv4 Pool. + ReconcileElasticIPFromPublicPool(pool *infrav1.ElasticIPPool, instance *infrav1.Instance) error + + // ReleaseElasticIP reconciles the elastic IP from a custom Public IPv4 Pool. + ReleaseElasticIP(instanceID string) error } // MachinePoolReconcileInterface encapsulates high-level reconciliation functions regarding EC2 reconciliation. It is @@ -130,3 +138,18 @@ type ObjectStoreInterface interface { Delete(m *scope.MachineScope) error Create(m *scope.MachineScope, data []byte) (objectURL string, err error) } + +// AWSNodeInterface installs the CNI for EKS clusters. +type AWSNodeInterface interface { + ReconcileCNI(ctx context.Context) error +} + +// IAMAuthenticatorInterface installs aws-iam-authenticator for EKS clusters. +type IAMAuthenticatorInterface interface { + ReconcileIAMAuthenticator(ctx context.Context) error +} + +// KubeProxyInterface installs kube-proxy for EKS clusters. +type KubeProxyInterface interface { + ReconcileKubeProxy(ctx context.Context) error +} diff --git a/pkg/cloud/services/kubeproxy/service.go b/pkg/cloud/services/kubeproxy/service.go index 16fbf38eed..17a4bd73af 100644 --- a/pkg/cloud/services/kubeproxy/service.go +++ b/pkg/cloud/services/kubeproxy/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package kubeproxy provides a way to interact with the kube-proxy service. package kubeproxy import ( diff --git a/pkg/cloud/services/mock_services/aws_node_interface_mock.go b/pkg/cloud/services/mock_services/aws_node_interface_mock.go new file mode 100644 index 0000000000..7e503e3d59 --- /dev/null +++ b/pkg/cloud/services/mock_services/aws_node_interface_mock.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: AWSNodeInterface) + +// Package mock_services is a generated GoMock package. +package mock_services + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockAWSNodeInterface is a mock of AWSNodeInterface interface. +type MockAWSNodeInterface struct { + ctrl *gomock.Controller + recorder *MockAWSNodeInterfaceMockRecorder +} + +// MockAWSNodeInterfaceMockRecorder is the mock recorder for MockAWSNodeInterface. +type MockAWSNodeInterfaceMockRecorder struct { + mock *MockAWSNodeInterface +} + +// NewMockAWSNodeInterface creates a new mock instance. 
+func NewMockAWSNodeInterface(ctrl *gomock.Controller) *MockAWSNodeInterface { + mock := &MockAWSNodeInterface{ctrl: ctrl} + mock.recorder = &MockAWSNodeInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAWSNodeInterface) EXPECT() *MockAWSNodeInterfaceMockRecorder { + return m.recorder +} + +// ReconcileCNI mocks base method. +func (m *MockAWSNodeInterface) ReconcileCNI(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReconcileCNI", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReconcileCNI indicates an expected call of ReconcileCNI. +func (mr *MockAWSNodeInterfaceMockRecorder) ReconcileCNI(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileCNI", reflect.TypeOf((*MockAWSNodeInterface)(nil).ReconcileCNI), arg0) +} diff --git a/pkg/cloud/services/mock_services/doc.go b/pkg/cloud/services/mock_services/doc.go index 04493e0002..35d0b43cbe 100644 --- a/pkg/cloud/services/mock_services/doc.go +++ b/pkg/cloud/services/mock_services/doc.go @@ -14,7 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_services provides a way to generate mock services for the cloud provider. // Run go generate to regenerate this mock. //nolint:revive +// //go:generate ../../../../hack/tools/bin/mockgen -destination ec2_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services EC2Interface //go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt ec2_interface_mock.go > _ec2_interface_mock.go && mv _ec2_interface_mock.go ec2_interface_mock.go" //go:generate ../../../../hack/tools/bin/mockgen -destination reconcile_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services MachinePoolReconcileInterface @@ -31,5 +33,10 @@ limitations under the License. 
//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt network_interface_mock.go > _network_interface_mock.go && mv _network_interface_mock.go network_interface_mock.go" //go:generate ../../../../hack/tools/bin/mockgen -destination security_group_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services SecurityGroupInterface //go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt security_group_interface_mock.go > _security_group_interface_mock.go && mv _security_group_interface_mock.go security_group_interface_mock.go" - +//go:generate ../../../../hack/tools/bin/mockgen -destination aws_node_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services AWSNodeInterface +//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt aws_node_interface_mock.go > _aws_node_interface_mock.go && mv _aws_node_interface_mock.go aws_node_interface_mock.go" +//go:generate ../../../../hack/tools/bin/mockgen -destination iam_authenticator_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services IAMAuthenticatorInterface +//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt iam_authenticator_interface_mock.go > _iam_authenticator_interface_mock.go && mv _iam_authenticator_interface_mock.go iam_authenticator_interface_mock.go" +//go:generate ../../../../hack/tools/bin/mockgen -destination kube_proxy_interface_mock.go -package mock_services sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services KubeProxyInterface +//go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt kube_proxy_interface_mock.go > _kube_proxy_interface_mock.go && mv _kube_proxy_interface_mock.go kube_proxy_interface_mock.go" package mock_services //nolint:stylecheck diff --git a/pkg/cloud/services/mock_services/ec2_interface_mock.go b/pkg/cloud/services/mock_services/ec2_interface_mock.go index 922d5f3360..02c3e09c47 100644 --- a/pkg/cloud/services/mock_services/ec2_interface_mock.go +++ b/pkg/cloud/services/mock_services/ec2_interface_mock.go @@ -333,6 +333,34 @@ func (mr *MockEC2InterfaceMockRecorder) ReconcileBastion() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileBastion", reflect.TypeOf((*MockEC2Interface)(nil).ReconcileBastion)) } +// ReconcileElasticIPFromPublicPool mocks base method. +func (m *MockEC2Interface) ReconcileElasticIPFromPublicPool(arg0 *v1beta2.ElasticIPPool, arg1 *v1beta2.Instance) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReconcileElasticIPFromPublicPool", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReconcileElasticIPFromPublicPool indicates an expected call of ReconcileElasticIPFromPublicPool. +func (mr *MockEC2InterfaceMockRecorder) ReconcileElasticIPFromPublicPool(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileElasticIPFromPublicPool", reflect.TypeOf((*MockEC2Interface)(nil).ReconcileElasticIPFromPublicPool), arg0, arg1) +} + +// ReleaseElasticIP mocks base method. +func (m *MockEC2Interface) ReleaseElasticIP(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReleaseElasticIP", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReleaseElasticIP indicates an expected call of ReleaseElasticIP. 
+func (mr *MockEC2InterfaceMockRecorder) ReleaseElasticIP(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseElasticIP", reflect.TypeOf((*MockEC2Interface)(nil).ReleaseElasticIP), arg0) +} + // TerminateInstance mocks base method. func (m *MockEC2Interface) TerminateInstance(arg0 string) error { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/mock_services/iam_authenticator_interface_mock.go b/pkg/cloud/services/mock_services/iam_authenticator_interface_mock.go new file mode 100644 index 0000000000..ba34f7a13a --- /dev/null +++ b/pkg/cloud/services/mock_services/iam_authenticator_interface_mock.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: IAMAuthenticatorInterface) + +// Package mock_services is a generated GoMock package. +package mock_services + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockIAMAuthenticatorInterface is a mock of IAMAuthenticatorInterface interface. +type MockIAMAuthenticatorInterface struct { + ctrl *gomock.Controller + recorder *MockIAMAuthenticatorInterfaceMockRecorder +} + +// MockIAMAuthenticatorInterfaceMockRecorder is the mock recorder for MockIAMAuthenticatorInterface. +type MockIAMAuthenticatorInterfaceMockRecorder struct { + mock *MockIAMAuthenticatorInterface +} + +// NewMockIAMAuthenticatorInterface creates a new mock instance. +func NewMockIAMAuthenticatorInterface(ctrl *gomock.Controller) *MockIAMAuthenticatorInterface { + mock := &MockIAMAuthenticatorInterface{ctrl: ctrl} + mock.recorder = &MockIAMAuthenticatorInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIAMAuthenticatorInterface) EXPECT() *MockIAMAuthenticatorInterfaceMockRecorder { + return m.recorder +} + +// ReconcileIAMAuthenticator mocks base method. +func (m *MockIAMAuthenticatorInterface) ReconcileIAMAuthenticator(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReconcileIAMAuthenticator", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReconcileIAMAuthenticator indicates an expected call of ReconcileIAMAuthenticator. +func (mr *MockIAMAuthenticatorInterfaceMockRecorder) ReconcileIAMAuthenticator(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileIAMAuthenticator", reflect.TypeOf((*MockIAMAuthenticatorInterface)(nil).ReconcileIAMAuthenticator), arg0) +} diff --git a/pkg/cloud/services/mock_services/kube_proxy_interface_mock.go b/pkg/cloud/services/mock_services/kube_proxy_interface_mock.go new file mode 100644 index 0000000000..792460fdf1 --- /dev/null +++ b/pkg/cloud/services/mock_services/kube_proxy_interface_mock.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services (interfaces: KubeProxyInterface) + +// Package mock_services is a generated GoMock package. +package mock_services + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockKubeProxyInterface is a mock of KubeProxyInterface interface. +type MockKubeProxyInterface struct { + ctrl *gomock.Controller + recorder *MockKubeProxyInterfaceMockRecorder +} + +// MockKubeProxyInterfaceMockRecorder is the mock recorder for MockKubeProxyInterface. +type MockKubeProxyInterfaceMockRecorder struct { + mock *MockKubeProxyInterface +} + +// NewMockKubeProxyInterface creates a new mock instance. +func NewMockKubeProxyInterface(ctrl *gomock.Controller) *MockKubeProxyInterface { + mock := &MockKubeProxyInterface{ctrl: ctrl} + mock.recorder = &MockKubeProxyInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKubeProxyInterface) EXPECT() *MockKubeProxyInterfaceMockRecorder { + return m.recorder +} + +// ReconcileKubeProxy mocks base method. +func (m *MockKubeProxyInterface) ReconcileKubeProxy(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReconcileKubeProxy", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReconcileKubeProxy indicates an expected call of ReconcileKubeProxy. +func (mr *MockKubeProxyInterfaceMockRecorder) ReconcileKubeProxy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileKubeProxy", reflect.TypeOf((*MockKubeProxyInterface)(nil).ReconcileKubeProxy), arg0) +} diff --git a/pkg/cloud/services/network/carriergateways.go b/pkg/cloud/services/network/carriergateways.go new file mode 100644 index 0000000000..6237df9052 --- /dev/null +++ b/pkg/cloud/services/network/carriergateways.go @@ -0,0 +1,145 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/pkg/errors" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" + "sigs.k8s.io/cluster-api/util/conditions" +) + +func (s *Service) reconcileCarrierGateway() error { + if s.scope.VPC().IsUnmanaged(s.scope.Name()) { + s.scope.Trace("Skipping carrier gateway reconcile in unmanaged mode") + return nil + } + + if !s.scope.Subnets().HasPublicSubnetWavelength() { + s.scope.Trace("Skipping carrier gateway reconcile in VPC without subnets in zone type wavelength-zone") + return nil + } + + s.scope.Debug("Reconciling carrier gateway") + + cagw, err := s.describeVpcCarrierGateway() + if awserrors.IsNotFound(err) { + if s.scope.VPC().IsUnmanaged(s.scope.Name()) { + return errors.Errorf("failed to validate network: no carrier gateway found in VPC %q", s.scope.VPC().ID) + } + + cg, err := s.createCarrierGateway() + if err != nil { + return err + } + cagw = cg + } else if err != nil { + return err + } + + s.scope.VPC().CarrierGatewayID = cagw.CarrierGatewayId + + // Make sure tags are up-to-date. + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + buildParams := s.getGatewayTagParams(*cagw.CarrierGatewayId) + tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client)) + if err := tagsBuilder.Ensure(converters.TagsToMap(cagw.Tags)); err != nil { + return false, err + } + return true, nil + }, awserrors.InvalidCarrierGatewayNotFound); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedTagCarrierGateway", "Failed to tag managed Carrier Gateway %q: %v", cagw.CarrierGatewayId, err) + return errors.Wrapf(err, "failed to tag carrier gateway %q", *cagw.CarrierGatewayId) + } + conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + return nil +} + +func (s *Service) deleteCarrierGateway() error { + if s.scope.VPC().IsUnmanaged(s.scope.Name()) { + s.scope.Trace("Skipping carrier gateway deletion in unmanaged mode") + return nil + } + + cagw, err := s.describeVpcCarrierGateway() + if awserrors.IsNotFound(err) { + return nil + } else if err != nil { + return err + } + + deleteReq := &ec2.DeleteCarrierGatewayInput{ + CarrierGatewayId: cagw.CarrierGatewayId, + } + + if _, err = s.EC2Client.DeleteCarrierGatewayWithContext(context.TODO(), deleteReq); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDeleteCarrierGateway", "Failed to delete Carrier Gateway %q previously attached to VPC %q: %v", *cagw.CarrierGatewayId, s.scope.VPC().ID, err) + return errors.Wrapf(err, "failed to delete carrier gateway %q", *cagw.CarrierGatewayId) + } + + record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteCarrierGateway", "Deleted Carrier Gateway %q previously attached to VPC %q", *cagw.CarrierGatewayId, s.scope.VPC().ID) + s.scope.Info("Deleted Carrier Gateway in VPC", "carrier-gateway-id", *cagw.CarrierGatewayId, "vpc-id", s.scope.VPC().ID) + + return nil +} + +func (s *Service) createCarrierGateway() (*ec2.CarrierGateway, error) { + ig, err := 
s.EC2Client.CreateCarrierGatewayWithContext(context.TODO(), &ec2.CreateCarrierGatewayInput{ + VpcId: aws.String(s.scope.VPC().ID), + TagSpecifications: []*ec2.TagSpecification{ + tags.BuildParamsToTagSpecification(ec2.ResourceTypeCarrierGateway, s.getGatewayTagParams(services.TemporaryResourceID)), + }, + }) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedCreateCarrierGateway", "Failed to create new managed Carrier Gateway: %v", err) + return nil, errors.Wrap(err, "failed to create carrier gateway") + } + record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateCarrierGateway", "Created new managed Carrier Gateway %q", *ig.CarrierGateway.CarrierGatewayId) + s.scope.Info("Created Carrier Gateway for VPC", "carrier-gateway-id", *ig.CarrierGateway.CarrierGatewayId, "vpc-id", s.scope.VPC().ID) + + return ig.CarrierGateway, nil +} + +func (s *Service) describeVpcCarrierGateway() (*ec2.CarrierGateway, error) { + out, err := s.EC2Client.DescribeCarrierGatewaysWithContext(context.TODO(), &ec2.DescribeCarrierGatewaysInput{ + Filters: []*ec2.Filter{ + filter.EC2.VPC(s.scope.VPC().ID), + }, + }) + if err != nil { + record.Eventf(s.scope.InfraCluster(), "FailedDescribeCarrierGateway", "Failed to describe carrier gateways in vpc %q: %v", s.scope.VPC().ID, err) + return nil, errors.Wrapf(err, "failed to describe carrier gateways in vpc %q", s.scope.VPC().ID) + } + + if len(out.CarrierGateways) == 0 { + return nil, awserrors.NewNotFound(fmt.Sprintf("no carrier gateways found in vpc %q", s.scope.VPC().ID)) + } + + return out.CarrierGateways[0], nil +} diff --git a/pkg/cloud/services/network/carriergateways_test.go b/pkg/cloud/services/network/carriergateways_test.go new file mode 100644 index 0000000000..6608375c72 --- /dev/null +++ b/pkg/cloud/services/network/carriergateways_test.go @@ -0,0 +1,257 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +func TestReconcileCarrierGateway(t *testing.T) { + testCases := []struct { + name string + input *infrav1.NetworkSpec + expect func(m *mocks.MockEC2APIMockRecorder) + }{ + { + name: "has cagw", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-cagw", + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeCarrierGatewaysInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: aws.StringSlice([]string{"vpc-cagw"}), + }, + }, + })). + Return(&ec2.DescribeCarrierGatewaysOutput{ + CarrierGateways: []*ec2.CarrierGateway{ + { + CarrierGatewayId: ptr.To("cagw-01"), + }, + }, + }, nil).AnyTimes() + + m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})). + Return(nil, nil).AnyTimes() + }, + }, + { + name: "no cagw attached, creates one", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-cagw", + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeCarrierGatewaysInput{})). + Return(&ec2.DescribeCarrierGatewaysOutput{}, nil).AnyTimes() + + m.CreateCarrierGatewayWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateCarrierGatewayInput{})). 
+ Return(&ec2.CreateCarrierGatewayOutput{ + CarrierGateway: &ec2.CarrierGateway{ + CarrierGatewayId: aws.String("cagw-1"), + VpcId: aws.String("vpc-cagw"), + Tags: []*ec2.Tag{ + { + Key: aws.String(infrav1.ClusterTagKey("test-cluster")), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-cagw"), + }, + }, + }, + }, nil).AnyTimes() + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ec2Mock := mocks.NewMockEC2API(mockCtrl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + }, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: *tc.input, + }, + }, + }) + if err != nil { + t.Fatalf("Failed to create test context: %v", err) + } + + tc.expect(ec2Mock.EXPECT()) + + s := NewService(scope) + s.EC2Client = ec2Mock + + if err := s.reconcileCarrierGateway(); err != nil { + t.Fatalf("got an unexpected error: %v", err) + } + mockCtrl.Finish() + }) + } +} + +func TestDeleteCarrierGateway(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + testCases := []struct { + name string + input *infrav1.NetworkSpec + expect func(m *mocks.MockEC2APIMockRecorder) + wantErr bool + }{ + { + name: "Should ignore deletion if vpc is unmanaged", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-cagw", + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) {}, + }, + { + name: "Should ignore deletion if carrier gateway is not found", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-cagw", + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.Eq(&ec2.DescribeCarrierGatewaysInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: aws.StringSlice([]string{"vpc-cagw"}), + }, + }, + })).Return(&ec2.DescribeCarrierGatewaysOutput{}, nil) + }, + }, + { + name: "Should successfully delete the carrier gateway", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-cagw", + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeCarrierGatewaysWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeCarrierGatewaysInput{})). 
+ Return(&ec2.DescribeCarrierGatewaysOutput{ + CarrierGateways: []*ec2.CarrierGateway{ + { + CarrierGatewayId: aws.String("cagw-0"), + VpcId: aws.String("vpc-gateways"), + }, + }, + }, nil) + + m.DeleteCarrierGatewayWithContext(context.TODO(), &ec2.DeleteCarrierGatewayInput{ + CarrierGatewayId: aws.String("cagw-0"), + }).Return(&ec2.DeleteCarrierGatewayOutput{}, nil) + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + ec2Mock := mocks.NewMockEC2API(mockCtrl) + + scheme := runtime.NewScheme() + err := infrav1.AddToScheme(scheme) + g.Expect(err).NotTo(HaveOccurred()) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + }, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: *tc.input, + }, + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + + tc.expect(ec2Mock.EXPECT()) + + s := NewService(scope) + s.EC2Client = ec2Mock + + err = s.deleteCarrierGateway() + if tc.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).NotTo(HaveOccurred()) + }) + } +} diff --git a/pkg/cloud/services/network/eips.go b/pkg/cloud/services/network/eips.go index 666f96652e..f301650797 100644 --- a/pkg/cloud/services/network/eips.go +++ b/pkg/cloud/services/network/eips.go @@ -32,41 +32,53 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" ) -func (s *Service) getOrAllocateAddresses(num int, role string) (eips []string, err error) { +func (s *Service) getOrAllocateAddresses(num int, role string, pool *infrav1.ElasticIPPool) (eips []string, err error) { out, err := s.describeAddresses(role) if err != nil { record.Eventf(s.scope.InfraCluster(), "FailedDescribeAddresses", "Failed to query addresses for role %q: %v", role, err) return nil, errors.Wrap(err, "failed to query addresses") } + // Reuse existing unallocated addreses with the same role. for _, address := range out.Addresses { if address.AssociationId == nil { eips = append(eips, aws.StringValue(address.AllocationId)) } } + // allocate addresses when needed. + tagSpecifications := tags.BuildParamsToTagSpecification(ec2.ResourceTypeElasticIp, s.getEIPTagParams(role)) for len(eips) < num { - ip, err := s.allocateAddress(role) - if err != nil { + allocInput := &ec2.AllocateAddressInput{ + Domain: aws.String("vpc"), + TagSpecifications: []*ec2.TagSpecification{ + tagSpecifications, + }, + } + + // Set EIP to consume from BYO Public IPv4 pools when defined in NetworkSpec with preflight checks. + // The checks makes sure there is free IPs available in the pool before allocating it. + // The check also validate the fallback strategy to consume from Amazon pool when the + // pool is exchausted. 
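+		// When no custom pool is configured, setByoPublicIpv4 is a no-op and the
+		// address below is allocated from the default Amazon pool.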
+ if err := s.setByoPublicIpv4(pool, allocInput); err != nil { return nil, err } + + ip, err := s.allocateAddress(allocInput) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedAllocateAddress", "Failed to allocate Elastic IP for %q: %v", role, err) + return nil, fmt.Errorf("failed to allocate Elastic IP for %q: %w", role, err) + } eips = append(eips, ip) } return eips, nil } -func (s *Service) allocateAddress(role string) (string, error) { - tagSpecifications := tags.BuildParamsToTagSpecification(ec2.ResourceTypeElasticIp, s.getEIPTagParams(role)) - out, err := s.EC2Client.AllocateAddressWithContext(context.TODO(), &ec2.AllocateAddressInput{ - Domain: aws.String("vpc"), - TagSpecifications: []*ec2.TagSpecification{ - tagSpecifications, - }, - }) +func (s *Service) allocateAddress(alloc *ec2.AllocateAddressInput) (string, error) { + out, err := s.EC2Client.AllocateAddressWithContext(context.TODO(), alloc) if err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedAllocateEIP", "Failed to allocate Elastic IP for %q: %v", role, err) - return "", errors.Wrap(err, "failed to allocate Elastic IP") + return "", err } return aws.StringValue(out.AllocationId), nil @@ -103,9 +115,42 @@ func (s *Service) disassociateAddress(ip *ec2.Address) error { return nil } -func (s *Service) releaseAddresses() error { +// releaseAddress releases an given EIP address back to the pool. +func (s *Service) releaseAddress(ip *ec2.Address) error { + if ip.AssociationId != nil { + if _, err := s.EC2Client.DisassociateAddressWithContext(context.TODO(), &ec2.DisassociateAddressInput{ + AssociationId: ip.AssociationId, + }); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDisassociateEIP", "Failed to disassociate Elastic IP %q: %v", *ip.AllocationId, err) + return errors.Errorf("failed to disassociate Elastic IP %q with allocation ID %q: Still associated with association ID %q", *ip.PublicIp, *ip.AllocationId, *ip.AssociationId) + } + } + + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + _, err := s.EC2Client.ReleaseAddressWithContext(context.TODO(), &ec2.ReleaseAddressInput{AllocationId: ip.AllocationId}) + if err != nil { + if ip.AssociationId != nil { + if s.disassociateAddress(ip) != nil { + return false, err + } + } + return false, err + } + return true, nil + }, awserrors.AuthFailure, awserrors.InUseIPAddress); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedReleaseEIP", "Failed to disassociate Elastic IP %q: %v", *ip.AllocationId, err) + return errors.Wrapf(err, "failed to release ElasticIP %q", *ip.AllocationId) + } + + s.scope.Info("released ElasticIP", "eip", *ip.PublicIp, "allocation-id", *ip.AllocationId) + return nil +} + +// releaseAddressesWithFilter discovery address to be released based in filters, returning no error, +// when all addresses have been released. 
+func (s *Service) releaseAddressesWithFilter(filters []*ec2.Filter) error { out, err := s.EC2Client.DescribeAddressesWithContext(context.TODO(), &ec2.DescribeAddressesInput{ - Filters: []*ec2.Filter{filter.EC2.Cluster(s.scope.Name())}, + Filters: filters, }) if err != nil { return errors.Wrapf(err, "failed to describe elastic IPs %q", err) @@ -114,37 +159,21 @@ func (s *Service) releaseAddresses() error { return nil } for i := range out.Addresses { - ip := out.Addresses[i] - if ip.AssociationId != nil { - if _, err := s.EC2Client.DisassociateAddressWithContext(context.TODO(), &ec2.DisassociateAddressInput{ - AssociationId: ip.AssociationId, - }); err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedDisassociateEIP", "Failed to disassociate Elastic IP %q: %v", *ip.AllocationId, err) - return errors.Errorf("failed to disassociate Elastic IP %q with allocation ID %q: Still associated with association ID %q", *ip.PublicIp, *ip.AllocationId, *ip.AssociationId) - } - } - - if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { - _, err := s.EC2Client.ReleaseAddressWithContext(context.TODO(), &ec2.ReleaseAddressInput{AllocationId: ip.AllocationId}) - if err != nil { - if ip.AssociationId != nil { - if s.disassociateAddress(ip) != nil { - return false, err - } - } - return false, err - } - return true, nil - }, awserrors.AuthFailure, awserrors.InUseIPAddress); err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedReleaseEIP", "Failed to disassociate Elastic IP %q: %v", *ip.AllocationId, err) - return errors.Wrapf(err, "failed to release ElasticIP %q", *ip.AllocationId) + if err := s.releaseAddress(out.Addresses[i]); err != nil { + return err } - - s.scope.Info("released ElasticIP", "eip", *ip.PublicIp, "allocation-id", *ip.AllocationId) } return nil } +// releaseAddresses is default cluster release flow, discoverying and releasing all +// addresses associated and owned by the cluster tag. +func (s *Service) releaseAddresses() error { + filters := []*ec2.Filter{filter.EC2.Cluster(s.scope.Name())} + filters = append(filters, filter.EC2.ClusterOwned(s.scope.Name())) + return s.releaseAddressesWithFilter(filters) +} + func (s *Service) getEIPTagParams(role string) infrav1.BuildParams { name := fmt.Sprintf("%s-eip-%s", s.scope.Name(), role) @@ -156,3 +185,75 @@ func (s *Service) getEIPTagParams(role string) infrav1.BuildParams { Additional: s.scope.AdditionalTags(), } } + +// GetOrAllocateAddresses exports the interface to allocate an address from external services. +func (s *Service) GetOrAllocateAddresses(pool *infrav1.ElasticIPPool, num int, role string) (eips []string, err error) { + return s.getOrAllocateAddresses(num, role, pool) +} + +// ReleaseAddressByRole releases EIP addresses filtering by tag CAPA provider role. +func (s *Service) ReleaseAddressByRole(role string) error { + clusterFilter := []*ec2.Filter{filter.EC2.Cluster(s.scope.Name())} + clusterFilter = append(clusterFilter, filter.EC2.ProviderRole(role)) + + return s.releaseAddressesWithFilter(clusterFilter) +} + +// setByoPublicIpv4 check if the config has Public IPv4 Pool defined, then +// check if there are IPs available to consume from allocation, otherwise +// fallback to Amazon pool when explicty failure isn't defined. +func (s *Service) setByoPublicIpv4(pool *infrav1.ElasticIPPool, alloc *ec2.AllocateAddressInput) error { + if pool == nil { + return nil + } + // check if pool has free IP. 
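+	// publicIpv4PoolHasAtLeastNFreeIPs returns (false, nil) when the pool is exhausted
+	// but the fallback order allows the Amazon pool, and an error when no fallback is configured.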
+ ok, err := s.publicIpv4PoolHasAtLeastNFreeIPs(pool, 1) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedAllocateEIP", "Failed to allocate Elastic IP from Public IPv4 pool %q: %w", *pool.PublicIpv4Pool, err) + return fmt.Errorf("failed to update Elastic IP: %w", err) + } + + // use the custom public ipv4 pool to the Elastic IP allocation. + if ok { + alloc.PublicIpv4Pool = pool.PublicIpv4Pool + return nil + } + + // default, don't change allocation config, use Amazon pool. + return nil +} + +// publicIpv4PoolHasAtLeastNFreeIPs check if there are N IPs address available in a Public IPv4 Pool. +func (s *Service) publicIpv4PoolHasAtLeastNFreeIPs(pool *infrav1.ElasticIPPool, want int64) (bool, error) { + if pool == nil { + return true, nil + } + if pool.PublicIpv4Pool == nil { + return true, nil + } + publicIpv4Pool := pool.PublicIpv4Pool + pools, err := s.EC2Client.DescribePublicIpv4Pools(&ec2.DescribePublicIpv4PoolsInput{ + PoolIds: []*string{publicIpv4Pool}, + }) + if err != nil { + return false, fmt.Errorf("failed to describe Public IPv4 Pool %q: %w", *publicIpv4Pool, err) + } + if len(pools.PublicIpv4Pools) != 1 { + return false, fmt.Errorf("unexpected number of configured Public IPv4 Pools. want 1, got %d", len(pools.PublicIpv4Pools)) + } + + freeIPs := aws.Int64Value(pools.PublicIpv4Pools[0].TotalAvailableAddressCount) + hasFreeIPs := freeIPs >= want + + // force to fallback to Amazon pool when the custom pool is full. + fallbackToAmazonPool := pool.PublicIpv4PoolFallBackOrder != nil && pool.PublicIpv4PoolFallBackOrder.Equal(infrav1.PublicIpv4PoolFallbackOrderAmazonPool) + if !hasFreeIPs && fallbackToAmazonPool { + s.scope.Debug(fmt.Sprintf("public IPv4 pool %q has reached the limit with %d IPs available, using user-defined fallback config %q", *publicIpv4Pool, freeIPs, pool.PublicIpv4PoolFallBackOrder.String()), "eip") + return false, nil + } + if !hasFreeIPs { + return false, fmt.Errorf("public IPv4 pool %q does not have enough free IP addresses: want %d, got %d", *publicIpv4Pool, want, freeIPs) + } + s.scope.Debug(fmt.Sprintf("public IPv4 Pool %q has %d IPs available", *publicIpv4Pool, freeIPs), "eip") + return true, nil +} diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 8038b42290..665f5cc250 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -19,6 +19,7 @@ package network import ( "context" "fmt" + "sort" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -45,7 +46,7 @@ func (s *Service) reconcileNatGateways() error { s.scope.Debug("Reconciling NAT gateways") - if len(s.scope.Subnets().FilterPrivate()) == 0 { + if len(s.scope.Subnets().FilterPrivate().FilterNonCni()) == 0 { s.scope.Debug("No private subnets available, skipping NAT gateways") conditions.MarkFalse( s.scope.InfraCluster(), @@ -54,7 +55,7 @@ func (s *Service) reconcileNatGateways() error { clusterv1.ConditionSeverityWarning, "No private subnets available, skipping NAT gateways") return nil - } else if len(s.scope.Subnets().FilterPublic()) == 0 { + } else if len(s.scope.Subnets().FilterPublic().FilterNonCni()) == 0 { s.scope.Debug("No public subnets available. 
Cannot create NAT gateways for private subnets, this might be a configuration error.") conditions.MarkFalse( s.scope.InfraCluster(), @@ -73,7 +74,7 @@ func (s *Service) reconcileNatGateways() error { natGatewaysIPs := []string{} subnetIDs := []string{} - for _, sn := range s.scope.Subnets().FilterPublic() { + for _, sn := range s.scope.Subnets().FilterPublic().FilterNonCni() { if sn.GetResourceID() == "" { continue } @@ -221,7 +222,7 @@ func (s *Service) getNatGatewayTagParams(id string) infrav1.BuildParams { } func (s *Service) createNatGateways(subnetIDs []string) (natgateways []*ec2.NatGateway, err error) { - eips, err := s.getOrAllocateAddresses(len(subnetIDs), infrav1.APIServerRoleTagValue) + eips, err := s.getOrAllocateAddresses(len(subnetIDs), infrav1.CommonRoleTagValue, s.scope.VPC().GetElasticIPPool()) if err != nil { return nil, errors.Wrapf(err, "failed to create one or more IP addresses for NAT gateways") } @@ -298,7 +299,7 @@ func (s *Service) deleteNatGateway(id string) error { } if out == nil || len(out.NatGateways) == 0 { - return false, errors.New(fmt.Sprintf("no NAT gateway returned for id %q", id)) + return false, fmt.Errorf("no NAT gateway returned for id %q", id) } ng := out.NatGateways[0] @@ -321,23 +322,55 @@ func (s *Service) deleteNatGateway(id string) error { return nil } +// getNatGatewayForSubnet return the nat gateway for private subnets. +// NAT gateways in edge zones (Local Zones) are not globally supported, +// private subnets in those locations uses Nat Gateways from the +// Parent Zone or, when not available, the first zone in the Region. func (s *Service) getNatGatewayForSubnet(sn *infrav1.SubnetSpec) (string, error) { if sn.IsPublic { return "", errors.Errorf("cannot get NAT gateway for a public subnet, got id %q", sn.GetResourceID()) } - azGateways := make(map[string][]string) + // Check if public edge subnet in the edge zone has nat gateway + azGateways := make(map[string]string) + azNames := []string{} for _, psn := range s.scope.Subnets().FilterPublic() { if psn.NatGatewayID == nil { continue } - - azGateways[psn.AvailabilityZone] = append(azGateways[psn.AvailabilityZone], *psn.NatGatewayID) + if _, ok := azGateways[psn.AvailabilityZone]; !ok { + azGateways[psn.AvailabilityZone] = *psn.NatGatewayID + azNames = append(azNames, psn.AvailabilityZone) + } } if gws, ok := azGateways[sn.AvailabilityZone]; ok && len(gws) > 0 { - return gws[0], nil + return gws, nil + } + + // return error when no gateway found for regular zones, availability-zone zone type. + if !sn.IsEdge() { + return "", errors.Errorf("no nat gateways available in %q for private subnet %q", sn.AvailabilityZone, sn.GetResourceID()) + } + + // edge zones only: trying to find nat gateway for Local or Wavelength zone based in the zone type. 
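+	// The lookup below prefers the NAT gateway of the subnet's parent zone and otherwise
+	// falls back to the first public subnet's NAT gateway, iterating zone names in lexical order.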
+ + // Check if the parent zone public subnet has nat gateway + if sn.ParentZoneName != nil { + if gws, ok := azGateways[aws.StringValue(sn.ParentZoneName)]; ok && len(gws) > 0 { + return gws, nil + } + } + + // Get the first public subnet's nat gateway available + sort.Strings(azNames) + for _, zone := range azNames { + gw := azGateways[zone] + if len(gw) > 0 { + s.scope.Debug("Assigning route table", "table ID", gw, "source zone", zone, "target zone", sn.AvailabilityZone) + return gw, nil + } } - return "", errors.Errorf("no nat gateways available in %q for private subnet %q, current state: %+v", sn.AvailabilityZone, sn.GetResourceID(), azGateways) + return "", errors.Errorf("no nat gateways available in %q for private edge subnet %q, current state: %+v", sn.AvailabilityZone, sn.GetResourceID(), azGateways) } diff --git a/pkg/cloud/services/network/natgateways_test.go b/pkg/cloud/services/network/natgateways_test.go index 7a6430796e..a77686d61e 100644 --- a/pkg/cloud/services/network/natgateways_test.go +++ b/pkg/cloud/services/network/natgateways_test.go @@ -27,6 +27,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -121,7 +122,7 @@ func TestReconcileNatGateways(t *testing.T) { Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-eip-apiserver"), + Value: aws.String("test-cluster-eip-common"), }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), @@ -129,7 +130,7 @@ func TestReconcileNatGateways(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("apiserver"), + Value: aws.String("common"), }, }, }, @@ -228,7 +229,7 @@ func TestReconcileNatGateways(t *testing.T) { Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-eip-apiserver"), + Value: aws.String("test-cluster-eip-common"), }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), @@ -236,7 +237,7 @@ func TestReconcileNatGateways(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("apiserver"), + Value: aws.String("common"), }, }, }, @@ -728,3 +729,273 @@ var mockDescribeNatGatewaysOutput = func(ctx context.Context, _, y interface{}, SubnetId: aws.String("subnet-1"), }}}, true) } + +func TestGetdNatGatewayForEdgeSubnet(t *testing.T) { + subnetsSpec := infrav1.Subnets{ + { + ID: "subnet-az-1x-private", + AvailabilityZone: "us-east-1x", + IsPublic: false, + }, + { + ID: "subnet-az-1x-public", + AvailabilityZone: "us-east-1x", + IsPublic: true, + NatGatewayID: aws.String("natgw-az-1b-last"), + }, + { + ID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsPublic: false, + }, + { + ID: "subnet-az-1a-public", + AvailabilityZone: "us-east-1a", + IsPublic: true, + NatGatewayID: aws.String("natgw-az-1b-first"), + }, + { + ID: "subnet-az-1b-private", + AvailabilityZone: "us-east-1b", + IsPublic: false, + }, + { + ID: "subnet-az-1b-public", + AvailabilityZone: "us-east-1b", + IsPublic: true, + NatGatewayID: aws.String("natgw-az-1b-second"), + }, + { + ID: "subnet-az-1p-private", + AvailabilityZone: "us-east-1p", + IsPublic: false, + }, + } + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + testCases := []struct { + name string + spec infrav1.Subnets + input infrav1.SubnetSpec + expect string + expectErr 
bool + expectErrMessage string + }{ + { + name: "zone availability-zone, valid nat gateway", + input: infrav1.SubnetSpec{ + ID: "subnet-az-1b-private", + AvailabilityZone: "us-east-1b", + IsPublic: false, + }, + expect: "natgw-az-1b-second", + }, + { + name: "zone availability-zone, valid nat gateway", + input: infrav1.SubnetSpec{ + ID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsPublic: false, + }, + expect: "natgw-az-1b-first", + }, + { + name: "zone availability-zone, valid nat gateway", + input: infrav1.SubnetSpec{ + ID: "subnet-az-1x-private", + AvailabilityZone: "us-east-1x", + IsPublic: false, + }, + expect: "natgw-az-1b-last", + }, + { + name: "zone local-zone, valid nat gateway from parent", + input: infrav1.SubnetSpec{ + ID: "subnet-lz-nyc1a-private", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + ParentZoneName: aws.String("us-east-1a"), + }, + expect: "natgw-az-1b-first", + }, + { + name: "zone local-zone, valid nat gateway from parent", + input: infrav1.SubnetSpec{ + ID: "subnet-lz-nyc1a-private", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + ParentZoneName: aws.String("us-east-1x"), + }, + expect: "natgw-az-1b-last", + }, + { + name: "zone local-zone, valid nat gateway from fallback", + input: infrav1.SubnetSpec{ + ID: "subnet-lz-nyc1a-private", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + ParentZoneName: aws.String("us-east-1-notAvailable"), + }, + expect: "natgw-az-1b-first", + }, + { + name: "edge zones without NAT GW support, no public subnet and NAT Gateway for the parent zone, return first nat gateway available", + input: infrav1.SubnetSpec{ + ID: "subnet-7", + AvailabilityZone: "us-east-1-nyc-1a", + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + }, + expect: "natgw-az-1b-first", + }, + { + name: "edge zones without NAT GW support, no public subnet and NAT Gateway for the parent zone, return first nat gateway available", + input: infrav1.SubnetSpec{ + ID: "subnet-7", + CidrBlock: "10.0.10.0/24", + AvailabilityZone: "us-east-1-nyc-1a", + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + ParentZoneName: aws.String("us-east-1-notFound"), + }, + expect: "natgw-az-1b-first", + }, + { + name: "edge zones without NAT GW support, valid public subnet and NAT Gateway for the parent zone, return parent's zone nat gateway", + input: infrav1.SubnetSpec{ + ID: "subnet-lz-7", + AvailabilityZone: "us-east-1-nyc-1a", + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + ParentZoneName: aws.String("us-east-1b"), + }, + expect: "natgw-az-1b-second", + }, + { + name: "wavelength zones without Nat GW support, public subnet and Nat Gateway for the parent zone, return parent's zone nat gateway", + input: infrav1.SubnetSpec{ + ID: "subnet-7", + CidrBlock: "10.0.10.0/24", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + ZoneType: ptr.To(infrav1.ZoneTypeWavelengthZone), + ParentZoneName: aws.String("us-east-1x"), + }, + expect: "natgw-az-1b-last", + }, + // errors + { + name: "error if the subnet is public", + input: infrav1.SubnetSpec{ + ID: "subnet-az-1-public", + AvailabilityZone: "us-east-1a", + IsPublic: true, + }, + expectErr: true, + expectErrMessage: `cannot get NAT gateway for a public subnet, got id "subnet-az-1-public"`, + }, + { + name: "error if the subnet is public", + input: infrav1.SubnetSpec{ + ID: "subnet-lz-1-public", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: true, + }, + 
expectErr: true, + expectErrMessage: `cannot get NAT gateway for a public subnet, got id "subnet-lz-1-public"`, + }, + { + name: "error if there are no nat gateways available in the subnets", + spec: infrav1.Subnets{}, + input: infrav1.SubnetSpec{ + ID: "subnet-az-1-private", + AvailabilityZone: "us-east-1p", + IsPublic: false, + }, + expectErr: true, + expectErrMessage: `no nat gateways available in "us-east-1p" for private subnet "subnet-az-1-private"`, + }, + { + name: "error if there are no nat gateways available in the subnets", + spec: infrav1.Subnets{}, + input: infrav1.SubnetSpec{ + ID: "subnet-lz-1", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneTypeLocalZone), + }, + expectErr: true, + expectErrMessage: `no nat gateways available in "us-east-1-nyc-1a" for private edge subnet "subnet-lz-1", current state: map[]`, + }, + { + name: "error if the subnet is public", + input: infrav1.SubnetSpec{ + ID: "subnet-lz-1", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: true, + }, + expectErr: true, + expectErrMessage: `cannot get NAT gateway for a public subnet, got id "subnet-lz-1"`, + }, + } + + for idx, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + subnets := subnetsSpec + if tc.spec != nil { + subnets = tc.spec + } + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + Subnets: subnets, + }, + }, + } + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(awsCluster).WithStatusSubresource(awsCluster).Build() + + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + }, + AWSCluster: awsCluster, + Client: client, + }) + if err != nil { + t.Fatalf("Failed to create test context: %v", err) + return + } + + s := NewService(clusterScope) + + id, err := s.getNatGatewayForSubnet(&testCases[idx].input) + + if tc.expectErr && err == nil { + t.Fatal("expected error but got no error") + } + if err != nil && len(tc.expectErrMessage) > 0 { + if err.Error() != tc.expectErrMessage { + t.Fatalf("got an unexpected error message:\nwant: %v\n got: %v\n", tc.expectErrMessage, err.Error()) + } + } + if !tc.expectErr && err != nil { + t.Fatalf("got an unexpected error: %v", err) + } + if len(tc.expect) > 0 { + g.Expect(id).To(Equal(tc.expect)) + } + }) + } +} diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index b2363b5aac..70d85fd682 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -37,8 +37,8 @@ func (s *Service) ReconcileNetwork() (err error) { } conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition) - // Secondary CIDR - if err := s.associateSecondaryCidr(); err != nil { + // Secondary CIDRs + if err := s.associateSecondaryCidrs(); err != nil { conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error()) return err } @@ -55,6 +55,12 @@ func (s *Service) ReconcileNetwork() (err error) { return err } + // Carrier Gateway. 
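+	// Carrier gateways provide the public IPv4 route for Wavelength-zone subnets; reconciliation
+	// is skipped for unmanaged VPCs and for VPCs without public Wavelength-zone subnets.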
+ if err := s.reconcileCarrierGateway(); err != nil { + conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error()) + return err + } + // Egress Only Internet Gateways. if err := s.reconcileEgressOnlyInternetGateways(); err != nil { conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error()) @@ -158,6 +164,15 @@ func (s *Service) DeleteNetwork() (err error) { } conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + // Carrier Gateway. + if s.scope.VPC().CarrierGatewayID != nil { + if err := s.deleteCarrierGateway(); err != nil { + conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + return err + } + conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + } + // Egress Only Internet Gateways. conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { @@ -184,7 +199,7 @@ func (s *Service) DeleteNetwork() (err error) { // Secondary CIDR. conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - if err := s.disassociateSecondaryCidr(); err != nil { + if err := s.disassociateSecondaryCidrs(); err != nil { conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, err.Error()) return err } diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go index 777c12fd64..66694b2dd3 100644 --- a/pkg/cloud/services/network/routetables.go +++ b/pkg/cloud/services/network/routetables.go @@ -60,30 +60,10 @@ func (s *Service) reconcileRouteTables() error { for i := range subnets { sn := &subnets[i] // We need to compile the minimum routes for this subnet first, so we can compare it or create them. - var routes []*ec2.Route - if sn.IsPublic { - if s.scope.VPC().InternetGatewayID == nil { - return errors.Errorf("failed to create routing tables: internet gateway for %q is nil", s.scope.VPC().ID) - } - routes = append(routes, s.getGatewayPublicRoute()) - if sn.IsIPv6 { - routes = append(routes, s.getGatewayPublicIPv6Route()) - } - } else { - natGatewayID, err := s.getNatGatewayForSubnet(sn) - if err != nil { - return err - } - routes = append(routes, s.getNatGatewayPrivateRoute(natGatewayID)) - if sn.IsIPv6 { - if !s.scope.VPC().IsIPv6Enabled() { - // Safety net because EgressOnlyInternetGateway needs the ID from the ipv6 block. - // if, for whatever reason by this point that is not available, we don't want to - // panic because of a nil pointer access. This should never occur. Famous last words though. 
- return errors.Errorf("ipv6 block missing for ipv6 enabled subnet, can't create egress only internet gateway") - } - routes = append(routes, s.getEgressOnlyInternetGateway()) - } + routes, err := s.getRoutesForSubnet(sn) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedRouteTableRoutes", "Failed to get routes for managed RouteTable for subnet %s: %v", sn.ID, err) + return errors.Wrapf(err, "failed to discover routes on route table %s", sn.ID) } if rt, ok := subnetRouteMap[sn.GetResourceID()]; ok { @@ -145,7 +125,7 @@ func (s *Service) reconcileRouteTables() error { return nil } -func (s *Service) fixMismatchedRouting(specRoute *ec2.Route, currentRoute *ec2.Route, rt *ec2.RouteTable) error { +func (s *Service) fixMismatchedRouting(specRoute *ec2.CreateRouteInput, currentRoute *ec2.Route, rt *ec2.RouteTable) error { var input *ec2.ReplaceRouteInput if specRoute.DestinationCidrBlock != nil { if (currentRoute.DestinationCidrBlock != nil && @@ -214,6 +194,32 @@ func (s *Service) describeVpcRouteTablesBySubnet() (map[string]*ec2.RouteTable, return res, nil } +func (s *Service) deleteRouteTable(rt *ec2.RouteTable) error { + for _, as := range rt.Associations { + if as.SubnetId == nil { + continue + } + + if _, err := s.EC2Client.DisassociateRouteTableWithContext(context.TODO(), &ec2.DisassociateRouteTableInput{AssociationId: as.RouteTableAssociationId}); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDisassociateRouteTable", "Failed to disassociate managed RouteTable %q from Subnet %q: %v", *rt.RouteTableId, *as.SubnetId, err) + return errors.Wrapf(err, "failed to disassociate route table %q from subnet %q", *rt.RouteTableId, *as.SubnetId) + } + + record.Eventf(s.scope.InfraCluster(), "SuccessfulDisassociateRouteTable", "Disassociated managed RouteTable %q from subnet %q", *rt.RouteTableId, *as.SubnetId) + s.scope.Debug("Deleted association between route table and subnet", "route-table-id", *rt.RouteTableId, "subnet-id", *as.SubnetId) + } + + if _, err := s.EC2Client.DeleteRouteTableWithContext(context.TODO(), &ec2.DeleteRouteTableInput{RouteTableId: rt.RouteTableId}); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDeleteRouteTable", "Failed to delete managed RouteTable %q: %v", *rt.RouteTableId, err) + return errors.Wrapf(err, "failed to delete route table %q", *rt.RouteTableId) + } + + record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteRouteTable", "Deleted managed RouteTable %q", *rt.RouteTableId) + s.scope.Info("Deleted route table", "route-table-id", *rt.RouteTableId) + + return nil +} + func (s *Service) deleteRouteTables() error { if s.scope.VPC().IsUnmanaged(s.scope.Name()) { s.scope.Trace("Skipping routing tables deletion in unmanaged mode") @@ -226,27 +232,10 @@ func (s *Service) deleteRouteTables() error { } for _, rt := range rts { - for _, as := range rt.Associations { - if as.SubnetId == nil { - continue - } - - if _, err := s.EC2Client.DisassociateRouteTableWithContext(context.TODO(), &ec2.DisassociateRouteTableInput{AssociationId: as.RouteTableAssociationId}); err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedDisassociateRouteTable", "Failed to disassociate managed RouteTable %q from Subnet %q: %v", *rt.RouteTableId, *as.SubnetId, err) - return errors.Wrapf(err, "failed to disassociate route table %q from subnet %q", *rt.RouteTableId, *as.SubnetId) - } - - record.Eventf(s.scope.InfraCluster(), "SuccessfulDisassociateRouteTable", "Disassociated managed RouteTable %q from subnet %q", *rt.RouteTableId, *as.SubnetId) - 
s.scope.Debug("Deleted association between route table and subnet", "route-table-id", *rt.RouteTableId, "subnet-id", *as.SubnetId) - } - - if _, err := s.EC2Client.DeleteRouteTableWithContext(context.TODO(), &ec2.DeleteRouteTableInput{RouteTableId: rt.RouteTableId}); err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedDeleteRouteTable", "Failed to delete managed RouteTable %q: %v", *rt.RouteTableId, err) - return errors.Wrapf(err, "failed to delete route table %q", *rt.RouteTableId) + err := s.deleteRouteTable(rt) + if err != nil { + return err } - - record.Eventf(s.scope.InfraCluster(), "SuccessfulDeleteRouteTable", "Deleted managed RouteTable %q", *rt.RouteTableId) - s.scope.Info("Deleted route table", "route-table-id", *rt.RouteTableId) } return nil } @@ -271,7 +260,7 @@ func (s *Service) describeVpcRouteTables() ([]*ec2.RouteTable, error) { return out.RouteTables, nil } -func (s *Service) createRouteTableWithRoutes(routes []*ec2.Route, isPublic bool, zone string) (*infrav1.RouteTable, error) { +func (s *Service) createRouteTableWithRoutes(routes []*ec2.CreateRouteInput, isPublic bool, zone string) (*infrav1.RouteTable, error) { out, err := s.EC2Client.CreateRouteTableWithContext(context.TODO(), &ec2.CreateRouteTableInput{ VpcId: aws.String(s.scope.VPC().ID), TagSpecifications: []*ec2.TagSpecification{ @@ -287,23 +276,17 @@ func (s *Service) createRouteTableWithRoutes(routes []*ec2.Route, isPublic bool, for i := range routes { route := routes[i] if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { - if _, err := s.EC2Client.CreateRouteWithContext(context.TODO(), &ec2.CreateRouteInput{ - RouteTableId: out.RouteTable.RouteTableId, - DestinationCidrBlock: route.DestinationCidrBlock, - DestinationIpv6CidrBlock: route.DestinationIpv6CidrBlock, - EgressOnlyInternetGatewayId: route.EgressOnlyInternetGatewayId, - GatewayId: route.GatewayId, - InstanceId: route.InstanceId, - NatGatewayId: route.NatGatewayId, - NetworkInterfaceId: route.NetworkInterfaceId, - VpcPeeringConnectionId: route.VpcPeeringConnectionId, - }); err != nil { + route.RouteTableId = out.RouteTable.RouteTableId + if _, err := s.EC2Client.CreateRouteWithContext(context.TODO(), route); err != nil { return false, err } return true, nil }, awserrors.RouteTableNotFound, awserrors.NATGatewayNotFound, awserrors.GatewayNotFound); err != nil { - // TODO(vincepri): cleanup the route table if this fails. 
record.Warnf(s.scope.InfraCluster(), "FailedCreateRoute", "Failed to create route %s for RouteTable %q: %v", route.GoString(), *out.RouteTable.RouteTableId, err) + errDel := s.deleteRouteTable(out.RouteTable) + if errDel != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDeleteRouteTable", "Failed to delete managed RouteTable %q: %v", *out.RouteTable.RouteTableId, errDel) + } return nil, errors.Wrapf(err, "failed to create route in route table %q: %s", *out.RouteTable.RouteTableId, route.GoString()) } record.Eventf(s.scope.InfraCluster(), "SuccessfulCreateRoute", "Created route %s for RouteTable %q", route.GoString(), *out.RouteTable.RouteTableId) @@ -329,34 +312,41 @@ func (s *Service) associateRouteTable(rt *infrav1.RouteTable, subnetID string) e return nil } -func (s *Service) getNatGatewayPrivateRoute(natGatewayID string) *ec2.Route { - return &ec2.Route{ +func (s *Service) getNatGatewayPrivateRoute(natGatewayID string) *ec2.CreateRouteInput { + return &ec2.CreateRouteInput{ NatGatewayId: aws.String(natGatewayID), DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock), } } -func (s *Service) getEgressOnlyInternetGateway() *ec2.Route { - return &ec2.Route{ +func (s *Service) getEgressOnlyInternetGateway() *ec2.CreateRouteInput { + return &ec2.CreateRouteInput{ DestinationIpv6CidrBlock: aws.String(services.AnyIPv6CidrBlock), EgressOnlyInternetGatewayId: s.scope.VPC().IPv6.EgressOnlyInternetGatewayID, } } -func (s *Service) getGatewayPublicRoute() *ec2.Route { - return &ec2.Route{ +func (s *Service) getGatewayPublicRoute() *ec2.CreateRouteInput { + return &ec2.CreateRouteInput{ DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock), GatewayId: aws.String(*s.scope.VPC().InternetGatewayID), } } -func (s *Service) getGatewayPublicIPv6Route() *ec2.Route { - return &ec2.Route{ +func (s *Service) getGatewayPublicIPv6Route() *ec2.CreateRouteInput { + return &ec2.CreateRouteInput{ DestinationIpv6CidrBlock: aws.String(services.AnyIPv6CidrBlock), GatewayId: aws.String(*s.scope.VPC().InternetGatewayID), } } +func (s *Service) getCarrierGatewayPublicIPv4Route() *ec2.CreateRouteInput { + return &ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String(services.AnyIPv4CidrBlock), + CarrierGatewayId: aws.String(*s.scope.VPC().CarrierGatewayID), + } +} + func (s *Service) getRouteTableTagParams(id string, public bool, zone string) infrav1.BuildParams { var name strings.Builder @@ -382,3 +372,62 @@ func (s *Service) getRouteTableTagParams(id string, public bool, zone string) in Additional: additionalTags, } } + +func (s *Service) getRoutesToPublicSubnet(sn *infrav1.SubnetSpec) ([]*ec2.CreateRouteInput, error) { + var routes []*ec2.CreateRouteInput + + if sn.IsEdge() && sn.IsIPv6 { + return nil, errors.Errorf("can't determine routes for unsupported ipv6 subnet in zone type %q", sn.ZoneType) + } + + if sn.IsEdgeWavelength() { + if s.scope.VPC().CarrierGatewayID == nil { + return routes, errors.Errorf("failed to create carrier routing table: carrier gateway for VPC %q is not present", s.scope.VPC().ID) + } + routes = append(routes, s.getCarrierGatewayPublicIPv4Route()) + return routes, nil + } + + if s.scope.VPC().InternetGatewayID == nil { + return routes, errors.Errorf("failed to create routing tables: internet gateway for VPC %q is not present", s.scope.VPC().ID) + } + routes = append(routes, s.getGatewayPublicRoute()) + if sn.IsIPv6 { + routes = append(routes, s.getGatewayPublicIPv6Route()) + } + + return routes, nil +} + +func (s *Service) getRoutesToPrivateSubnet(sn *infrav1.SubnetSpec) 
(routes []*ec2.CreateRouteInput, err error) { + var natGatewayID string + + if sn.IsEdge() && sn.IsIPv6 { + return nil, errors.Errorf("can't determine routes for unsupported ipv6 subnet in zone type %q", sn.ZoneType) + } + + natGatewayID, err = s.getNatGatewayForSubnet(sn) + if err != nil { + return routes, err + } + + routes = append(routes, s.getNatGatewayPrivateRoute(natGatewayID)) + if sn.IsIPv6 { + if !s.scope.VPC().IsIPv6Enabled() { + // Safety net because EgressOnlyInternetGateway needs the ID from the ipv6 block. + // if, for whatever reason by this point that is not available, we don't want to + // panic because of a nil pointer access. This should never occur. Famous last words though. + return routes, errors.Errorf("ipv6 block missing for ipv6 enabled subnet, can't create route for egress only internet gateway") + } + routes = append(routes, s.getEgressOnlyInternetGateway()) + } + + return routes, nil +} + +func (s *Service) getRoutesForSubnet(sn *infrav1.SubnetSpec) ([]*ec2.CreateRouteInput, error) { + if sn.IsPublic { + return s.getRoutesToPublicSubnet(sn) + } + return s.getRoutesToPrivateSubnet(sn) +} diff --git a/pkg/cloud/services/network/routetables_test.go b/pkg/cloud/services/network/routetables_test.go index b8feb2aae8..6b6003a2d7 100644 --- a/pkg/cloud/services/network/routetables_test.go +++ b/pkg/cloud/services/network/routetables_test.go @@ -25,10 +25,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -519,6 +521,44 @@ func TestReconcileRouteTables(t *testing.T) { }, nil) }, }, + { + name: "failed to create route, delete route table and fail", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + InternetGatewayID: aws.String("igw-01"), + ID: "vpc-rtbs", + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "subnet-rtbs-public", + IsPublic: true, + NatGatewayID: aws.String("nat-01"), + AvailabilityZone: "us-east-1a", + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.CreateRouteTableWithContext(context.TODO(), matchRouteTableInput(&ec2.CreateRouteTableInput{VpcId: aws.String("vpc-rtbs")})). + Return(&ec2.CreateRouteTableOutput{RouteTable: &ec2.RouteTable{RouteTableId: aws.String("rt-1")}}, nil) + + m.CreateRouteWithContext(context.TODO(), gomock.Eq(&ec2.CreateRouteInput{ + GatewayId: aws.String("igw-01"), + DestinationCidrBlock: aws.String("0.0.0.0/0"), + RouteTableId: aws.String("rt-1"), + })). + Return(nil, awserrors.NewNotFound("MissingParameter")) + + m.DeleteRouteTableWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteRouteTableInput{})). + Return(&ec2.DeleteRouteTableOutput{}, nil) + }, + err: errors.New(`failed to create route in route table "rt-1"`), + }, } for _, tc := range testCases { @@ -560,59 +600,65 @@ func TestReconcileRouteTables(t *testing.T) { } } +// Delete Route Table(s). 
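+// The stubs below are shared by TestDeleteRouteTables and TestDeleteRouteTable.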
+var ( + stubEc2RouteTablePrivate = &ec2.RouteTable{ + RouteTableId: aws.String("route-table-private"), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: nil, + }, + }, + Routes: []*ec2.Route{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + NatGatewayId: aws.String("outdated-nat-01"), + }, + }, + } + stubEc2RouteTablePublicWithAssociations = &ec2.RouteTable{ + RouteTableId: aws.String("route-table-public"), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: aws.String("subnet-routetables-public"), + RouteTableAssociationId: aws.String("route-table-public"), + }, + }, + Routes: []*ec2.Route{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String("igw-01"), + }, + }, + Tags: []*ec2.Tag{ + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("common"), + }, + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-rt-public-us-east-1a"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + }, + } +) + func TestDeleteRouteTables(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() describeRouteTableOutput := &ec2.DescribeRouteTablesOutput{ RouteTables: []*ec2.RouteTable{ - { - RouteTableId: aws.String("route-table-private"), - Associations: []*ec2.RouteTableAssociation{ - { - SubnetId: nil, - }, - }, - Routes: []*ec2.Route{ - { - DestinationCidrBlock: aws.String("0.0.0.0/0"), - NatGatewayId: aws.String("outdated-nat-01"), - }, - }, - }, - { - RouteTableId: aws.String("route-table-public"), - Associations: []*ec2.RouteTableAssociation{ - { - SubnetId: aws.String("subnet-routetables-public"), - RouteTableAssociationId: aws.String("route-table-public"), - }, - }, - Routes: []*ec2.Route{ - { - DestinationCidrBlock: aws.String("0.0.0.0/0"), - GatewayId: aws.String("igw-01"), - }, - }, - Tags: []*ec2.Tag{ - { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("common"), - }, - { - Key: aws.String("Name"), - Value: aws.String("test-cluster-rt-public-us-east-1a"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), - Value: aws.String("owned"), - }, - }, - }, + stubEc2RouteTablePrivate, + stubEc2RouteTablePublicWithAssociations, }, } @@ -730,6 +776,81 @@ func TestDeleteRouteTables(t *testing.T) { } } +func TestDeleteRouteTable(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + testCases := []struct { + name string + input *ec2.RouteTable + expect func(m *mocks.MockEC2APIMockRecorder) + wantErr bool + }{ + { + name: "Should delete route table successfully", + input: stubEc2RouteTablePrivate, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DeleteRouteTableWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteRouteTableInput{})). + Return(&ec2.DeleteRouteTableOutput{}, nil) + }, + }, + { + name: "Should return error if delete route table fails", + input: stubEc2RouteTablePrivate, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DeleteRouteTableWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DeleteRouteTableInput{})). 
+ Return(nil, awserrors.NewNotFound("not found")) + }, + wantErr: true, + }, + { + name: "Should return error if disassociate route table fails", + input: stubEc2RouteTablePublicWithAssociations, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DisassociateRouteTableWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateRouteTableInput{ + AssociationId: aws.String("route-table-public"), + })).Return(nil, awserrors.NewNotFound("not found")) + }, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + ec2Mock := mocks.NewMockEC2API(mockCtrl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + }, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{}, + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + if tc.expect != nil { + tc.expect(ec2Mock.EXPECT()) + } + + s := NewService(scope) + s.EC2Client = ec2Mock + + err = s.deleteRouteTable(tc.input) + if tc.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).NotTo(HaveOccurred()) + }) + } +} + type routeTableInputMatcher struct { routeTableInput *ec2.CreateRouteTableInput } @@ -754,3 +875,485 @@ func (r routeTableInputMatcher) String() string { func matchRouteTableInput(input *ec2.CreateRouteTableInput) gomock.Matcher { return routeTableInputMatcher{routeTableInput: input} } + +func TestService_getRoutesForSubnet(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + defaultSubnets := infrav1.Subnets{ + { + ResourceID: "subnet-az-2z-private", + AvailabilityZone: "us-east-2z", + IsPublic: false, + }, + { + ResourceID: "subnet-az-2z-public", + AvailabilityZone: "us-east-2z", + IsPublic: true, + NatGatewayID: ptr.To("nat-gw-fromZone-us-east-2z"), + }, + { + ResourceID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsPublic: false, + }, + { + ResourceID: "subnet-az-1a-public", + AvailabilityZone: "us-east-1a", + IsPublic: true, + NatGatewayID: ptr.To("nat-gw-fromZone-us-east-1a"), + }, + { + ResourceID: "subnet-lz-invalid2z-private", + AvailabilityZone: "us-east-2-inv-1z", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: ptr.To("us-east-2a"), + }, + { + ResourceID: "subnet-lz-invalid1a-public", + AvailabilityZone: "us-east-2-nyc-1z", + IsPublic: true, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: ptr.To("us-east-2z"), + }, + { + ResourceID: "subnet-lz-1a-private", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: ptr.To("us-east-1a"), + }, + { + ResourceID: "subnet-lz-1a-public", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: true, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: ptr.To("us-east-1a"), + }, + { + ResourceID: "subnet-wl-invalid2z-private", + AvailabilityZone: "us-east-2-wl1-inv-wlz-1", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: ptr.To("us-east-2z"), + }, + { + ResourceID: "subnet-wl-invalid2z-public", + AvailabilityZone: "us-east-2-wl1-inv-wlz-1", + IsPublic: true, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: ptr.To("us-east-2z"), + }, + { + ResourceID: 
"subnet-wl-1a-private", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: ptr.To("us-east-1a"), + }, + { + ResourceID: "subnet-wl-1a-public", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: true, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: ptr.To("us-east-1a"), + }, + } + + vpcName := "vpc-test-for-routes" + defaultNetwork := infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: vpcName, + InternetGatewayID: aws.String("vpc-igw"), + CarrierGatewayID: aws.String("vpc-cagw"), + IPv6: &infrav1.IPv6{ + CidrBlock: "2001:db8:1234:1::/64", + EgressOnlyInternetGatewayID: aws.String("vpc-eigw"), + }, + }, + Subnets: defaultSubnets, + } + + tests := []struct { + name string + specOverrideNet *infrav1.NetworkSpec + specOverrideSubnets *infrav1.Subnets + inputSubnet *infrav1.SubnetSpec + want []*ec2.CreateRouteInput + wantErr bool + wantErrMessage string + }{ + { + name: "empty subnet should have empty routes", + specOverrideSubnets: &infrav1.Subnets{}, + inputSubnet: &infrav1.SubnetSpec{ + ID: "subnet-1-private", + }, + want: []*ec2.CreateRouteInput{}, + wantErrMessage: `no nat gateways available in "" for private subnet "subnet-1-private"`, + }, + { + name: "empty subnet should have empty routes", + inputSubnet: &infrav1.SubnetSpec{}, + want: []*ec2.CreateRouteInput{}, + wantErrMessage: `no nat gateways available in "" for private subnet ""`, + }, + // public subnets ipv4 + { + name: "public ipv4 subnet, availability zone, must have ipv4 default route to igw", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-public", + AvailabilityZone: "us-east-1a", + IsIPv6: false, + IsPublic: true, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String("vpc-igw"), + }, + }, + }, + { + name: "public ipv6 subnet, availability zone, must have ipv6 default route to igw", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-public", + AvailabilityZone: "us-east-1a", + IsPublic: true, + IsIPv6: true, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String("vpc-igw"), + }, + { + DestinationIpv6CidrBlock: aws.String("::/0"), + GatewayId: aws.String("vpc-igw"), + }, + }, + }, + { + name: "public ipv4 subnet, local zone, must have ipv4 default route to igw", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-lz-1a-public", + AvailabilityZone: "us-east-1-nyc-1a", + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + IsPublic: true, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String("vpc-igw"), + }, + }, + }, + { + name: "public ipv4 subnet, wavelength zone, must have ipv4 default route to carrier gateway", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-wl-1a-public", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + IsPublic: true, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + CarrierGatewayId: aws.String("vpc-cagw"), + }, + }, + }, + // public subnet ipv4, GW not found. 
+ { + name: "public ipv4 subnet, availability zone, must return error when no internet gateway available", + specOverrideNet: func() *infrav1.NetworkSpec { + net := defaultNetwork.DeepCopy() + net.VPC.InternetGatewayID = nil + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-public", + AvailabilityZone: "us-east-1a", + IsPublic: true, + }, + wantErrMessage: `failed to create routing tables: internet gateway for VPC "vpc-test-for-routes" is not present`, + }, + { + name: "public ipv4 subnet, local zone, must return error when no internet gateway available", + specOverrideNet: func() *infrav1.NetworkSpec { + net := defaultNetwork.DeepCopy() + net.VPC.InternetGatewayID = nil + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-lz-1a-public", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: true, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErrMessage: `failed to create routing tables: internet gateway for VPC "vpc-test-for-routes" is not present`, + }, + { + name: "public ipv4 subnet, wavelength zone, must return error when no Carrier Gateway found", + specOverrideNet: func() *infrav1.NetworkSpec { + net := defaultNetwork.DeepCopy() + net.VPC.CarrierGatewayID = nil + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-wl-1a-public", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: true, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErrMessage: `failed to create carrier routing table: carrier gateway for VPC "vpc-test-for-routes" is not present`, + }, + // public subnet ipv6, unsupported + { + name: "public ipv6 subnet, local zone, must return error for unsupported ip version", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-lz-1a-public", + AvailabilityZone: "us-east-1-nyc-1a", + IsPublic: true, + IsIPv6: true, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "local-zone"`, + }, + { + name: "public ipv6 subnet, wavelength zone, must return error for unsupported ip version", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-wl-1a-public", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsPublic: true, + IsIPv6: true, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErr: true, + wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "wavelength-zone"`, + }, + // private subnets + { + name: "private ipv4 subnet, availability zone, must have ipv4 default route to nat gateway", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsPublic: false, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"), + }, + }, + }, + { + name: "private ipv4 subnet, local zone, must have ipv4 default route to nat gateway", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-lz-1a-private", + AvailabilityZone: "us-east-1-nyc-1a", + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: aws.String("us-east-1a"), + IsPublic: false, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"), + }, + }, + }, + 
{ + name: "private ipv4 subnet, wavelength zone, must have ipv4 default route to nat gateway", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-wl-1a-private", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: aws.String("us-east-1a"), + IsPublic: false, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"), + }, + }, + }, + // egress-only subnet ipv6 + { + name: "egress-only ipv6 subnet, availability zone, must have ipv6 default route to egress-only gateway", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsIPv6: true, + IsPublic: false, + }, + want: []*ec2.CreateRouteInput{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + NatGatewayId: aws.String("nat-gw-fromZone-us-east-1a"), + }, + { + DestinationIpv6CidrBlock: aws.String("::/0"), + EgressOnlyInternetGatewayId: aws.String("vpc-eigw"), + }, + }, + }, + { + name: "private ipv6 subnet, availability zone, non-ipv6 block, must return error", + specOverrideNet: func() *infrav1.NetworkSpec { + net := defaultNetwork.DeepCopy() + net.VPC.IPv6 = nil + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsIPv6: true, + IsPublic: false, + }, + wantErrMessage: `ipv6 block missing for ipv6 enabled subnet, can't create route for egress only internet gateway`, + }, + // private subnet ipv6, unsupported + { + name: "private ipv6 subnet, local zone, must return unsupported", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-lz-1a-private", + AvailabilityZone: "us-east-1-nyc-a", + IsIPv6: true, + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "local-zone"`, + }, + { + name: "private ipv6 subnet, wavelength zone, must return unsupported", + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-wl-1a-private", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: aws.String("us-east-1a"), + IsIPv6: true, + IsPublic: false, + }, + wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "wavelength-zone"`, + }, + // private subnet, gateway not found + { + name: "private ipv4 subnet, availability zone, must return error when invalid gateway", + specOverrideNet: func() *infrav1.NetworkSpec { + net := defaultNetwork.DeepCopy() + for i := range net.Subnets { + if net.Subnets[i].AvailabilityZone == "us-east-1a" && net.Subnets[i].IsPublic { + net.Subnets[i].NatGatewayID = nil + } + } + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-az-1a-private", + AvailabilityZone: "us-east-1a", + IsPublic: false, + }, + wantErrMessage: `no nat gateways available in "us-east-1a" for private subnet "subnet-az-1a-private"`, + }, + { + name: "private ipv4 subnet, local zone, must return error when invalid gateway", + specOverrideNet: func() *infrav1.NetworkSpec { + net := defaultNetwork.DeepCopy() + for i := range net.Subnets { + if net.Subnets[i].AvailabilityZone == "us-east-1a" && net.Subnets[i].IsPublic { + net.Subnets[i].NatGatewayID = nil + } + } + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-lz-1a-private", + AvailabilityZone: "us-east-1-nyc-1a", + 
IsIPv6: true, + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("local-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "local-zone"`, + }, + { + name: "private ipv4 subnet, wavelength zone, must return error when invalid gateway", + specOverrideNet: func() *infrav1.NetworkSpec { + net := new(infrav1.NetworkSpec) + *net = defaultNetwork + net.VPC.CarrierGatewayID = nil + return net + }(), + inputSubnet: &infrav1.SubnetSpec{ + ResourceID: "subnet-wl-1a-private", + AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", + IsIPv6: true, + IsPublic: false, + ZoneType: ptr.To(infrav1.ZoneType("wavelength-zone")), + ParentZoneName: aws.String("us-east-1a"), + }, + wantErrMessage: `can't determine routes for unsupported ipv6 subnet in zone type "wavelength-zone"`, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + cluster := scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-routes"}, + }, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{}, + }, + } + cluster.AWSCluster.Spec.NetworkSpec = defaultNetwork + if tc.specOverrideNet != nil { + cluster.AWSCluster.Spec.NetworkSpec = *tc.specOverrideNet + } + if tc.specOverrideSubnets != nil { + cluster.AWSCluster.Spec.NetworkSpec.Subnets = *tc.specOverrideSubnets + } + + scope, err := scope.NewClusterScope(cluster) + if err != nil { + t.Errorf("Service.getRoutesForSubnet() error setting up the test case: %v", err) + } + + s := NewService(scope) + got, err := s.getRoutesForSubnet(tc.inputSubnet) + + wantErr := tc.wantErr + if len(tc.wantErrMessage) > 0 { + wantErr = true + } + if wantErr && err == nil { + t.Fatal("expected error but got no error") + } + if err != nil { + if !wantErr { + t.Fatalf("got an unexpected error: %v", err) + } + if wantErr && len(tc.wantErrMessage) > 0 && err.Error() != tc.wantErrMessage { + t.Fatalf("got an unexpected error message:\nwant: %v\n got: %v\n", tc.wantErrMessage, err) + } + } + if len(tc.want) > 0 { + if !cmp.Equal(got, tc.want) { + t.Errorf("got unexpect routes:\n%v", cmp.Diff(got, tc.want)) + } + } + }) + } +} diff --git a/pkg/cloud/services/network/secondarycidr.go b/pkg/cloud/services/network/secondarycidr.go index 54fb7c5816..829383bf1a 100644 --- a/pkg/cloud/services/network/secondarycidr.go +++ b/pkg/cloud/services/network/secondarycidr.go @@ -20,7 +20,6 @@ import ( "context" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/google/go-cmp/cmp" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" @@ -30,8 +29,9 @@ func isVPCPresent(vpcs *ec2.DescribeVpcsOutput) bool { return vpcs != nil && len(vpcs.Vpcs) > 0 } -func (s *Service) associateSecondaryCidr() error { - if s.scope.SecondaryCidrBlock() == nil { +func (s *Service) associateSecondaryCidrs() error { + secondaryCidrBlocks := s.scope.AllSecondaryCidrBlocks() + if len(secondaryCidrBlocks) == 0 { return nil } @@ -46,30 +46,43 @@ func (s *Service) associateSecondaryCidr() error { return errors.Errorf("failed to associateSecondaryCidr as there are no VPCs present") } + // We currently only *add* associations. Here, we do not reconcile exactly against the provided list + // (i.e. disassociate what isn't listed in the spec). 
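+ // A worked example of the intended behaviour (assuming two blocks in the spec):
+ //
+ //	spec:     10.0.1.0/24, 10.0.2.0/24
+ //	existing: 10.0.1.0/24
+ //	result:   AssociateVpcCidrBlock is called once, for 10.0.2.0/24;
+ //	          nothing is ever disassociated on this path.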
existingAssociations := vpcs.Vpcs[0].CidrBlockAssociationSet - for _, existing := range existingAssociations { - if *existing.CidrBlock == *s.scope.SecondaryCidrBlock() { - return nil + for _, desiredCidrBlock := range secondaryCidrBlocks { + desiredCidrBlock := desiredCidrBlock + + found := false + for _, existing := range existingAssociations { + if *existing.CidrBlock == desiredCidrBlock.IPv4CidrBlock { + found = true + break + } + } + if found { + continue } - } - out, err := s.EC2Client.AssociateVpcCidrBlockWithContext(context.TODO(), &ec2.AssociateVpcCidrBlockInput{ - VpcId: &s.scope.VPC().ID, - CidrBlock: s.scope.SecondaryCidrBlock(), - }) - if err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedAssociateSecondaryCidr", "Failed associating secondary CIDR with VPC %v", err) - return err - } + out, err := s.EC2Client.AssociateVpcCidrBlockWithContext(context.TODO(), &ec2.AssociateVpcCidrBlockInput{ + VpcId: &s.scope.VPC().ID, + CidrBlock: &desiredCidrBlock.IPv4CidrBlock, + }) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedAssociateSecondaryCidr", "Failed associating secondary CIDR %q with VPC %v", desiredCidrBlock.IPv4CidrBlock, err) + return err + } - // once IPv6 is supported, we need to modify out.CidrBlockAssociation.AssociationId to out.Ipv6CidrBlockAssociation.AssociationId - record.Eventf(s.scope.InfraCluster(), "SuccessfulAssociateSecondaryCidr", "Associated secondary CIDR with VPC %q", *out.CidrBlockAssociation.AssociationId) + // Once IPv6 is supported, we need to consider both `out.CidrBlockAssociation.AssociationId` and + // `out.Ipv6CidrBlockAssociation.AssociationId` + record.Eventf(s.scope.InfraCluster(), "SuccessfulAssociateSecondaryCidr", "Associated secondary CIDR %q with VPC %q", desiredCidrBlock.IPv4CidrBlock, *out.CidrBlockAssociation.AssociationId) + } return nil } -func (s *Service) disassociateSecondaryCidr() error { - if s.scope.SecondaryCidrBlock() == nil { +func (s *Service) disassociateSecondaryCidrs() error { + secondaryCidrBlocks := s.scope.AllSecondaryCidrBlocks() + if len(secondaryCidrBlocks) == 0 { return nil } @@ -81,17 +94,20 @@ func (s *Service) disassociateSecondaryCidr() error { } if !isVPCPresent(vpcs) { - return errors.Errorf("failed to associateSecondaryCidr as there are no VPCs present") + return errors.Errorf("failed to disassociateSecondaryCidr as there are no VPCs present") } existingAssociations := vpcs.Vpcs[0].CidrBlockAssociationSet - for _, existing := range existingAssociations { - if cmp.Equal(existing.CidrBlock, s.scope.SecondaryCidrBlock()) { - if _, err := s.EC2Client.DisassociateVpcCidrBlockWithContext(context.TODO(), &ec2.DisassociateVpcCidrBlockInput{ - AssociationId: existing.AssociationId, - }); err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedDisassociateSecondaryCidr", "Failed disassociating secondary CIDR with VPC %v", err) - return err + for _, cidrBlockToDelete := range secondaryCidrBlocks { + for _, existing := range existingAssociations { + if *existing.CidrBlock == cidrBlockToDelete.IPv4CidrBlock { + if _, err := s.EC2Client.DisassociateVpcCidrBlockWithContext(context.TODO(), &ec2.DisassociateVpcCidrBlockInput{ + AssociationId: existing.AssociationId, + }); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDisassociateSecondaryCidr", "Failed disassociating secondary CIDR %q from VPC %v", cidrBlockToDelete.IPv4CidrBlock, err) + return err + } + break } } } diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go index 
5be6cf441e..ee5ea3f6f2 100644 --- a/pkg/cloud/services/network/secondarycidr_test.go +++ b/pkg/cloud/services/network/secondarycidr_test.go @@ -65,25 +65,33 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { defer mockCtrl.Finish() tests := []struct { - name string - haveSecondaryCIDR bool - expect func(m *mocks.MockEC2APIMockRecorder) - wantErr bool + name string + fillAWSManagedControlPlaneSecondaryCIDR bool + networkSecondaryCIDRBlocks []infrav1.VpcCidrBlock + expect func(m *mocks.MockEC2APIMockRecorder) + wantErr bool }{ { - name: "Should not associate secondary CIDR if no secondary cidr block info present in control plane", + name: "Should not associate secondary CIDR if no secondary cidr block info present in control plane", + fillAWSManagedControlPlaneSecondaryCIDR: false, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // No calls expected + m.DescribeVpcsWithContext(context.TODO(), gomock.Any()).Times(0) + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.Any()).Times(0) + }, + wantErr: false, }, { - name: "Should return error if unable to describe VPC", - haveSecondaryCIDR: true, + name: "Should return error if unable to describe VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, }, { - name: "Should not associate secondary cidr block if already exist in VPC", - haveSecondaryCIDR: true, + name: "Should not associate secondary cidr block if already exist in VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ @@ -96,29 +104,102 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { }, }, { - name: "Should return error if no VPC found", - haveSecondaryCIDR: true, + name: "Should return error if no VPC found", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil) + m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{}, + }, nil) }, wantErr: true, }, { - name: "Should return error if failed during associating secondary cidr block", - haveSecondaryCIDR: true, + name: "Should return error if failed during associating secondary cidr block", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ { - CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{ - {CidrBlock: aws.String("secondary-cidr-new")}, - }, + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{}, }, }}, nil) m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, }, + { + name: "Should successfully associate secondary CIDR block if none is associated yet", + fillAWSManagedControlPlaneSecondaryCIDR: true, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeVpcsWithContext(context.TODO(), 
gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{}, + }, + }}, nil) + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(&ec2.AssociateVpcCidrBlockOutput{ + CidrBlockAssociation: &ec2.VpcCidrBlockAssociation{ + AssociationId: ptr.To[string]("association-id-success"), + }, + }, nil) + }, + wantErr: false, + }, + { + name: "Should successfully associate missing secondary CIDR blocks", + fillAWSManagedControlPlaneSecondaryCIDR: false, + networkSecondaryCIDRBlocks: []infrav1.VpcCidrBlock{ + { + IPv4CidrBlock: "10.0.1.0/24", + }, + { + IPv4CidrBlock: "10.0.2.0/24", + }, + { + IPv4CidrBlock: "10.0.3.0/24", + }, + { + IPv4CidrBlock: "10.0.4.0/24", + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // Two are simulated to exist... + m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{ + { + AssociationId: ptr.To[string]("association-id-existing-1"), + CidrBlock: ptr.To[string]("10.0.1.0/24"), + }, + { + AssociationId: ptr.To[string]("association-id-existing-3"), + CidrBlock: ptr.To[string]("10.0.3.0/24"), + }, + }, + }, + }}, nil) + + // ...the other two should be created + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.AssociateVpcCidrBlockInput{ + CidrBlock: ptr.To[string]("10.0.2.0/24"), + VpcId: ptr.To[string]("vpc-id"), + })).Return(&ec2.AssociateVpcCidrBlockOutput{ + CidrBlockAssociation: &ec2.VpcCidrBlockAssociation{ + AssociationId: ptr.To[string]("association-id-success-2"), + }, + }, nil) + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.AssociateVpcCidrBlockInput{ + CidrBlock: ptr.To[string]("10.0.4.0/24"), + VpcId: ptr.To[string]("vpc-id"), + })).Return(&ec2.AssociateVpcCidrBlockOutput{ + CidrBlockAssociation: &ec2.VpcCidrBlockAssociation{ + AssociationId: ptr.To[string]("association-id-success-4"), + }, + }, nil) + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -133,9 +214,10 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { mcpScope, err := setupNewManagedControlPlaneScope(cl) g.Expect(err).NotTo(HaveOccurred()) - if !tt.haveSecondaryCIDR { + if !tt.fillAWSManagedControlPlaneSecondaryCIDR { mcpScope.ControlPlane.Spec.SecondaryCidrBlock = nil } + mcpScope.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks = tt.networkSecondaryCIDRBlocks s := NewService(mcpScope) s.EC2Client = ec2Mock @@ -144,7 +226,7 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { tt.expect(ec2Mock.EXPECT()) } - err = s.associateSecondaryCidr() + err = s.associateSecondaryCidrs() if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -159,33 +241,41 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { defer mockCtrl.Finish() tests := []struct { - name string - haveSecondaryCIDR bool - expect func(m *mocks.MockEC2APIMockRecorder) - wantErr bool + name string + fillAWSManagedControlPlaneSecondaryCIDR bool + networkSecondaryCIDRBlocks []infrav1.VpcCidrBlock + expect func(m *mocks.MockEC2APIMockRecorder) + wantErr bool }{ { - name: "Should not disassociate secondary CIDR if no secondary cidr block info present in control plane", + name: "Should not disassociate secondary CIDR if no secondary cidr block info present in control plane", + 
fillAWSManagedControlPlaneSecondaryCIDR: false, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // No calls expected + m.DescribeVpcsWithContext(context.TODO(), gomock.Any()).Times(0) + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Any()).Times(0) + }, + wantErr: false, }, { - name: "Should return error if unable to describe VPC", - haveSecondaryCIDR: true, + name: "Should return error if unable to describe VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, }, { - name: "Should return error if no VPC found", - haveSecondaryCIDR: true, + name: "Should return error if no VPC found", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil) }, wantErr: true, }, { - name: "Should diassociate secondary cidr block if already exist in VPC", - haveSecondaryCIDR: true, + name: "Should diassociate secondary cidr block if already exist in VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ @@ -199,8 +289,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { }, }, { - name: "Should return error if failed to diassociate secondary cidr block", - haveSecondaryCIDR: true, + name: "Should return error if failed to diassociate secondary cidr block", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ @@ -214,6 +304,66 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { }, wantErr: true, }, + { + name: "Should successfully return from disassociating secondary CIDR blocks if none is currently associated", + fillAWSManagedControlPlaneSecondaryCIDR: true, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{}, + }, + }}, nil) + + // No calls expected + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Any()).Times(0) + }, + wantErr: false, + }, + { + name: "Should successfully disassociate existing secondary CIDR blocks", + fillAWSManagedControlPlaneSecondaryCIDR: false, + networkSecondaryCIDRBlocks: []infrav1.VpcCidrBlock{ + { + IPv4CidrBlock: "10.0.1.0/24", + }, + { + IPv4CidrBlock: "10.0.2.0/24", + }, + { + IPv4CidrBlock: "10.0.3.0/24", + }, + { + IPv4CidrBlock: "10.0.4.0/24", + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // Two are simulated to exist... 
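+ // ...and only those two existing associations should be disassociated; the
+ // remaining blocks in the spec have no association and must be skipped.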
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{ + { + AssociationId: ptr.To[string]("association-id-existing-1"), + CidrBlock: ptr.To[string]("10.0.1.0/24"), + }, + { + AssociationId: ptr.To[string]("association-id-existing-3"), + CidrBlock: ptr.To[string]("10.0.3.0/24"), + }, + }, + }, + }}, nil) + + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateVpcCidrBlockInput{ + AssociationId: ptr.To[string]("association-id-existing-1"), // 10.0.1.0/24 (see above) + })).Return(&ec2.DisassociateVpcCidrBlockOutput{}, nil) + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateVpcCidrBlockInput{ + AssociationId: ptr.To[string]("association-id-existing-3"), // 10.0.3.0/24 (see above) + })).Return(&ec2.DisassociateVpcCidrBlockOutput{}, nil) + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -228,9 +378,10 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { mcpScope, err := setupNewManagedControlPlaneScope(cl) g.Expect(err).NotTo(HaveOccurred()) - if !tt.haveSecondaryCIDR { + if !tt.fillAWSManagedControlPlaneSecondaryCIDR { mcpScope.ControlPlane.Spec.SecondaryCidrBlock = nil } + mcpScope.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks = tt.networkSecondaryCIDRBlocks s := NewService(mcpScope) s.EC2Client = ec2Mock @@ -239,7 +390,7 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { tt.expect(ec2Mock.EXPECT()) } - err = s.disassociateSecondaryCidr() + err = s.disassociateSecondaryCidrs() if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/pkg/cloud/services/network/service.go b/pkg/cloud/services/network/service.go index 32f6d8131a..8c223c5e6d 100644 --- a/pkg/cloud/services/network/service.go +++ b/pkg/cloud/services/network/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package network provides a service to manage AWS network resources. package network import ( diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go index c4b7a0c44f..62277cd9fb 100644 --- a/pkg/cloud/services/network/subnets.go +++ b/pkg/cloud/services/network/subnets.go @@ -53,7 +53,6 @@ func (s *Service) reconcileSubnets() error { defer func() { s.scope.SetSubnets(subnets) }() - var ( err error existing infrav1.Subnets @@ -115,6 +114,7 @@ func (s *Service) reconcileSubnets() error { for i, sub := range subnetCIDRs { secondarySub := infrav1.SubnetSpec{ + ID: fmt.Sprintf("%s-subnet-%s-%s", s.scope.Name(), infrav1.SecondarySubnetTagValue, zones[i]), CidrBlock: sub.String(), AvailabilityZone: zones[i], IsPublic: false, @@ -133,10 +133,19 @@ func (s *Service) reconcileSubnets() error { sub := &subnets[i] existingSubnet := existing.FindEqual(sub) if existingSubnet != nil { - subnetTags := sub.Tags + if len(sub.ID) > 0 { + // NOTE: Describing subnets assumes the subnet.ID is the same as the subnet's identifier (i.e. subnet-), + // if we have a subnet ID specified in the spec, we need to restore it. + existingSubnet.ID = sub.ID + } + + // Update subnet spec with the existing subnet details + existingSubnet.DeepCopyInto(sub) + // Make sure tags are up-to-date. 
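+ // The desired tags are derived from the subnet's role and zone via getSubnetTagParams
+ // and ensured against the tags currently on the subnet, retrying with backoff.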
+ subnetTags := sub.Tags if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { - buildParams := s.getSubnetTagParams(unmanagedVPC, existingSubnet.GetResourceID(), existingSubnet.IsPublic, existingSubnet.AvailabilityZone, subnetTags) + buildParams := s.getSubnetTagParams(unmanagedVPC, existingSubnet.GetResourceID(), existingSubnet.IsPublic, existingSubnet.AvailabilityZone, subnetTags, existingSubnet.IsEdge()) tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client)) if err := tagsBuilder.Ensure(existingSubnet.Tags); err != nil { return false, err @@ -146,24 +155,13 @@ func (s *Service) reconcileSubnets() error { if !unmanagedVPC { record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging managed Subnet %q: %v", existingSubnet.GetResourceID(), err) return errors.Wrapf(err, "failed to ensure tags on subnet %q", existingSubnet.GetResourceID()) - } else { - // We may not have a permission to tag unmanaged subnets. - // When tagging unmanaged subnet fails, record an event and proceed. - record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging unmanaged Subnet %q: %v", existingSubnet.GetResourceID(), err) - break } - } - // TODO(vincepri): check if subnet needs to be updated. - - if len(sub.ID) > 0 { - // NOTE: Describing subnets assumes the subnet.ID is the same as the subnet's identifier (i.e. subnet-), - // if we have a subnet ID specified in the spec, we need to restore it. - existingSubnet.ID = sub.ID + // We may not have a permission to tag unmanaged subnets. + // When tagging unmanaged subnet fails, record an event and continue checking subnets. + record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging unmanaged Subnet %q: %v", existingSubnet.GetResourceID(), err) + continue } - - // Update subnet spec with the existing subnet details - existingSubnet.DeepCopyInto(sub) } else if unmanagedVPC { // If there is no existing subnet and we have an umanaged vpc report an error record.Warnf(s.scope.InfraCluster(), "FailedMatchSubnet", "Using unmanaged VPC and failed to find existing subnet for specified subnet id %d, cidr %q", sub.GetResourceID(), sub.CidrBlock) @@ -177,6 +175,14 @@ func (s *Service) reconcileSubnets() error { return errors.New("expected at least 1 subnet but got 0") } + // Reconciling the zone information for the subnets. Subnets are grouped + // by regular zones (availability zones) or edge zones (local zones or wavelength zones) + // based in the zone-type attribute for zone. + if err := s.reconcileZoneInfo(subnets); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedNoZoneInfo", "Expected the zone attributes to be populated to subnet") + return errors.Wrapf(err, "expected the zone attributes to be populated to subnet") + } + // When the VPC is managed by CAPA, we need to create the subnets. 
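+ // (For unmanaged VPCs every subnet in the spec must already exist; a missing
+ // match was reported as FailedMatchSubnet above.)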
if !unmanagedVPC { // Check that we need at least 1 private and 1 public subnet after we have updated the metadata @@ -211,6 +217,35 @@ func (s *Service) reconcileSubnets() error { return nil } +func (s *Service) retrieveZoneInfo(zoneNames []string) ([]*ec2.AvailabilityZone, error) { + zones, err := s.EC2Client.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice(zoneNames), + }) + if err != nil { + record.Eventf(s.scope.InfraCluster(), "FailedDescribeAvailableZones", "Failed getting available zones: %v", err) + return nil, errors.Wrap(err, "failed to describe availability zones") + } + + return zones.AvailabilityZones, nil +} + +// reconcileZoneInfo discover the zones for all subnets, and retrieve +// persist the zone information from resource API, such as Type and +// Parent Zone. +func (s *Service) reconcileZoneInfo(subnets infrav1.Subnets) error { + if len(subnets) > 0 { + zones, err := s.retrieveZoneInfo(subnets.GetUniqueZones()) + if err != nil { + return err + } + // Extract zone attributes from resource API for each subnet. + if err := subnets.SetZoneInfo(zones); err != nil { + return err + } + } + return nil +} + func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) { zones, err := s.getAvailableZones() if err != nil { @@ -240,26 +275,38 @@ func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) { s.scope.Debug("zones selected", "region", s.scope.Region(), "zones", zones) } - // 1 private subnet for each AZ plus 1 other subnet that will be further sub-divided for the public subnets + // 1 private subnet for each AZ plus 1 other subnet that will be further sub-divided for the public subnets or vice versa if + // the subnet schema is set to prefer public subnets. // All subnets will have an ipv4 address for now as well. We aren't supporting ipv6-only yet. numSubnets := len(zones) + 1 var ( - subnetCIDRs []*net.IPNet - publicSubnetCIDRs []*net.IPNet - ipv6SubnetCIDRs []*net.IPNet - publicIPv6SubnetCIDRs []*net.IPNet - privateIPv6SubnetCIDRs []*net.IPNet + subnetCIDRs []*net.IPNet + preferredSubnetCIDRs []*net.IPNet + residualSubnetCIDRs []*net.IPNet + ipv6SubnetCIDRs []*net.IPNet + preferredIPv6SubnetCIDRs []*net.IPNet + residualIPv6SubnetCIDRs []*net.IPNet ) + subnetScheme := infrav1.SubnetSchemaPreferPrivate + if s.scope.VPC().SubnetSchema != nil { + subnetScheme = *s.scope.VPC().SubnetSchema + } + + residualSubnetsName := infrav1.SubnetSchemaPreferPublic.Name() + if subnetScheme == infrav1.SubnetSchemaPreferPublic { + residualSubnetsName = infrav1.SubnetSchemaPreferPrivate.Name() + } + subnetCIDRs, err = cidr.SplitIntoSubnetsIPv4(s.scope.VPC().CidrBlock, numSubnets) if err != nil { return nil, errors.Wrapf(err, "failed splitting VPC CIDR %q into subnets", s.scope.VPC().CidrBlock) } - publicSubnetCIDRs, err = cidr.SplitIntoSubnetsIPv4(subnetCIDRs[0].String(), len(zones)) + residualSubnetCIDRs, err = cidr.SplitIntoSubnetsIPv4(subnetCIDRs[0].String(), len(zones)) if err != nil { - return nil, errors.Wrapf(err, "failed splitting CIDR %q into public subnets", subnetCIDRs[0].String()) + return nil, errors.Wrapf(err, "failed splitting CIDR %q into %s subnets", subnetCIDRs[0].String(), residualSubnetsName) } - privateSubnetCIDRs := append(subnetCIDRs[:0], subnetCIDRs[1:]...) + preferredSubnetCIDRs = append(subnetCIDRs[:0], subnetCIDRs[1:]...) 
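+ // Example with three zones: SplitIntoSubnetsIPv4 returns four blocks; block 0 is
+ // re-split above into the per-zone residual subnets, and blocks 1..3 become the
+ // preferred per-zone subnets.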
if s.scope.VPC().IsIPv6Enabled() { ipv6SubnetCIDRs, err = cidr.SplitIntoSubnetsIPv6(s.scope.VPC().IPv6.CidrBlock, numSubnets) @@ -268,12 +315,23 @@ func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) { } // We need to take the last, so it doesn't conflict with the rest. The subnetID is increment each time by 1. - publicIPv6SubnetCIDRs, err = cidr.SplitIntoSubnetsIPv6(ipv6SubnetCIDRs[len(ipv6SubnetCIDRs)-1].String(), len(zones)) + ipv6SubnetCIDRsStr := ipv6SubnetCIDRs[len(ipv6SubnetCIDRs)-1].String() + residualIPv6SubnetCIDRs, err = cidr.SplitIntoSubnetsIPv6(ipv6SubnetCIDRsStr, len(zones)) if err != nil { - return nil, errors.Wrapf(err, "failed splitting IPv6 CIDR %q into public subnets", ipv6SubnetCIDRs[len(ipv6SubnetCIDRs)-1].String()) + return nil, errors.Wrapf(err, "failed splitting IPv6 CIDR %q into %s subnets", ipv6SubnetCIDRsStr, residualSubnetsName) } // TODO: this might need to be the last instead of the first.. - privateIPv6SubnetCIDRs = append(ipv6SubnetCIDRs[:0], ipv6SubnetCIDRs[1:]...) + preferredIPv6SubnetCIDRs = append(ipv6SubnetCIDRs[:0], ipv6SubnetCIDRs[1:]...) + } + + // By default, the preferred subnets are the private subnets and the residual subnets are the public subnets. + privateSubnetCIDRs, publicSubnetCIDRs := preferredSubnetCIDRs, residualSubnetCIDRs + privateIPv6SubnetCIDRs, publicIPv6SubnetCIDRs := preferredIPv6SubnetCIDRs, residualIPv6SubnetCIDRs + + // If the subnet schema is set to prefer public, we need to swap the private and public subnets. + if subnetScheme == infrav1.SubnetSchemaPreferPublic { + privateSubnetCIDRs, publicSubnetCIDRs = residualSubnetCIDRs, preferredSubnetCIDRs + privateIPv6SubnetCIDRs, publicIPv6SubnetCIDRs = residualIPv6SubnetCIDRs, preferredIPv6SubnetCIDRs } subnets := infrav1.Subnets{} @@ -376,6 +434,9 @@ func (s *Service) describeVpcSubnets() (infrav1.Subnets, error) { if route.GatewayId != nil && strings.HasPrefix(*route.GatewayId, "igw") { spec.IsPublic = true } + if route.CarrierGatewayId != nil && strings.HasPrefix(*route.CarrierGatewayId, "cagw-") { + spec.IsPublic = true + } } } @@ -420,6 +481,28 @@ func (s *Service) createSubnet(sn *infrav1.SubnetSpec) (*infrav1.SubnetSpec, err sn.Tags["Name"] = sn.ID } + // Retrieve zone information used later to change the zone attributes. + if len(sn.AvailabilityZone) > 0 { + zones, err := s.retrieveZoneInfo([]string{sn.AvailabilityZone}) + if err != nil { + return nil, errors.Wrapf(err, "failed to discover zone information for subnet's zone %q", sn.AvailabilityZone) + } + if err = sn.SetZoneInfo(zones); err != nil { + return nil, errors.Wrapf(err, "failed to update zone information for subnet's zone %q", sn.AvailabilityZone) + } + } + + // IPv6 subnets are not generally supported by AWS Local Zones and Wavelength Zones. + // Local Zones have limited zone support for IPv6 subnets: + // https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html#considerations + // Wavelength Zones is currently not supporting IPv6 subnets. + // https://docs.aws.amazon.com/wavelength/latest/developerguide/wavelength-quotas.html#vpc-considerations + if sn.IsIPv6 && sn.IsEdge() { + err := fmt.Errorf("failed to create subnet: IPv6 is not supported with zone type %q", sn.ZoneType) + record.Warnf(s.scope.InfraCluster(), "FailedCreateSubnet", "Failed creating managed Subnet for edge zones: %v", err) + return nil, err + } + // Build the subnet creation request. 
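+ // Tags are attached at creation time via TagSpecifications, built by the same
+ // getSubnetTagParams helper used during reconciliation (isEdge suppresses the ELB role tags).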
input := &ec2.CreateSubnetInput{ VpcId: aws.String(s.scope.VPC().ID), @@ -428,7 +511,7 @@ func (s *Service) createSubnet(sn *infrav1.SubnetSpec) (*infrav1.SubnetSpec, err TagSpecifications: []*ec2.TagSpecification{ tags.BuildParamsToTagSpecification( ec2.ResourceTypeSubnet, - s.getSubnetTagParams(false, services.TemporaryResourceID, sn.IsPublic, sn.AvailabilityZone, sn.Tags), + s.getSubnetTagParams(false, services.TemporaryResourceID, sn.IsPublic, sn.AvailabilityZone, sn.Tags, sn.IsEdge()), ), }, } @@ -472,7 +555,12 @@ func (s *Service) createSubnet(sn *infrav1.SubnetSpec) (*infrav1.SubnetSpec, err record.Eventf(s.scope.InfraCluster(), "SuccessfulModifySubnetAttributes", "Modified managed Subnet %q attributes", *out.Subnet.SubnetId) } - if sn.IsPublic { + // AWS Wavelength Zone's public subnets does not support to map Carrier IP address on launch, and + // MapPublicIpOnLaunch option[1] set to the subnet will fail, instead set the EC2 instance's network + // interface to associate Carrier IP Address on launch[2]. + // [1] https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySubnetAttribute.html + // [2] https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceNetworkInterfaceSpecification.html + if sn.IsPublic && !sn.IsEdgeWavelength() { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if _, err := s.EC2Client.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ SubnetId: out.Subnet.SubnetId, @@ -546,7 +634,7 @@ func (s *Service) deleteSubnet(id string) error { return nil } -func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool, zone string, manualTags infrav1.Tags) infrav1.BuildParams { +func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool, zone string, manualTags infrav1.Tags, isEdge bool) infrav1.BuildParams { var role string additionalTags := make(map[string]string) @@ -555,14 +643,22 @@ func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool, if public { role = infrav1.PublicRoleTagValue - additionalTags[externalLoadBalancerTag] = "1" + // Edge subnets should not have ELB tags to be selected by CCM to create load balancers. 
+ if !isEdge { + additionalTags[externalLoadBalancerTag] = "1" + } } else { role = infrav1.PrivateRoleTagValue - additionalTags[internalLoadBalancerTag] = "1" + if !isEdge { + additionalTags[internalLoadBalancerTag] = "1" + } } - // Add tag needed for Service type=LoadBalancer - additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleShared) + if unmanagedVPC { + additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleShared) + } else { + additionalTags[infrav1.ClusterAWSCloudProviderTagKey(s.scope.KubernetesClusterName())] = string(infrav1.ResourceLifecycleOwned) + } } if !unmanagedVPC { @@ -590,10 +686,10 @@ func (s *Service) getSubnetTagParams(unmanagedVPC bool, id string, public bool, Role: aws.String(role), Additional: additionalTags, } - } else { - return infrav1.BuildParams{ - ResourceID: id, - Additional: additionalTags, - } + } + + return infrav1.BuildParams{ + ResourceID: id, + Additional: additionalTags, } } diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go index f7c02d4359..447b1713b8 100644 --- a/pkg/cloud/services/network/subnets_test.go +++ b/pkg/cloud/services/network/subnets_test.go @@ -20,18 +20,22 @@ import ( "context" "encoding/json" "fmt" + "reflect" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/golang/mock/gomock" "github.com/google/go-cmp/cmp" + . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -42,12 +46,50 @@ const ( ) func TestReconcileSubnets(t *testing.T) { + // SubnetSpecs for different zone types. + stubSubnetsAvailabilityZone := []infrav1.SubnetSpec{ + {ID: "subnet-private-us-east-1a", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.1.0/24", IsPublic: false}, + {ID: "subnet-public-us-east-1a", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.2.0/24", IsPublic: true}, + } + stubAdditionalSubnetsAvailabilityZone := []infrav1.SubnetSpec{ + {ID: "subnet-private-us-east-1b", AvailabilityZone: "us-east-1b", CidrBlock: "10.0.3.0/24", IsPublic: false}, + {ID: "subnet-public-us-east-1b", AvailabilityZone: "us-east-1b", CidrBlock: "10.0.4.0/24", IsPublic: true}, + } + stubSubnetsLocalZone := []infrav1.SubnetSpec{ + {ID: "subnet-private-us-east-1-nyc-1a", AvailabilityZone: "us-east-1-nyc-1a", CidrBlock: "10.0.5.0/24", IsPublic: false}, + {ID: "subnet-public-us-east-1-nyc-1a", AvailabilityZone: "us-east-1-nyc-1a", CidrBlock: "10.0.6.0/24", IsPublic: true}, + } + stubSubnetsWavelengthZone := []infrav1.SubnetSpec{ + {ID: "subnet-private-us-east-1-wl1-nyc-wlz-1", AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", CidrBlock: "10.0.7.0/24", IsPublic: false}, + {ID: "subnet-public-us-east-1-wl1-nyc-wlz-1", AvailabilityZone: "us-east-1-wl1-nyc-wlz-1", CidrBlock: "10.0.8.0/24", IsPublic: true}, + } + // TODO(mtulio): replace by slices.Concat(...) 
on go 1.22+ + stubSubnetsAllZones := stubSubnetsAvailabilityZone + stubSubnetsAllZones = append(stubSubnetsAllZones, stubSubnetsLocalZone...) + stubSubnetsAllZones = append(stubSubnetsAllZones, stubSubnetsWavelengthZone...) + + // NetworkSpec with subnets in zone type availability-zone + stubNetworkSpecWithSubnets := &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + Subnets: stubSubnetsAvailabilityZone, + } + // NetworkSpec with subnets in zone types availability-zone, local-zone and wavelength-zone + stubNetworkSpecWithSubnetsEdge := stubNetworkSpecWithSubnets.DeepCopy() + stubNetworkSpecWithSubnetsEdge.Subnets = stubSubnetsAllZones + testCases := []struct { name string input ScopeBuilder expect func(m *mocks.MockEC2APIMockRecorder) errorExpected bool + errorMessageExpected string tagUnmanagedNetworkResources bool + optionalExpectSubnets infrav1.Subnets }{ { name: "Unmanaged VPC, disable TagUnmanagedNetworkResources, 2 existing subnets in vpc, 2 subnet in spec, subnets match, with routes, should succeed", @@ -130,6 +172,16 @@ func TestReconcileSubnets(t *testing.T) { }, }), gomock.Any()).Return(nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) }, tagUnmanagedNetworkResources: false, }, @@ -244,6 +296,16 @@ func TestReconcileSubnets(t *testing.T) { }, })). Return(&ec2.CreateTagsOutput{}, nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) }, tagUnmanagedNetworkResources: true, }, @@ -382,6 +444,16 @@ func TestReconcileSubnets(t *testing.T) { }, })). Return(&ec2.CreateTagsOutput{}, nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) }, tagUnmanagedNetworkResources: true, }, @@ -480,12 +552,22 @@ func TestReconcileSubnets(t *testing.T) { }, })). Return(&ec2.CreateTagsOutput{}, nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) }, errorExpected: false, tagUnmanagedNetworkResources: true, }, { - name: "Unmanaged VPC, 2 existing matching subnets, subnet tagging fails, should succeed", + name: "Unmanaged VPC, one existing matching subnets, subnet tagging fails, should succeed", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, @@ -494,9 +576,6 @@ func TestReconcileSubnets(t *testing.T) { { ID: "subnet-1", }, - { - ID: "subnet-2", - }, }, }).WithTagUnmanagedNetworkResources(true), expect: func(m *mocks.MockEC2APIMockRecorder) { @@ -521,13 +600,6 @@ func TestReconcileSubnets(t *testing.T) { CidrBlock: aws.String("10.0.10.0/24"), MapPublicIpOnLaunch: aws.Bool(false), }, - { - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - AvailabilityZone: aws.String("us-east-1a"), - CidrBlock: aws.String("10.0.20.0/24"), - MapPublicIpOnLaunch: aws.Bool(false), - }, }, }, nil) @@ -566,6 +638,10 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) + stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{ + {ZoneName: aws.String("us-east-1a"), ZoneType: aws.String("availability-zone")}, + }).AnyTimes() + m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ Resources: aws.StringSlice([]string{"subnet-1"}), Tags: []*ec2.Tag{ @@ -579,18 +655,32 @@ func TestReconcileSubnets(t *testing.T) { }, }, })). - Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")) + Return(&ec2.CreateTagsOutput{}, nil) }, tagUnmanagedNetworkResources: true, }, { - name: "Unmanaged VPC, 2 existing subnets in vpc, 0 subnet in spec, should fail", + name: "Unmanaged VPC, one existing matching subnets, subnet tagging fails with subnet update, should succeed", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, }, - Subnets: []infrav1.SubnetSpec{}, + Subnets: []infrav1.SubnetSpec{ + { + ID: "subnet-1", + }, + }, }).WithTagUnmanagedNetworkResources(true), + optionalExpectSubnets: infrav1.Subnets{ + { + ID: "subnet-1", + ResourceID: "subnet-1", + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.10.0/24", + IsPublic: true, + Tags: infrav1.Tags{}, + }, + }, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ @@ -613,18 +703,28 @@ func TestReconcileSubnets(t *testing.T) { CidrBlock: aws.String("10.0.10.0/24"), MapPublicIpOnLaunch: aws.Bool(false), }, - { - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - AvailabilityZone: aws.String("us-east-1a"), - CidrBlock: aws.String("10.0.20.0/24"), - MapPublicIpOnLaunch: aws.Bool(false), - }, }, }, nil) m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). 
- Return(&ec2.DescribeRouteTablesOutput{}, nil) + Return(&ec2.DescribeRouteTablesOutput{ + RouteTables: []*ec2.RouteTable{ + { + VpcId: aws.String(subnetsVPCID), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: aws.String("subnet-1"), + RouteTableId: aws.String("rt-12345"), + }, + }, + Routes: []*ec2.Route{ + { + GatewayId: aws.String("igw-12345"), + }, + }, + }, + }, + }, nil) m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{ @@ -640,84 +740,61 @@ func TestReconcileSubnets(t *testing.T) { }, }), gomock.Any()).Return(nil) - }, - errorExpected: true, - tagUnmanagedNetworkResources: true, - }, - { - name: "Unmanaged VPC, 0 existing subnets in vpc, 2 subnets in spec, should fail", - input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ - VPC: infrav1.VPCSpec{ - ID: subnetsVPCID, - }, - Subnets: []infrav1.SubnetSpec{ - { - AvailabilityZone: "us-east-1a", - CidrBlock: "10.1.0.0/16", - IsPublic: false, - }, - { - AvailabilityZone: "us-east-1b", - CidrBlock: "10.2.0.0/16", - IsPublic: true, - }, - }, - }).WithTagUnmanagedNetworkResources(true), - expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ - Filters: []*ec2.Filter{ + + stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{ + {ZoneName: aws.String("us-east-1a")}, + }).AnyTimes() + + m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{"subnet-1"}), + Tags: []*ec2.Tag{ { - Name: aws.String("state"), - Values: []*string{aws.String("pending"), aws.String("available")}, + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("shared"), }, { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), }, }, })). - Return(&ec2.DescribeSubnetsOutput{}, nil) - - m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). 
- Return(&ec2.DescribeRouteTablesOutput{}, nil) - - m.DescribeNatGatewaysPagesWithContext(context.TODO(), - gomock.Eq(&ec2.DescribeNatGatewaysInput{ - Filter: []*ec2.Filter{ - { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, - }, - { - Name: aws.String("state"), - Values: []*string{aws.String("pending"), aws.String("available")}, - }, - }, - }), - gomock.Any()).Return(nil) + Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")) }, - errorExpected: true, tagUnmanagedNetworkResources: true, }, { - name: "Unmanaged VPC, 2 subnets exist, 2 private subnet in spec, should succeed", + name: "Unmanaged VPC, 2 existing matching subnets, subnet tagging fails with subnet update, should succeed", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, }, Subnets: []infrav1.SubnetSpec{ { - AvailabilityZone: "us-east-1a", - CidrBlock: "10.0.10.0/24", - IsPublic: false, + ID: "subnet-1", }, { - AvailabilityZone: "us-east-1b", - CidrBlock: "10.0.20.0/24", - IsPublic: false, + ID: "subnet-2", }, }, }).WithTagUnmanagedNetworkResources(true), + optionalExpectSubnets: infrav1.Subnets{ + { + ID: "subnet-1", + ResourceID: "subnet-1", + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.10.0/24", + IsPublic: true, + Tags: infrav1.Tags{}, + }, + { + ID: "subnet-2", + ResourceID: "subnet-2", + AvailabilityZone: "us-east-1b", + CidrBlock: "10.0.11.0/24", + IsPublic: true, + Tags: infrav1.Tags{}, + }, + }, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ @@ -738,20 +815,51 @@ func TestReconcileSubnets(t *testing.T) { SubnetId: aws.String("subnet-1"), AvailabilityZone: aws.String("us-east-1a"), CidrBlock: aws.String("10.0.10.0/24"), - MapPublicIpOnLaunch: aws.Bool(false), + MapPublicIpOnLaunch: aws.Bool(true), }, { VpcId: aws.String(subnetsVPCID), SubnetId: aws.String("subnet-2"), - AvailabilityZone: aws.String("us-east-1a"), - CidrBlock: aws.String("10.0.20.0/24"), + AvailabilityZone: aws.String("us-east-1b"), + CidrBlock: aws.String("10.0.11.0/24"), MapPublicIpOnLaunch: aws.Bool(false), }, }, }, nil) m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). 
- Return(&ec2.DescribeRouteTablesOutput{}, nil) + Return(&ec2.DescribeRouteTablesOutput{ + RouteTables: []*ec2.RouteTable{ + { + VpcId: aws.String(subnetsVPCID), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: aws.String("subnet-1"), + RouteTableId: aws.String("rt-12345"), + }, + }, + Routes: []*ec2.Route{ + { + GatewayId: aws.String("igw-12345"), + }, + }, + }, + { + VpcId: aws.String(subnetsVPCID), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: aws.String("subnet-2"), + RouteTableId: aws.String("rt-00000"), + }, + }, + Routes: []*ec2.Route{ + { + GatewayId: aws.String("igw-12345"), + }, + }, + }, + }, + }, nil) m.DescribeNatGatewaysPagesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeNatGatewaysInput{ @@ -768,7 +876,11 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) - m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{ + {ZoneName: aws.String("us-east-1a")}, {ZoneName: aws.String("us-east-1b")}, + }).AnyTimes() + + subnet1tag := m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ Resources: aws.StringSlice([]string{"subnet-1"}), Tags: []*ec2.Tag{ { @@ -776,12 +888,12 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("shared"), }, { - Key: aws.String("kubernetes.io/role/internal-elb"), + Key: aws.String("kubernetes.io/role/elb"), Value: aws.String("1"), }, }, })). - Return(&ec2.CreateTagsOutput{}, nil) + Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")) m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ Resources: aws.StringSlice([]string{"subnet-2"}), @@ -791,40 +903,32 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("shared"), }, { - Key: aws.String("kubernetes.io/role/internal-elb"), + Key: aws.String("kubernetes.io/role/elb"), Value: aws.String("1"), }, }, })). - Return(&ec2.CreateTagsOutput{}, nil) + Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")).After(subnet1tag) }, - errorExpected: false, tagUnmanagedNetworkResources: true, }, { - name: "Managed VPC, no subnets exist, 1 private and 1 public subnet in spec, create both", + name: "Unmanaged VPC, 2 existing matching subnets, subnet tagging fails second call, should succeed", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, - Tags: infrav1.Tags{ - infrav1.ClusterTagKey("test-cluster"): "owned", - }, }, Subnets: []infrav1.SubnetSpec{ { - AvailabilityZone: "us-east-1a", - CidrBlock: "10.1.0.0/16", - IsPublic: false, + ID: "subnet-1", }, { - AvailabilityZone: "us-east-1b", - CidrBlock: "10.2.0.0/16", - IsPublic: true, + ID: "subnet-2", }, }, - }), + }).WithTagUnmanagedNetworkResources(true), expect: func(m *mocks.MockEC2APIMockRecorder) { - describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ { Name: aws.String("state"), @@ -836,44 +940,1324 @@ func TestReconcileSubnets(t *testing.T) { }, }, })). - Return(&ec2.DescribeSubnetsOutput{}, nil) - - m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). 
- Return(&ec2.DescribeRouteTablesOutput{}, nil) - - m.DescribeNatGatewaysPagesWithContext(context.TODO(), - gomock.Eq(&ec2.DescribeNatGatewaysInput{ - Filter: []*ec2.Filter{ + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{ { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.10.0/24"), + MapPublicIpOnLaunch: aws.Bool(false), }, { - Name: aws.String("state"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + AvailabilityZone: aws.String("us-east-1b"), + CidrBlock: aws.String("10.0.20.0/24"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, + }, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{ + RouteTables: []*ec2.RouteTable{ + { + VpcId: aws.String(subnetsVPCID), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: aws.String("subnet-1"), + RouteTableId: aws.String("rt-12345"), + }, + }, + Routes: []*ec2.Route{ + { + GatewayId: aws.String("igw-12345"), + }, + }, + }, + { + VpcId: aws.String(subnetsVPCID), + Associations: []*ec2.RouteTableAssociation{ + { + SubnetId: aws.String("subnet-2"), + RouteTableId: aws.String("rt-22222"), + }, + }, + Routes: []*ec2.Route{ + { + GatewayId: aws.String("igw-12345"), + }, + }, + }, + }, + }, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{ + {ZoneName: aws.String("us-east-1a")}, {ZoneName: aws.String("us-east-1b")}, + }).AnyTimes() + + secondSubnetTag := m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{"subnet-1"}), + Tags: []*ec2.Tag{ + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("shared"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + }, + })). + Return(&ec2.CreateTagsOutput{}, nil) + + stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{ + {ZoneName: aws.String("us-east-1a"), ZoneType: aws.String("availability-zone")}, + {ZoneName: aws.String("us-east-1b"), ZoneType: aws.String("availability-zone")}, + }).AnyTimes() + + m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{"subnet-2"}), + Tags: []*ec2.Tag{ + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("shared"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + }, + })). 
+ Return(&ec2.CreateTagsOutput{}, fmt.Errorf("tagging failed")).After(secondSubnetTag) + }, + tagUnmanagedNetworkResources: true, + }, + { + name: "Unmanaged VPC, 2 existing subnets in vpc, 0 subnet in spec, should fail", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + }, + Subnets: []infrav1.SubnetSpec{}, + }).WithTagUnmanagedNetworkResources(true), + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{ + { + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.10.0/24"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + { + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.20.0/24"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, + }, nil) + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + }, + errorExpected: true, + tagUnmanagedNetworkResources: true, + }, + { + name: "Unmanaged VPC, 0 existing subnets in vpc, 2 subnets in spec, should fail", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + }, + Subnets: []infrav1.SubnetSpec{ + { + AvailabilityZone: "us-east-1a", + CidrBlock: "10.1.0.0/16", + IsPublic: false, + }, + { + AvailabilityZone: "us-east-1b", + CidrBlock: "10.2.0.0/16", + IsPublic: true, + }, + }, + }).WithTagUnmanagedNetworkResources(true), + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). 
+ Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + }, + errorExpected: true, + tagUnmanagedNetworkResources: true, + }, + { + name: "Unmanaged VPC, 2 subnets exist, 2 private subnet in spec, should succeed", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + }, + Subnets: []infrav1.SubnetSpec{ + { + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.10.0/24", + IsPublic: false, + }, + { + AvailabilityZone: "us-east-1b", + CidrBlock: "10.0.20.0/24", + IsPublic: false, + }, + }, + }).WithTagUnmanagedNetworkResources(true), + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{ + { + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.10.0/24"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + { + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.20.0/24"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, + }, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{"subnet-1"}), + Tags: []*ec2.Tag{ + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("shared"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + }, + })). + Return(&ec2.CreateTagsOutput{}, nil) + + m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{"subnet-2"}), + Tags: []*ec2.Tag{ + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("shared"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + }, + })). + Return(&ec2.CreateTagsOutput{}, nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) + }, + errorExpected: false, + tagUnmanagedNetworkResources: true, + }, + { + name: "Managed VPC, no subnets exist, 1 private and 1 public subnet in spec, create both", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + Subnets: []infrav1.SubnetSpec{ + { + AvailabilityZone: "us-east-1a", + CidrBlock: "10.1.0.0/16", + IsPublic: false, + }, + { + AvailabilityZone: "us-east-1b", + CidrBlock: "10.2.0.0/16", + IsPublic: true, + }, + }, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.1.0.0/16"), + AvailabilityZone: aws.String("us-east-1a"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-private-us-east-1a"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("private"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.1.0.0/16"), + AvailabilityZone: aws.String("us-east-1a"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(describeCall) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
+ After(firstSubnet) + + secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.2.0.0/16"), + AvailabilityZone: aws.String("us-east-1b"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public-us-east-1b"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.2.0.0/16"), + AvailabilityZone: aws.String("us-east-1a"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(firstSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(secondSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-2"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(secondSubnet) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + }, + }, + { + name: "Managed VPC, no subnets exist, 1 private subnet in spec (no public subnet), should fail", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + Subnets: []infrav1.SubnetSpec{ + { + AvailabilityZone: "us-east-1a", + CidrBlock: "10.1.0.0/16", + IsPublic: false, + }, + }, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) + }, + errorExpected: true, + }, + { + name: "Managed VPC, no existing subnets exist, one az, expect one private and one public from default", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + CidrBlock: defaultVPCCidr, + }, + Subnets: []infrav1.SubnetSpec{}, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(describeCall) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(firstSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). 
+ After(firstSubnet) + + secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-private-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("private"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(firstSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(secondSubnet) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) + }, + }, + { + name: "Managed IPv6 VPC, no existing subnets exist, one az, expect one private and one public from default", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + CidrBlock: defaultVPCCidr, + IPv6: &infrav1.IPv6{ + CidrBlock: "2001:db8:1234:1a01::/56", + PoolID: "amazon", + }, + }, + Subnets: []infrav1.SubnetSpec{}, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + }). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.0.0/17"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(describeCall) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(firstSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(firstSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-2"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(firstSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). 
+ After(firstSubnet) + + secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-private-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("private"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.128.0/17"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(firstSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(secondSubnet) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) + }, + }, + { + name: "Managed VPC, no existing subnets exist, two az's, expect two private and two public from default", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + CidrBlock: defaultVPCCidr, + }, + Subnets: []infrav1.SubnetSpec{}, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + }, + }), + gomock.Any()).Return(nil) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + // Zone1 + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1b"}), + })). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).MaxTimes(2) + + zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.0.0/19"), + AvailabilityZone: aws.String("us-east-1b"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public-us-east-1b"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.0.0/19"), + AvailabilityZone: aws.String("us-east-1b"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(describeCall) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(zone1PublicSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone1PublicSubnet) + + zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.64.0/18"), + AvailabilityZone: aws.String("us-east-1b"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-private-us-east-1b"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("private"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.64.0/18"), + AvailabilityZone: aws.String("us-east-1b"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(zone1PublicSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
+ After(zone1PrivateSubnet) + + // zone 2 + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + zone2PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.32.0/19"), + AvailabilityZone: aws.String("us-east-1c"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.32.0/19"), + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(zone1PrivateSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(zone2PublicSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone2PublicSubnet) + + zone2PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.128.0/18"), + AvailabilityZone: aws.String("us-east-1c"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-private-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("private"), + }, + }, + }, + }, + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.128.0/18"), + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(zone2PublicSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
+ After(zone2PrivateSubnet) + }, + }, + { + name: "Managed VPC, no existing subnets exist, two az's, max num azs is 1, expect one private and one public from default", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + CidrBlock: defaultVPCCidr, + AvailabilityZoneUsageLimit: aws.Int(1), + AvailabilityZoneSelection: &infrav1.AZSelectionSchemeOrdered, + }, + Subnets: []infrav1.SubnetSpec{}, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) + + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) + + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(subnetsVPCID)}, + }, + { + Name: aws.String("state"), Values: []*string{aws.String("pending"), aws.String("available")}, }, }, }), gomock.Any()).Return(nil) - firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.1.0.0/16"), - AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1b"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-private-us-east-1a"), + Value: aws.String("test-cluster-subnet-public-us-east-1b"), }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { - Key: aws.String("kubernetes.io/role/internal-elb"), + Key: aws.String("kubernetes.io/role/elb"), Value: aws.String("1"), }, { @@ -882,7 +2266,7 @@ func TestReconcileSubnets(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("private"), + Value: aws.String("public"), }, }, }, @@ -892,19 +2276,28 @@ func TestReconcileSubnets(t *testing.T) { Subnet: &ec2.Subnet{ VpcId: aws.String(subnetsVPCID), SubnetId: aws.String("subnet-1"), - CidrBlock: aws.String("10.1.0.0/16"), - AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1b"), MapPublicIpOnLaunch: aws.Bool(false), }, }, nil). After(describeCall) m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
- After(firstSubnet) + After(zone1PublicSubnet) - secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone1PublicSubnet) + + zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.2.0.0/16"), + CidrBlock: aws.String("10.0.128.0/17"), AvailabilityZone: aws.String("us-east-1b"), TagSpecifications: []*ec2.TagSpecification{ { @@ -912,14 +2305,14 @@ func TestReconcileSubnets(t *testing.T) { Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-public-us-east-1b"), + Value: aws.String("test-cluster-subnet-private-us-east-1b"), }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { - Key: aws.String("kubernetes.io/role/elb"), + Key: aws.String("kubernetes.io/role/internal-elb"), Value: aws.String("1"), }, { @@ -928,7 +2321,7 @@ func TestReconcileSubnets(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("public"), + Value: aws.String("private"), }, }, }, @@ -938,28 +2331,19 @@ func TestReconcileSubnets(t *testing.T) { Subnet: &ec2.Subnet{ VpcId: aws.String(subnetsVPCID), SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.2.0.0/16"), - AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1b"), MapPublicIpOnLaunch: aws.Bool(false), }, }, nil). - After(firstSubnet) + After(zone1PublicSubnet) m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(secondSubnet) - - m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ - MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ - Value: aws.Bool(true), - }, - SubnetId: aws.String("subnet-2"), - }). - Return(&ec2.ModifySubnetAttributeOutput{}, nil). - After(secondSubnet) + After(zone1PrivateSubnet) }, }, { - name: "Managed VPC, no subnets exist, 1 private subnet in spec (no public subnet), should fail", + name: "Managed VPC, existing public subnet, 2 subnets in spec, should create 1 subnet", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, @@ -969,8 +2353,14 @@ func TestReconcileSubnets(t *testing.T) { }, Subnets: []infrav1.SubnetSpec{ { + ID: "subnet-1", AvailabilityZone: "us-east-1a", - CidrBlock: "10.1.0.0/16", + CidrBlock: "10.0.0.0/17", + IsPublic: true, + }, + { + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.128.0/17", IsPublic: false, }, }, @@ -988,64 +2378,35 @@ func TestReconcileSubnets(t *testing.T) { }, }, })). - Return(&ec2.DescribeSubnetsOutput{}, nil) - - m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). 
- Return(&ec2.DescribeRouteTablesOutput{}, nil) - - m.DescribeNatGatewaysPagesWithContext(context.TODO(), - gomock.Eq(&ec2.DescribeNatGatewaysInput{ - Filter: []*ec2.Filter{ - { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, - }, - { - Name: aws.String("state"), - Values: []*string{aws.String("pending"), aws.String("available")}, - }, - }, - }), - gomock.Any()).Return(nil) - }, - errorExpected: true, - }, - { - name: "Managed VPC, no existing subnets exist, one az, expect one private and one public from default", - input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ - VPC: infrav1.VPCSpec{ - ID: subnetsVPCID, - Tags: infrav1.Tags{ - infrav1.ClusterTagKey("test-cluster"): "owned", - }, - CidrBlock: defaultVPCCidr, - }, - Subnets: []infrav1.SubnetSpec{}, - }), - expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). - Return(&ec2.DescribeAvailabilityZonesOutput{ - AvailabilityZones: []*ec2.AvailabilityZone{ + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{ { - ZoneName: aws.String("us-east-1c"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.0.0/17"), + Tags: []*ec2.Tag{ + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + }, }, }, }, nil) - describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("state"), - Values: []*string{aws.String("pending"), aws.String("available")}, - }, - { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, - }, - }, - })). - Return(&ec2.DescribeSubnetsOutput{}, nil) - m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). Return(&ec2.DescribeRouteTablesOutput{}, nil) @@ -1064,24 +2425,24 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) - firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.0.0/17"), - AvailabilityZone: aws.String("us-east-1c"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1a"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-public-us-east-1c"), + Value: aws.String("test-cluster-subnet-private-us-east-1a"), }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { - Key: aws.String("kubernetes.io/role/elb"), + Key: aws.String("kubernetes.io/role/internal-elb"), Value: aws.String("1"), }, { @@ -1090,7 +2451,7 @@ func TestReconcileSubnets(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("public"), + Value: aws.String("private"), }, }, }, @@ -1098,101 +2459,56 @@ func TestReconcileSubnets(t *testing.T) { })). 
Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-1"), - CidrBlock: aws.String("10.0.0.0/17"), - AvailabilityZone: aws.String("us-east-1c"), - MapPublicIpOnLaunch: aws.Bool(false), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1a"), }, - }, nil). - After(describeCall) + }, nil) - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(firstSubnet) + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()) - m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ - MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ - Value: aws.Bool(true), - }, - SubnetId: aws.String("subnet-1"), - }). - Return(&ec2.ModifySubnetAttributeOutput{}, nil). - After(firstSubnet) + // Public subnet + m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})). + Return(nil, nil) - secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ - VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1c"), - TagSpecifications: []*ec2.TagSpecification{ - { - ResourceType: aws.String("subnet"), - Tags: []*ec2.Tag{ - { - Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-private-us-east-1c"), - }, - { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), - }, - { - Key: aws.String("kubernetes.io/role/internal-elb"), - Value: aws.String("1"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("private"), - }, + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), }, }, - }, - })). - Return(&ec2.CreateSubnetOutput{ - Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1c"), - MapPublicIpOnLaunch: aws.Bool(false), - }, - }, nil). - After(firstSubnet) - - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(secondSubnet) + }, nil).AnyTimes() }, }, { - name: "Managed IPv6 VPC, no existing subnets exist, one az, expect one private and one public from default", + name: "Managed VPC, existing public subnet, 2 subnets in spec, should create 1 subnet, custom Name tag", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, Tags: infrav1.Tags{ infrav1.ClusterTagKey("test-cluster"): "owned", }, - CidrBlock: defaultVPCCidr, - IPv6: &infrav1.IPv6{ - CidrBlock: "2001:db8:1234:1a01::/56", - PoolID: "amazon", + }, + Subnets: []infrav1.SubnetSpec{ + { + ID: "subnet-1", + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.0.0/17", + IsPublic: true, + }, + { + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.128.0/17", + IsPublic: false, + Tags: map[string]string{"Name": "custom-sub"}, }, }, - Subnets: []infrav1.SubnetSpec{}, }), expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
- Return(&ec2.DescribeAvailabilityZonesOutput{ - AvailabilityZones: []*ec2.AvailabilityZone{ - { - ZoneName: aws.String("us-east-1c"), - }, - }, - }, nil) - - describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ { Name: aws.String("state"), @@ -1204,7 +2520,34 @@ func TestReconcileSubnets(t *testing.T) { }, }, })). - Return(&ec2.DescribeSubnetsOutput{}, nil) + Return(&ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{ + { + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + AvailabilityZone: aws.String("us-east-1a"), + CidrBlock: aws.String("10.0.0.0/17"), + Tags: []*ec2.Tag{ + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("public"), + }, + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-public"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + }, + }, + }, + }, nil) m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). Return(&ec2.DescribeRouteTablesOutput{}, nil) @@ -1224,25 +2567,24 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) - firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.0.0/17"), - AvailabilityZone: aws.String("us-east-1c"), - Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1a"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-public-us-east-1c"), + Value: aws.String("custom-sub"), // must use the provided `Name` tag, not generate a name }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { - Key: aws.String("kubernetes.io/role/elb"), + Key: aws.String("kubernetes.io/role/internal-elb"), Value: aws.String("1"), }, { @@ -1251,7 +2593,7 @@ func TestReconcileSubnets(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("public"), + Value: aws.String("private"), }, }, }, @@ -1259,137 +2601,58 @@ func TestReconcileSubnets(t *testing.T) { })). Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-1"), - CidrBlock: aws.String("10.0.0.0/17"), - AssignIpv6AddressOnCreation: aws.Bool(true), - Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ - { - AssociationId: aws.String("amazon"), - Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), - Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ - State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), - }, - }, - }, - AvailabilityZone: aws.String("us-east-1c"), - MapPublicIpOnLaunch: aws.Bool(false), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1a"), }, - }, nil). - After(describeCall) - - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
- After(firstSubnet) - - m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ - AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ - Value: aws.Bool(true), - }, - SubnetId: aws.String("subnet-1"), - }). - Return(&ec2.ModifySubnetAttributeOutput{}, nil). - After(firstSubnet) + }, nil) - m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ - AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ - Value: aws.Bool(true), - }, - SubnetId: aws.String("subnet-2"), - }). - Return(&ec2.ModifySubnetAttributeOutput{}, nil). - After(firstSubnet) + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()) - m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ - MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ - Value: aws.Bool(true), - }, - SubnetId: aws.String("subnet-1"), - }). - Return(&ec2.ModifySubnetAttributeOutput{}, nil). - After(firstSubnet) + // Public subnet + m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})). + Return(nil, nil) - secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ - VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1c"), - Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), - TagSpecifications: []*ec2.TagSpecification{ - { - ResourceType: aws.String("subnet"), - Tags: []*ec2.Tag{ - { - Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-private-us-east-1c"), - }, - { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), - }, - { - Key: aws.String("kubernetes.io/role/internal-elb"), - Value: aws.String("1"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("private"), - }, - }, - }, - }, - })). - Return(&ec2.CreateSubnetOutput{ - Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.128.0/17"), - AssignIpv6AddressOnCreation: aws.Bool(true), - Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ - { - AssociationId: aws.String("amazon"), - Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), - Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ - State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), - }, - }, + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), }, - AvailabilityZone: aws.String("us-east-1c"), - MapPublicIpOnLaunch: aws.Bool(false), }, - }, nil). - After(firstSubnet) - - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(secondSubnet) + }, nil).AnyTimes() }, }, { - name: "Managed VPC, no existing subnets exist, two az's, expect two private and two public from default", - input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ - VPC: infrav1.VPCSpec{ - ID: subnetsVPCID, - Tags: infrav1.Tags{ - infrav1.ClusterTagKey("test-cluster"): "owned", + name: "With ManagedControlPlaneScope, Managed VPC, no existing subnets exist, two az's, expect two private and two public from default, created with tag including eksClusterName not a name of Cluster resource", + input: NewManagedControlPlaneScope(). 
+ WithEKSClusterName("test-eks-cluster"). + WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + CidrBlock: defaultVPCCidr, }, - CidrBlock: defaultVPCCidr, - }, - Subnets: []infrav1.SubnetSpec{}, - }), + Subnets: []infrav1.SubnetSpec{}, + }), expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). Return(&ec2.DescribeAvailabilityZonesOutput{ AvailabilityZones: []*ec2.AvailabilityZone{ { ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), }, { ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), }, }, - }, nil) + }, nil).AnyTimes() describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ @@ -1423,6 +2686,17 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) + // Zone 1 subnet. + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), CidrBlock: aws.String("10.0.0.0/19"), @@ -1436,8 +2710,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-public-us-east-1b"), }, { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -1491,8 +2765,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-private-us-east-1b"), }, { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -1525,7 +2799,6 @@ func TestReconcileSubnets(t *testing.T) { After(zone1PrivateSubnet) // zone 2 - zone2PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), CidrBlock: aws.String("10.0.32.0/19"), @@ -1539,8 +2812,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-public-us-east-1c"), }, { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -1581,6 +2854,18 @@ func TestReconcileSubnets(t *testing.T) { Return(&ec2.ModifySubnetAttributeOutput{}, nil). After(zone2PublicSubnet) + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + })). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + zone2PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), CidrBlock: aws.String("10.0.128.0/18"), @@ -1594,8 +2879,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-private-us-east-1c"), }, { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -1624,37 +2909,182 @@ func TestReconcileSubnets(t *testing.T) { }, nil). After(zone2PublicSubnet) - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(zone2PrivateSubnet) + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(zone2PrivateSubnet) + }, + }, + { // Edge Zones + name: "Managed VPC, local zones, no existing subnets exist, two az's, one LZ, expect two private and two public from default, and one private and public from Local Zone", + input: func() *ClusterScopeBuilder { + stubNetworkSpecEdgeLocalZonesOnly := stubNetworkSpecWithSubnets.DeepCopy() + stubNetworkSpecEdgeLocalZonesOnly.Subnets = stubSubnetsAvailabilityZone + stubNetworkSpecEdgeLocalZonesOnly.Subnets = append(stubNetworkSpecEdgeLocalZonesOnly.Subnets, stubAdditionalSubnetsAvailabilityZone...) + stubNetworkSpecEdgeLocalZonesOnly.Subnets = append(stubNetworkSpecEdgeLocalZonesOnly.Subnets, stubSubnetsLocalZone...) + return NewClusterScope().WithNetwork(stubNetworkSpecEdgeLocalZonesOnly) + }(), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := stubMockDescribeSubnetsWithContextManaged(m) + stubMockDescribeRouteTablesWithContext(m) + stubMockDescribeNatGatewaysPagesWithContext(m) + stubMockDescribeAvailabilityZonesWithContextCustomZones(m, []*ec2.AvailabilityZone{ + {ZoneName: aws.String("us-east-1a"), ZoneType: aws.String("availability-zone")}, + {ZoneName: aws.String("us-east-1b"), ZoneType: aws.String("availability-zone")}, + {ZoneName: aws.String("us-east-1-nyc-1a"), ZoneType: aws.String("local-zone"), ParentZoneName: aws.String("us-east-1a")}, + {ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), ZoneType: aws.String("wavelength-zone"), ParentZoneName: aws.String("us-east-1a")}, + }).AnyTimes() + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).AnyTimes() + + // Zone 1a subnets + az1aPrivate := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "private", "10.0.1.0/24", false). + After(describeCall) + + az1aPublic := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "public", "10.0.2.0/24", false). + After(az1aPrivate) + stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1a"). + After(az1aPublic) + + // Zone 1b subnets + az1bPrivate := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1b", "private", "10.0.3.0/24", false). + After(az1aPublic) + + az1bPublic := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1b", "public", "10.0.4.0/24", false). + After(az1bPrivate) + stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1b"). + After(az1bPublic) + + // Local zone 1-nyc-1a. + lz1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "private", "10.0.5.0/24", true). 
+ After(az1bPublic) + + lz1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "public", "10.0.6.0/24", true).After(lz1Private) + stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1-nyc-1a"). + After(lz1Public) + }, + }, + { + name: "Managed VPC, edge zones, custom names, no existing subnets exist, one AZ, LZ and WL, expect one private and one public subnets from each of default zones, Local Zone, and Wavelength", + input: NewClusterScope().WithNetwork(stubNetworkSpecWithSubnetsEdge), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describeCall := stubMockDescribeSubnetsWithContextManaged(m) + stubMockDescribeRouteTablesWithContext(m) + stubMockDescribeNatGatewaysPagesWithContext(m) + stubMockDescribeAvailabilityZonesWithContextAllZones(m) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).AnyTimes() + + // AZone 1a subnets + az1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "private", "10.0.1.0/24", false). + After(describeCall) + + az1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "public", "10.0.2.0/24", false).After(az1Private) + stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1a").After(az1Public) + + // Local zone 1-nyc-1a. + lz1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "private", "10.0.5.0/24", true). + After(describeCall) + + lz1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-nyc-1a", "public", "10.0.6.0/24", true).After(lz1Private) + stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1-nyc-1a").After(lz1Public) + + // Wavelength zone nyc-1. + wz1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-wl1-nyc-wlz-1", "private", "10.0.7.0/24", true). + After(describeCall) + + stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1-wl1-nyc-wlz-1", "public", "10.0.8.0/24", true).After(wz1Private) + }, + }, + { + name: "Managed VPC, edge zones, error when retrieving zone information for subnet's AvailabilityZone", + input: NewClusterScope().WithNetwork(stubNetworkSpecWithSubnetsEdge), + expect: func(m *mocks.MockEC2APIMockRecorder) { + stubMockDescribeSubnetsWithContextManaged(m) + stubMockDescribeRouteTablesWithContext(m) + stubMockDescribeNatGatewaysPagesWithContext(m) + + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{}, + }, nil) }, + errorExpected: true, + errorMessageExpected: `expected the zone attributes to be populated to subnet: unable to update zone information for subnet 'subnet-private-us-east-1a' and zone 'us-east-1a'`, }, { - name: "Managed VPC, no existing subnets exist, two az's, max num azs is 1, expect one private and one public from default", + name: "Managed VPC, edge zones, error when IPv6 subnet", + input: func() *ClusterScopeBuilder { + net := stubNetworkSpecWithSubnetsEdge.DeepCopy() + // Only AZ and LZ to simplify the goal + net.Subnets = infrav1.Subnets{} + for i := range stubSubnetsAvailabilityZone { + net.Subnets = append(net.Subnets, *stubSubnetsAvailabilityZone[i].DeepCopy()) + } + for i := range stubSubnetsLocalZone { + lz := stubSubnetsLocalZone[i].DeepCopy() + lz.IsIPv6 = true + net.Subnets = append(net.Subnets, *lz) + } + return NewClusterScope().WithNetwork(net) + }(), + expect: func(m *mocks.MockEC2APIMockRecorder) { + describe := stubMockDescribeSubnetsWithContextManaged(m) + stubMockDescribeRouteTablesWithContext(m) + stubMockDescribeNatGatewaysPagesWithContext(m) + stubMockDescribeAvailabilityZonesWithContextAllZones(m) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()).AnyTimes() + + az1Private := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "private", "10.0.1.0/24", false).After(describe) + + az1Public := stubGenMockCreateSubnetWithContext(m, "test-cluster", "us-east-1a", "public", "10.0.2.0/24", false).After(az1Private) + stubMockModifySubnetAttributeWithContext(m, "subnet-public-us-east-1a").After(az1Public) + }, + errorExpected: true, + errorMessageExpected: `failed to create subnet: IPv6 is not supported with zone type "local-zone"`, + }, + { + name: "Unmanaged VPC, edge zones, existing subnets, one AZ, LZ and WL, expect one private and one public subnets from each of default zones, Local Zone, and Wavelength", + input: func() *ClusterScopeBuilder { + net := stubNetworkSpecWithSubnetsEdge.DeepCopy() + net.VPC = infrav1.VPCSpec{ + ID: subnetsVPCID, + } + net.Subnets = infrav1.Subnets{ + {ResourceID: "subnet-az-1a-private"}, + {ResourceID: "subnet-az-1a-public"}, + {ResourceID: "subnet-lz-1a-private"}, + {ResourceID: "subnet-lz-1a-public"}, + {ResourceID: "subnet-wl-1a-private"}, + {ResourceID: "subnet-wl-1a-public"}, + } + return NewClusterScope().WithNetwork(net) + }(), + expect: func(m *mocks.MockEC2APIMockRecorder) { + stubMockDescribeSubnetsWithContextUnmanaged(m) + stubMockDescribeAvailabilityZonesWithContextAllZones(m) + stubMockDescribeRouteTablesWithContextWithWavelength(m, + []string{"subnet-az-1a-private", "subnet-lz-1a-private", "subnet-wl-1a-private"}, + []string{"subnet-az-1a-public", "subnet-lz-1a-public"}, + []string{"subnet-wl-1a-public"}) + + stubMockDescribeNatGatewaysPagesWithContext(m) + stubMockCreateTagsWithContext(m, "test-cluster", "subnet-az-1a-private", "us-east-1a", "private", false).AnyTimes() + }, + }, + { + name: "Managed VPC, no existing subnets exist, one az, prefer public subnet schema, expect one private and one public from default", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, Tags: infrav1.Tags{ infrav1.ClusterTagKey("test-cluster"): "owned", }, - CidrBlock: defaultVPCCidr, - AvailabilityZoneUsageLimit: aws.Int(1), - AvailabilityZoneSelection: &infrav1.AZSelectionSchemeOrdered, + CidrBlock: defaultVPCCidr, + SubnetSchema: 
&infrav1.SubnetSchemaPreferPublic, }, Subnets: []infrav1.SubnetSpec{}, }), expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). - Return(&ec2.DescribeAvailabilityZonesOutput{ - AvailabilityZones: []*ec2.AvailabilityZone{ - { - ZoneName: aws.String("us-east-1b"), - }, - { - ZoneName: aws.String("us-east-1c"), - }, - }, - }, nil) - describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ { @@ -1687,21 +3117,33 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) - zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.0.0/17"), - AvailabilityZone: aws.String("us-east-1b"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1c"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-public-us-east-1b"), + Value: aws.String("test-cluster-subnet-public-us-east-1c"), }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -1723,15 +3165,15 @@ func TestReconcileSubnets(t *testing.T) { Subnet: &ec2.Subnet{ VpcId: aws.String(subnetsVPCID), SubnetId: aws.String("subnet-1"), - CidrBlock: aws.String("10.0.0.0/17"), - AvailabilityZone: aws.String("us-east-1b"), + CidrBlock: aws.String("10.0.128.0/17"), + AvailabilityZone: aws.String("us-east-1c"), MapPublicIpOnLaunch: aws.Bool(false), }, }, nil). After(describeCall) m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(zone1PublicSubnet) + After(firstSubnet) m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ @@ -1740,23 +3182,23 @@ func TestReconcileSubnets(t *testing.T) { SubnetId: aws.String("subnet-1"), }). Return(&ec2.ModifySubnetAttributeOutput{}, nil). 
- After(zone1PublicSubnet) + After(firstSubnet) - zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1b"), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1c"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-private-us-east-1b"), + Value: aws.String("test-cluster-subnet-private-us-east-1c"), }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -1778,174 +3220,46 @@ func TestReconcileSubnets(t *testing.T) { Subnet: &ec2.Subnet{ VpcId: aws.String(subnetsVPCID), SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1b"), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1c"), MapPublicIpOnLaunch: aws.Bool(false), }, }, nil). - After(zone1PublicSubnet) + After(firstSubnet) m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). - After(zone1PrivateSubnet) - }, - }, - { - name: "Managed VPC, existing public subnet, 2 subnets in spec, should create 1 subnet", - input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ - VPC: infrav1.VPCSpec{ - ID: subnetsVPCID, - Tags: infrav1.Tags{ - infrav1.ClusterTagKey("test-cluster"): "owned", - }, - }, - Subnets: []infrav1.SubnetSpec{ - { - ID: "subnet-1", - AvailabilityZone: "us-east-1a", - CidrBlock: "10.0.0.0/17", - IsPublic: true, - }, - { - AvailabilityZone: "us-east-1a", - CidrBlock: "10.0.128.0/17", - IsPublic: false, - }, - }, - }), - expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("state"), - Values: []*string{aws.String("pending"), aws.String("available")}, - }, - { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, - }, - }, - })). - Return(&ec2.DescribeSubnetsOutput{ - Subnets: []*ec2.Subnet{ - { - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-1"), - AvailabilityZone: aws.String("us-east-1a"), - CidrBlock: aws.String("10.0.0.0/17"), - Tags: []*ec2.Tag{ - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("public"), - }, - { - Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-public"), - }, - { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), - }, - }, - }, - }, - }, nil) - - m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). - Return(&ec2.DescribeRouteTablesOutput{}, nil) + After(secondSubnet) - m.DescribeNatGatewaysPagesWithContext(context.TODO(), - gomock.Eq(&ec2.DescribeNatGatewaysInput{ - Filter: []*ec2.Filter{ - { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(subnetsVPCID)}, - }, + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ { - Name: aws.String("state"), - Values: []*string{aws.String("pending"), aws.String("available")}, - }, - }, - }), - gomock.Any()).Return(nil) - - m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ - VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1a"), - TagSpecifications: []*ec2.TagSpecification{ - { - ResourceType: aws.String("subnet"), - Tags: []*ec2.Tag{ - { - Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-private-us-east-1a"), - }, - { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), - }, - { - Key: aws.String("kubernetes.io/role/internal-elb"), - Value: aws.String("1"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("private"), - }, + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), }, }, - }, - })). - Return(&ec2.CreateSubnetOutput{ - Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1a"), - }, }, nil) - - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()) - - // Public subnet - m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})). - Return(nil, nil) }, }, { - name: "Managed VPC, existing public subnet, 2 subnets in spec, should create 1 subnet, custom Name tag", + name: "Managed IPv6 VPC, no existing subnets exist, one az, prefer public subnet schema, expect one private and one public from default", input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ VPC: infrav1.VPCSpec{ ID: subnetsVPCID, Tags: infrav1.Tags{ infrav1.ClusterTagKey("test-cluster"): "owned", }, - }, - Subnets: []infrav1.SubnetSpec{ - { - ID: "subnet-1", - AvailabilityZone: "us-east-1a", - CidrBlock: "10.0.0.0/17", - IsPublic: true, - }, - { - AvailabilityZone: "us-east-1a", - CidrBlock: "10.0.128.0/17", - IsPublic: false, - Tags: map[string]string{"Name": "custom-sub"}, + CidrBlock: defaultVPCCidr, + IPv6: &infrav1.IPv6{ + CidrBlock: "2001:db8:1234:1a01::/56", + PoolID: "amazon", }, + SubnetSchema: &infrav1.SubnetSchemaPreferPublic, }, + Subnets: []infrav1.SubnetSpec{}, }), expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ { Name: aws.String("state"), @@ -1955,36 +3269,9 @@ func TestReconcileSubnets(t *testing.T) { Name: aws.String("vpc-id"), Values: []*string{aws.String(subnetsVPCID)}, }, - }, - })). 
- Return(&ec2.DescribeSubnetsOutput{ - Subnets: []*ec2.Subnet{ - { - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-1"), - AvailabilityZone: aws.String("us-east-1a"), - CidrBlock: aws.String("10.0.0.0/17"), - Tags: []*ec2.Tag{ - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), - Value: aws.String("owned"), - }, - { - Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("public"), - }, - { - Key: aws.String("Name"), - Value: aws.String("test-cluster-subnet-public"), - }, - { - Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), - }, - }, - }, - }, - }, nil) + }, + })). + Return(&ec2.DescribeSubnetsOutput{}, nil) m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). Return(&ec2.DescribeRouteTablesOutput{}, nil) @@ -2004,24 +3291,37 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) - m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + firstSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1a"), + AvailabilityZone: aws.String("us-east-1c"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), Tags: []*ec2.Tag{ { Key: aws.String("Name"), - Value: aws.String("custom-sub"), // must use the provided `Name` tag, not generate a name + Value: aws.String("test-cluster-subnet-public-us-east-1c"), }, { Key: aws.String("kubernetes.io/cluster/test-cluster"), - Value: aws.String("shared"), + Value: aws.String("owned"), }, { - Key: aws.String("kubernetes.io/role/internal-elb"), + Key: aws.String("kubernetes.io/role/elb"), Value: aws.String("1"), }, { @@ -2030,7 +3330,7 @@ func TestReconcileSubnets(t *testing.T) { }, { Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), - Value: aws.String("private"), + Value: aws.String("public"), }, }, }, @@ -2038,47 +3338,141 @@ func TestReconcileSubnets(t *testing.T) { })). Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.128.0/17"), - AvailabilityZone: aws.String("us-east-1a"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.128.0/17"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), }, - }, nil) + }, nil). + After(describeCall) - m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()) + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
+ After(firstSubnet) - // Public subnet - m.CreateTagsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.CreateTagsInput{})). - Return(nil, nil) - }, - }, - { - name: "With ManagedControlPlaneScope, Managed VPC, no existing subnets exist, two az's, expect two private and two public from default, created with tag including eksClusterName not a name of Cluster resource", - input: NewManagedControlPlaneScope(). - WithEKSClusterName("test-eks-cluster"). - WithNetwork(&infrav1.NetworkSpec{ - VPC: infrav1.VPCSpec{ - ID: subnetsVPCID, - Tags: infrav1.Tags{ - infrav1.ClusterTagKey("test-cluster"): "owned", + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(firstSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-2"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(firstSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(firstSubnet) + + secondSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String("10.0.0.0/17"), + AvailabilityZone: aws.String("us-east-1c"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-subnet-private-us-east-1c"), + }, + { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("kubernetes.io/role/internal-elb"), + Value: aws.String("1"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, + { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("private"), + }, + }, }, - CidrBlock: defaultVPCCidr, }, - Subnets: []infrav1.SubnetSpec{}, - }), - expect: func(m *mocks.MockEC2APIMockRecorder) { + })). + Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.0.0/17"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, + AvailabilityZone: aws.String("us-east-1c"), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil). + After(firstSubnet) + + m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). + After(secondSubnet) + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
Return(&ec2.DescribeAvailabilityZonesOutput{ AvailabilityZones: []*ec2.AvailabilityZone{ - { - ZoneName: aws.String("us-east-1b"), - }, { ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), }, }, }, nil) - + }, + }, + { + name: "Managed IPv6 VPC, no existing subnets exist, two az's, prefer public subnet schema, expect two private and two public from default", + input: NewClusterScope().WithNetwork(&infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: subnetsVPCID, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + CidrBlock: defaultVPCCidr, + IPv6: &infrav1.IPv6{ + CidrBlock: "2001:db8:1234:1a01::/56", + PoolID: "amazon", + }, + SubnetSchema: &infrav1.SubnetSchemaPreferPublic, + }, + Subnets: []infrav1.SubnetSpec{}, + }), + expect: func(m *mocks.MockEC2APIMockRecorder) { describeCall := m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ Filters: []*ec2.Filter{ { @@ -2111,10 +3505,38 @@ func TestReconcileSubnets(t *testing.T) { }), gomock.Any()).Return(nil) + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() + + // Zone1 + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Eq(&ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1b"}), + })). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).MaxTimes(2) + zone1PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.0.0/19"), + CidrBlock: aws.String("10.0.64.0/18"), AvailabilityZone: aws.String("us-east-1b"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), @@ -2124,8 +3546,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-public-us-east-1b"), }, { - Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -2145,9 +3567,19 @@ func TestReconcileSubnets(t *testing.T) { })). Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-1"), - CidrBlock: aws.String("10.0.0.0/19"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.64.0/18"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a02::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, AvailabilityZone: aws.String("us-east-1b"), MapPublicIpOnLaunch: aws.Bool(false), }, @@ -2157,6 +3589,24 @@ func TestReconcileSubnets(t *testing.T) { m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). 
After(zone1PublicSubnet) + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone1PublicSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-2"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone1PublicSubnet) + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ Value: aws.Bool(true), @@ -2168,8 +3618,9 @@ func TestReconcileSubnets(t *testing.T) { zone1PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.64.0/18"), + CidrBlock: aws.String("10.0.0.0/19"), AvailabilityZone: aws.String("us-east-1b"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a04::/64"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), @@ -2179,8 +3630,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-private-us-east-1b"), }, { - Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -2200,9 +3651,19 @@ func TestReconcileSubnets(t *testing.T) { })). Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.64.0/18"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.0.0/19"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a04::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, AvailabilityZone: aws.String("us-east-1b"), MapPublicIpOnLaunch: aws.Bool(false), }, @@ -2213,11 +3674,23 @@ func TestReconcileSubnets(t *testing.T) { After(zone1PrivateSubnet) // zone 2 + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1c"}), + }). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1c"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil).AnyTimes() zone2PublicSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.32.0/19"), + CidrBlock: aws.String("10.0.128.0/18"), AvailabilityZone: aws.String("us-east-1c"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), @@ -2227,8 +3700,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-public-us-east-1c"), }, { - Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/elb"), @@ -2248,9 +3721,19 @@ func TestReconcileSubnets(t *testing.T) { })). Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-1"), - CidrBlock: aws.String("10.0.32.0/19"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-1"), + CidrBlock: aws.String("10.0.128.0/18"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a03::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, AvailabilityZone: aws.String("us-east-1c"), MapPublicIpOnLaunch: aws.Bool(false), }, @@ -2260,6 +3743,23 @@ func TestReconcileSubnets(t *testing.T) { m.WaitUntilSubnetAvailableWithContext(context.TODO(), gomock.Any()). After(zone2PublicSubnet) + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-1"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone2PublicSubnet) + + m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: aws.String("subnet-2"), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil). + After(zone2PublicSubnet) m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ Value: aws.Bool(true), @@ -2271,8 +3771,9 @@ func TestReconcileSubnets(t *testing.T) { zone2PrivateSubnet := m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ VpcId: aws.String(subnetsVPCID), - CidrBlock: aws.String("10.0.128.0/18"), + CidrBlock: aws.String("10.0.32.0/19"), AvailabilityZone: aws.String("us-east-1c"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a05::/64"), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String("subnet"), @@ -2282,8 +3783,8 @@ func TestReconcileSubnets(t *testing.T) { Value: aws.String("test-cluster-subnet-private-us-east-1c"), }, { - Key: aws.String("kubernetes.io/cluster/test-eks-cluster"), - Value: aws.String("shared"), + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), }, { Key: aws.String("kubernetes.io/role/internal-elb"), @@ -2303,9 +3804,19 @@ func TestReconcileSubnets(t *testing.T) { })). 
Return(&ec2.CreateSubnetOutput{ Subnet: &ec2.Subnet{ - VpcId: aws.String(subnetsVPCID), - SubnetId: aws.String("subnet-2"), - CidrBlock: aws.String("10.0.128.0/18"), + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String("subnet-2"), + CidrBlock: aws.String("10.0.32.0/19"), + AssignIpv6AddressOnCreation: aws.Bool(true), + Ipv6CidrBlockAssociationSet: []*ec2.SubnetIpv6CidrBlockAssociation{ + { + AssociationId: aws.String("amazon"), + Ipv6CidrBlock: aws.String("2001:db8:1234:1a05::/64"), + Ipv6CidrBlockState: &ec2.SubnetCidrBlockState{ + State: aws.String(ec2.SubnetCidrBlockStateCodeAssociated), + }, + }, + }, AvailabilityZone: aws.String("us-east-1c"), MapPublicIpOnLaunch: aws.Bool(false), }, @@ -2338,9 +3849,24 @@ func TestReconcileSubnets(t *testing.T) { if tc.errorExpected && err == nil { t.Fatal("expected error reconciling but not no error") } + if tc.errorExpected && err != nil && len(tc.errorMessageExpected) > 0 { + if err.Error() != tc.errorMessageExpected { + t.Fatalf("got an unexpected error message:\nwant: %v\n got: %v\n", tc.errorMessageExpected, err.Error()) + } + } if !tc.errorExpected && err != nil { t.Fatalf("got an unexpected error: %v", err) } + if tc.errorExpected && err != nil && len(tc.errorMessageExpected) > 0 { + if err.Error() != tc.errorMessageExpected { + t.Fatalf("got an unexpected error message: %v", err) + } + } + if len(tc.optionalExpectSubnets) > 0 { + if !cmp.Equal(s.scope.Subnets(), tc.optionalExpectSubnets) { + t.Errorf("got unexpect Subnets():\n%v", cmp.Diff(s.scope.Subnets(), tc.optionalExpectSubnets)) + } + } }) } } @@ -2364,12 +3890,14 @@ func TestDiscoverSubnets(t *testing.T) { AvailabilityZone: "us-east-1a", CidrBlock: "10.0.10.0/24", IsPublic: true, + ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"), }, { ID: "subnet-2", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.11.0/24", IsPublic: false, + ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"), }, }, }, @@ -2415,6 +3943,16 @@ func TestDiscoverSubnets(t *testing.T) { }, }, nil) + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + }, + }, + }, nil) + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). Return(&ec2.DescribeRouteTablesOutput{ RouteTables: []*ec2.RouteTable{ @@ -2482,6 +4020,7 @@ func TestDiscoverSubnets(t *testing.T) { Tags: infrav1.Tags{ "Name": "provided-subnet-public", }, + ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"), }, { ID: "subnet-2", @@ -2493,6 +4032,7 @@ func TestDiscoverSubnets(t *testing.T) { Tags: infrav1.Tags{ "Name": "provided-subnet-private", }, + ZoneType: ptr.To[infrav1.ZoneType]("availability-zone"), }, }, }, @@ -2677,7 +4217,7 @@ func TestDeleteSubnets(t *testing.T) { } } -// Test helpers +// Test helpers. 
type ScopeBuilder interface { Build() (scope.NetworkScope, error) @@ -2781,3 +4321,440 @@ func (b *ManagedControlPlaneScopeBuilder) Build() (scope.NetworkScope, error) { return scope.NewManagedControlPlaneScope(*param) } + +func TestService_retrieveZoneInfo(t *testing.T) { + type testCase struct { + name string + inputZoneNames []string + expect func(m *mocks.MockEC2APIMockRecorder) + want []*ec2.AvailabilityZone + wantErrMessage string + } + + testCases := []*testCase{ + { + name: "empty zones", + inputZoneNames: []string{}, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{}, + }, nil) + }, + want: []*ec2.AvailabilityZone{}, + }, + { + name: "error describing zones", + inputZoneNames: []string{}, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{}, + }, nil).Return(nil, awserrors.NewNotFound("FailedDescribeAvailableZones")) + }, + wantErrMessage: `failed to describe availability zones: FailedDescribeAvailableZones`, + }, + { + name: "get type availability zones", + inputZoneNames: []string{"us-east-1a", "us-east-1b"}, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1a", "us-east-1b"}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + }, + }, nil) + }, + want: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + { + ZoneName: aws.String("us-east-1b"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + }, + }, + { + name: "get type local zones", + inputZoneNames: []string{"us-east-1-nyc-1a", "us-east-1-bos-1a"}, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1-nyc-1a", "us-east-1-bos-1a"}), + }). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1-nyc-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-bos-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1b"), + }, + }, + }, nil) + }, + want: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1-nyc-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-bos-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1b"), + }, + }, + }, + { + name: "get type wavelength zones", + inputZoneNames: []string{"us-east-1-wl1-nyc-wlz-1", "us-east-1-wl1-bos-wlz-1"}, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1-wl1-nyc-wlz-1", "us-east-1-wl1-bos-wlz-1"}), + }). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-wl1-bos-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1b"), + }, + }, + }, nil) + }, + want: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-wl1-bos-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1b"), + }, + }, + }, + { + name: "get all zone types", + inputZoneNames: []string{"us-east-1a", "us-east-1-nyc-1a", "us-east-1-wl1-nyc-wlz-1"}, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), &ec2.DescribeAvailabilityZonesInput{ + ZoneNames: aws.StringSlice([]string{"us-east-1a", "us-east-1-nyc-1a", "us-east-1-wl1-nyc-wlz-1"}), + }). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + { + ZoneName: aws.String("us-east-1-nyc-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + }, + }, nil) + }, + want: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + { + ZoneName: aws.String("us-east-1-nyc-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + g := NewWithT(t) + ec2Mock := mocks.NewMockEC2API(mockCtrl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + }, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{}, + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + if tc.expect != nil { + tc.expect(ec2Mock.EXPECT()) + } + + s := NewService(scope) + s.EC2Client = ec2Mock + + got, err := s.retrieveZoneInfo(tc.inputZoneNames) + if err != nil { + if tc.wantErrMessage != err.Error() { + t.Errorf("Service.retrieveZoneInfo() error != wanted, got: '%v', want: '%v'", err, tc.wantErrMessage) + } + return + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("Service.retrieveZoneInfo() = %v, want %v", got, tc.want) + } + g.Expect(err).NotTo(HaveOccurred()) + }) + } +} + +// Stub functions to generate AWS mock calls. + +func stubGetTags(prefix, role, zone string, isEdge bool) []*ec2.Tag { + tags := []*ec2.Tag{ + {Key: aws.String("Name"), Value: aws.String(fmt.Sprintf("%s-subnet-%s-%s", prefix, role, zone))}, + {Key: aws.String("kubernetes.io/cluster/test-cluster"), Value: aws.String("owned")}, + } + // tags are returned ordered, inserting LB subnets to prevent diffs... + if !isEdge { + lbLabel := "internal-elb" + if role == "public" { + lbLabel = "elb" + } + tags = append(tags, &ec2.Tag{ + Key: aws.String(fmt.Sprintf("kubernetes.io/role/%s", lbLabel)), + Value: aws.String("1"), + }) + } + // ... then appending the rest of tags + tags = append(tags, []*ec2.Tag{ + {Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), Value: aws.String("owned")}, + {Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), Value: aws.String(role)}, + }...) + + return tags +} + +func stubGenMockCreateSubnetWithContext(m *mocks.MockEC2APIMockRecorder, prefix, zone, role, cidr string, isEdge bool) *gomock.Call { + return m.CreateSubnetWithContext(context.TODO(), gomock.Eq(&ec2.CreateSubnetInput{ + VpcId: aws.String(subnetsVPCID), + CidrBlock: aws.String(cidr), + AvailabilityZone: aws.String(zone), + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String("subnet"), + Tags: stubGetTags(prefix, role, zone, isEdge), + }, + }, + })). 
+ Return(&ec2.CreateSubnetOutput{ + Subnet: &ec2.Subnet{ + VpcId: aws.String(subnetsVPCID), + SubnetId: aws.String(fmt.Sprintf("subnet-%s-%s", role, zone)), + CidrBlock: aws.String(cidr), + AvailabilityZone: aws.String(zone), + MapPublicIpOnLaunch: aws.Bool(false), + }, + }, nil) +} + +func stubMockCreateTagsWithContext(m *mocks.MockEC2APIMockRecorder, prefix, name, zone, role string, isEdge bool) *gomock.Call { + return m.CreateTagsWithContext(context.TODO(), gomock.Eq(&ec2.CreateTagsInput{ + Resources: aws.StringSlice([]string{name}), + Tags: stubGetTags(prefix, role, zone, isEdge), + })). + Return(&ec2.CreateTagsOutput{}, nil) +} + +func stubMockDescribeRouteTablesWithContext(m *mocks.MockEC2APIMockRecorder) { + m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{}, nil) +} + +func stubMockDescribeRouteTablesWithContextWithWavelength(m *mocks.MockEC2APIMockRecorder, privSubnets, pubSubnetsIGW, pubSubnetsCarrier []string) *gomock.Call { + routes := []*ec2.RouteTable{} + + // create public route table + pubTable := &ec2.RouteTable{ + Routes: []*ec2.Route{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String("igw-0"), + }, + }, + RouteTableId: aws.String("rtb-public"), + } + for _, sub := range pubSubnetsIGW { + pubTable.Associations = append(pubTable.Associations, &ec2.RouteTableAssociation{ + SubnetId: aws.String(sub), + }) + } + routes = append(routes, pubTable) + + // create public carrier route table + pubCarrierTable := &ec2.RouteTable{ + Routes: []*ec2.Route{ + { + DestinationCidrBlock: aws.String("0.0.0.0/0"), + CarrierGatewayId: aws.String("cagw-0"), + }, + }, + RouteTableId: aws.String("rtb-carrier"), + } + for _, sub := range pubSubnetsCarrier { + pubCarrierTable.Associations = append(pubCarrierTable.Associations, &ec2.RouteTableAssociation{ + SubnetId: aws.String(sub), + }) + } + routes = append(routes, pubCarrierTable) + + // create private route table + privTable := &ec2.RouteTable{ + Routes: []*ec2.Route{ + { + DestinationCidrBlock: aws.String("10.0.11.0/24"), + GatewayId: aws.String("vpc-natgw-1a"), + }, + }, + RouteTableId: aws.String("rtb-private"), + } + for _, sub := range privSubnets { + privTable.Associations = append(privTable.Associations, &ec2.RouteTableAssociation{ + SubnetId: aws.String(sub), + }) + } + routes = append(routes, privTable) + + return m.DescribeRouteTablesWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeRouteTablesInput{})). + Return(&ec2.DescribeRouteTablesOutput{ + RouteTables: routes, + }, nil) +} + +func stubMockDescribeSubnetsWithContext(m *mocks.MockEC2APIMockRecorder, out *ec2.DescribeSubnetsOutput, filterKey, filterValue string) *gomock.Call { + return m.DescribeSubnetsWithContext(context.TODO(), gomock.Eq(&ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("pending"), aws.String("available")}, + }, + { + Name: aws.String(filterKey), + Values: []*string{aws.String(filterValue)}, + }, + }, + })). 
+ Return(out, nil) +} + +func stubMockDescribeSubnetsWithContextUnmanaged(m *mocks.MockEC2APIMockRecorder) *gomock.Call { + return stubMockDescribeSubnetsWithContext(m, &ec2.DescribeSubnetsOutput{ + Subnets: []*ec2.Subnet{ + {SubnetId: aws.String("subnet-az-1a-private"), AvailabilityZone: aws.String("us-east-1a")}, + {SubnetId: aws.String("subnet-az-1a-public"), AvailabilityZone: aws.String("us-east-1a")}, + {SubnetId: aws.String("subnet-lz-1a-private"), AvailabilityZone: aws.String("us-east-1-nyc-1a")}, + {SubnetId: aws.String("subnet-lz-1a-public"), AvailabilityZone: aws.String("us-east-1-nyc-1a")}, + {SubnetId: aws.String("subnet-wl-1a-private"), AvailabilityZone: aws.String("us-east-1-wl1-nyc-wlz-1")}, + {SubnetId: aws.String("subnet-wl-1a-public"), AvailabilityZone: aws.String("us-east-1-wl1-nyc-wlz-1")}, + }, + }, "vpc-id", subnetsVPCID) +} + +func stubMockDescribeSubnetsWithContextManaged(m *mocks.MockEC2APIMockRecorder) *gomock.Call { + return stubMockDescribeSubnetsWithContext(m, &ec2.DescribeSubnetsOutput{}, "vpc-id", subnetsVPCID) +} + +func stubMockDescribeNatGatewaysPagesWithContext(m *mocks.MockEC2APIMockRecorder) { + m.DescribeNatGatewaysPagesWithContext(context.TODO(), + gomock.Eq(&ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + {Name: aws.String("vpc-id"), Values: []*string{aws.String(subnetsVPCID)}}, + {Name: aws.String("state"), Values: []*string{aws.String("pending"), aws.String("available")}}, + }, + }), + gomock.Any()).Return(nil) +} + +func stubMockModifySubnetAttributeWithContext(m *mocks.MockEC2APIMockRecorder, name string) *gomock.Call { + return m.ModifySubnetAttributeWithContext(context.TODO(), &ec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{Value: aws.Bool(true)}, + SubnetId: aws.String(name), + }). + Return(&ec2.ModifySubnetAttributeOutput{}, nil) +} + +func stubMockDescribeAvailabilityZonesWithContextAllZones(m *mocks.MockEC2APIMockRecorder) { + m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). + Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: []*ec2.AvailabilityZone{ + { + ZoneName: aws.String("us-east-1a"), + ZoneType: aws.String("availability-zone"), + ParentZoneName: nil, + }, + { + ZoneName: aws.String("us-east-1-nyc-1a"), + ZoneType: aws.String("local-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + { + ZoneName: aws.String("us-east-1-wl1-nyc-wlz-1"), + ZoneType: aws.String("wavelength-zone"), + ParentZoneName: aws.String("us-east-1a"), + }, + }, + }, nil).AnyTimes() +} + +func stubMockDescribeAvailabilityZonesWithContextCustomZones(m *mocks.MockEC2APIMockRecorder, zones []*ec2.AvailabilityZone) *gomock.Call { + return m.DescribeAvailabilityZonesWithContext(context.TODO(), gomock.Any()). 
+ Return(&ec2.DescribeAvailabilityZonesOutput{ + AvailabilityZones: zones, + }, nil).AnyTimes() +} diff --git a/pkg/cloud/services/network/vpc_test.go b/pkg/cloud/services/network/vpc_test.go index a48bec80ca..403707b8ec 100644 --- a/pkg/cloud/services/network/vpc_test.go +++ b/pkg/cloud/services/network/vpc_test.go @@ -38,7 +38,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -func describeVpcAttributeTrue(ctx context.Context, input *ec2.DescribeVpcAttributeInput, requestOptions ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) { +func describeVpcAttributeTrue(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) { result := &ec2.DescribeVpcAttributeOutput{ VpcId: input.VpcId, } @@ -51,7 +51,7 @@ func describeVpcAttributeTrue(ctx context.Context, input *ec2.DescribeVpcAttribu return result, nil } -func describeVpcAttributeFalse(ctx context.Context, input *ec2.DescribeVpcAttributeInput, requestOptions ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) { +func describeVpcAttributeFalse(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...request.Option) (*ec2.DescribeVpcAttributeOutput, error) { result := &ec2.DescribeVpcAttributeOutput{ VpcId: input.VpcId, } @@ -573,9 +573,8 @@ func TestReconcileVPC(t *testing.T) { g.Expect(err).ToNot(BeNil()) g.Expect(err.Error()).To(ContainSubstring(*tc.wantErrContaining)) return - } else { - g.Expect(err).To(BeNil()) } + g.Expect(err).To(BeNil()) g.Expect(tc.want).To(Equal(&clusterScope.AWSCluster.Spec.NetworkSpec.VPC)) }) } diff --git a/pkg/cloud/services/s3/mock_s3iface/doc.go b/pkg/cloud/services/s3/mock_s3iface/doc.go index d507db6d37..4b8b857f37 100644 --- a/pkg/cloud/services/s3/mock_s3iface/doc.go +++ b/pkg/cloud/services/s3/mock_s3iface/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_s3iface provides a mock implementation of the s3iface.S3API interface // Run go generate to regenerate this mock. // //go:generate ../../../../../hack/tools/bin/mockgen -destination s3api_mock.go -package mock_s3iface github.com/aws/aws-sdk-go/service/s3/s3iface S3API diff --git a/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go b/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go index ac45c976d7..121d3df3fb 100644 --- a/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go +++ b/pkg/cloud/services/s3/mock_s3iface/s3api_mock.go @@ -302,6 +302,56 @@ func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...) } +// CreateSession mocks base method. +func (m *MockS3API) CreateSession(arg0 *s3.CreateSessionInput) (*s3.CreateSessionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSession", arg0) + ret0, _ := ret[0].(*s3.CreateSessionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSession indicates an expected call of CreateSession. +func (mr *MockS3APIMockRecorder) CreateSession(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSession", reflect.TypeOf((*MockS3API)(nil).CreateSession), arg0) +} + +// CreateSessionRequest mocks base method. 
+func (m *MockS3API) CreateSessionRequest(arg0 *s3.CreateSessionInput) (*request.Request, *s3.CreateSessionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSessionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CreateSessionOutput) + return ret0, ret1 +} + +// CreateSessionRequest indicates an expected call of CreateSessionRequest. +func (mr *MockS3APIMockRecorder) CreateSessionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSessionRequest", reflect.TypeOf((*MockS3API)(nil).CreateSessionRequest), arg0) +} + +// CreateSessionWithContext mocks base method. +func (m *MockS3API) CreateSessionWithContext(arg0 context.Context, arg1 *s3.CreateSessionInput, arg2 ...request.Option) (*s3.CreateSessionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateSessionWithContext", varargs...) + ret0, _ := ret[0].(*s3.CreateSessionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSessionWithContext indicates an expected call of CreateSessionWithContext. +func (mr *MockS3APIMockRecorder) CreateSessionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSessionWithContext", reflect.TypeOf((*MockS3API)(nil).CreateSessionWithContext), varargs...) +} + // DeleteBucket mocks base method. func (m *MockS3API) DeleteBucket(arg0 *s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) { m.ctrl.T.Helper() @@ -3052,6 +3102,89 @@ func (mr *MockS3APIMockRecorder) ListBucketsWithContext(arg0, arg1 interface{}, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketsWithContext), varargs...) } +// ListDirectoryBuckets mocks base method. +func (m *MockS3API) ListDirectoryBuckets(arg0 *s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListDirectoryBuckets", arg0) + ret0, _ := ret[0].(*s3.ListDirectoryBucketsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListDirectoryBuckets indicates an expected call of ListDirectoryBuckets. +func (mr *MockS3APIMockRecorder) ListDirectoryBuckets(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBuckets", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBuckets), arg0) +} + +// ListDirectoryBucketsPages mocks base method. +func (m *MockS3API) ListDirectoryBucketsPages(arg0 *s3.ListDirectoryBucketsInput, arg1 func(*s3.ListDirectoryBucketsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListDirectoryBucketsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListDirectoryBucketsPages indicates an expected call of ListDirectoryBucketsPages. +func (mr *MockS3APIMockRecorder) ListDirectoryBucketsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsPages", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsPages), arg0, arg1) +} + +// ListDirectoryBucketsPagesWithContext mocks base method. 
+func (m *MockS3API) ListDirectoryBucketsPagesWithContext(arg0 context.Context, arg1 *s3.ListDirectoryBucketsInput, arg2 func(*s3.ListDirectoryBucketsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListDirectoryBucketsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListDirectoryBucketsPagesWithContext indicates an expected call of ListDirectoryBucketsPagesWithContext. +func (mr *MockS3APIMockRecorder) ListDirectoryBucketsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsPagesWithContext), varargs...) +} + +// ListDirectoryBucketsRequest mocks base method. +func (m *MockS3API) ListDirectoryBucketsRequest(arg0 *s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListDirectoryBucketsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListDirectoryBucketsOutput) + return ret0, ret1 +} + +// ListDirectoryBucketsRequest indicates an expected call of ListDirectoryBucketsRequest. +func (mr *MockS3APIMockRecorder) ListDirectoryBucketsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsRequest", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsRequest), arg0) +} + +// ListDirectoryBucketsWithContext mocks base method. +func (m *MockS3API) ListDirectoryBucketsWithContext(arg0 context.Context, arg1 *s3.ListDirectoryBucketsInput, arg2 ...request.Option) (*s3.ListDirectoryBucketsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListDirectoryBucketsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListDirectoryBucketsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListDirectoryBucketsWithContext indicates an expected call of ListDirectoryBucketsWithContext. +func (mr *MockS3APIMockRecorder) ListDirectoryBucketsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDirectoryBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListDirectoryBucketsWithContext), varargs...) +} + // ListMultipartUploads mocks base method. func (m *MockS3API) ListMultipartUploads(arg0 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/s3/mock_stsiface/doc.go b/pkg/cloud/services/s3/mock_stsiface/doc.go index 82065f4ad7..429a95b586 100644 --- a/pkg/cloud/services/s3/mock_stsiface/doc.go +++ b/pkg/cloud/services/s3/mock_stsiface/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_stsiface provides a mock implementation for the STSAPI interface. // Run go generate to regenerate this mock. 
// //go:generate ../../../../../hack/tools/bin/mockgen -destination stsapi_mock.go -package mock_stsiface github.com/aws/aws-sdk-go/service/sts/stsiface STSAPI diff --git a/pkg/cloud/services/s3/s3.go b/pkg/cloud/services/s3/s3.go index a6bbf26b86..6eb8582585 100644 --- a/pkg/cloud/services/s3/s3.go +++ b/pkg/cloud/services/s3/s3.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package s3 provides a way to interact with AWS S3. package s3 import ( @@ -31,6 +32,7 @@ import ( "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/pkg/errors" + "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iam "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" @@ -38,6 +40,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" ) +// AWSDefaultRegion is the default AWS region. +const AWSDefaultRegion string = "us-east-1" + // Service holds a collection of interfaces. // The interfaces are broken down like this to group functions together. // One alternative is to have a large list of functions from the ec2 client. @@ -59,6 +64,7 @@ func NewService(s3Scope scope.S3Scope) *Service { } } +// ReconcileBucket reconciles the S3 bucket. func (s *Service) ReconcileBucket() error { if !s.bucketManagementEnabled() { return nil @@ -81,6 +87,7 @@ func (s *Service) ReconcileBucket() error { return nil } +// DeleteBucket deletes the S3 bucket. func (s *Service) DeleteBucket() error { if !s.bucketManagementEnabled() { return nil @@ -116,6 +123,7 @@ func (s *Service) DeleteBucket() error { return nil } +// Create creates an object in the S3 bucket. func (s *Service) Create(m *scope.MachineScope, data []byte) (string, error) { if !s.bucketManagementEnabled() { return "", errors.New("requested object creation but bucket management is not enabled") @@ -161,6 +169,7 @@ func (s *Service) Create(m *scope.MachineScope, data []byte) (string, error) { return objectURL.String(), nil } +// Delete deletes the object from the S3 bucket. func (s *Service) Delete(m *scope.MachineScope) error { if !s.bucketManagementEnabled() { return errors.New("requested object creation but bucket management is not enabled") @@ -186,16 +195,15 @@ func (s *Service) Delete(m *scope.MachineScope) error { // anyway for backwards compatibility reasons. s.scope.Debug("Received 403 forbidden from S3 HeadObject call. If GetObject permission has been granted to the controller but not ListBucket, object is already deleted. 
Attempting deletion anyway in case GetObject permission hasn't been granted to the controller but DeleteObject has.", "bucket", bucket, "key", key) - _, err = s.S3Client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - return errors.Wrap(err, "deleting S3 object") + if err := s.deleteObject(bucket, key); err != nil { + return err } s.scope.Debug("Delete object call succeeded despite missing GetObject permission", "bucket", bucket, "key", key) + return nil + case "NotFound": + s.scope.Debug("Either bucket or object does not exist", "bucket", bucket, "key", key) return nil case s3.ErrCodeNoSuchKey: s.scope.Debug("Object already deleted", "bucket", bucket, "key", key) @@ -203,19 +211,30 @@ func (s *Service) Delete(m *scope.MachineScope) error { case s3.ErrCodeNoSuchBucket: s.scope.Debug("Bucket does not exist", "bucket", bucket) return nil - default: - return errors.Wrap(aerr, "deleting S3 object") } } + return errors.Wrap(err, "deleting S3 object") } s.scope.Info("Deleting S3 object", "bucket", bucket, "key", key) - _, err = s.S3Client.DeleteObject(&s3.DeleteObjectInput{ + return s.deleteObject(bucket, key) +} + +func (s *Service) deleteObject(bucket, key string) error { + if _, err := s.S3Client.DeleteObject(&s3.DeleteObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), - }) - if err != nil { + }); err != nil { + if ptr.Deref(s.scope.Bucket().BestEffortDeleteObjects, false) { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case "Forbidden", "AccessDenied": + s.scope.Debug("Ignoring deletion error", "bucket", bucket, "key", key, "error", aerr.Message()) + return nil + } + } + } return errors.Wrap(err, "deleting S3 object") } @@ -223,11 +242,13 @@ func (s *Service) Delete(m *scope.MachineScope) error { } func (s *Service) createBucketIfNotExist(bucketName string) error { - input := &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + input := &s3.CreateBucketInput{Bucket: aws.String(bucketName)} + + // See https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#AmazonS3-CreateBucket-request-LocationConstraint. 
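Alongside the region-aware CreateBucket input in this hunk, the deletion path above now consults a BestEffortDeleteObjects flag on the bucket spec (the field is exercised by the tests later in this diff). A minimal sketch of opting in, with illustrative values:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

// Illustrative AWSCluster spec: managed bucket in the default region, with
// best-effort object deletion enabled.
var awsClusterSpec = infrav1.AWSClusterSpec{
	// For us-east-1 the controller now omits CreateBucketConfiguration /
	// LocationConstraint entirely, per the CreateBucket API reference above.
	Region: "us-east-1",
	S3Bucket: &infrav1.S3Bucket{
		Name: "cluster-bootstrap-data",
		// AccessDenied/Forbidden responses to DeleteObject are logged and
		// ignored rather than failing machine deletion.
		BestEffortDeleteObjects: aws.Bool(true),
	},
}
```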
+ if s.scope.Region() != AWSDefaultRegion { + input.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ LocationConstraint: aws.String(s.scope.Region()), - }, + } } _, err := s.S3Client.CreateBucket(input) diff --git a/pkg/cloud/services/s3/s3_test.go b/pkg/cloud/services/s3/s3_test.go index 7ce58d19d0..3db7abfca7 100644 --- a/pkg/cloud/services/s3/s3_test.go +++ b/pkg/cloud/services/s3/s3_test.go @@ -66,8 +66,10 @@ func TestReconcileBucket(t *testing.T) { expectedBucketName := "baz" - svc, s3Mock := testService(t, &infrav1.S3Bucket{ - Name: expectedBucketName, + svc, s3Mock := testService(t, &testServiceInput{ + Bucket: &infrav1.S3Bucket{ + Name: expectedBucketName, + }, }) input := &s3svc.CreateBucketInput{ @@ -168,11 +170,13 @@ func TestReconcileBucket(t *testing.T) { bucketName := "bar" - svc, s3Mock := testService(t, &infrav1.S3Bucket{ - Name: bucketName, - ControlPlaneIAMInstanceProfile: fmt.Sprintf("control-plane%s", iamv1.DefaultNameSuffix), - NodesIAMInstanceProfiles: []string{ - fmt.Sprintf("nodes%s", iamv1.DefaultNameSuffix), + svc, s3Mock := testService(t, &testServiceInput{ + Bucket: &infrav1.S3Bucket{ + Name: bucketName, + ControlPlaneIAMInstanceProfile: fmt.Sprintf("control-plane%s", iamv1.DefaultNameSuffix), + NodesIAMInstanceProfiles: []string{ + fmt.Sprintf("nodes%s", iamv1.DefaultNameSuffix), + }, }, }) @@ -218,7 +222,7 @@ func TestReconcileBucket(t *testing.T) { t.Run("is_idempotent", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(2) s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(2) @@ -236,7 +240,7 @@ func TestReconcileBucket(t *testing.T) { t.Run("ignores_when_bucket_already_exists_but_its_owned_by_the_same_account", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) err := awserr.New(s3svc.ErrCodeBucketAlreadyOwnedByYou, "err", errors.New("err")) @@ -255,7 +259,7 @@ func TestReconcileBucket(t *testing.T) { t.Run("bucket_creation_fails", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, errors.New("error")).Times(1) @@ -267,7 +271,7 @@ func TestReconcileBucket(t *testing.T) { t.Run("bucket_creation_returns_unexpected_AWS_error", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, awserr.New("foo", "", nil)).Times(1) @@ -279,14 +283,14 @@ func TestReconcileBucket(t *testing.T) { t.Run("generating_bucket_policy_fails", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(1) s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1) mockCtrl := gomock.NewController(t) stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl) - stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Return(nil, fmt.Errorf(t.Name())).AnyTimes() + stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Return(nil, errors.New(t.Name())).AnyTimes() svc.STSClient = 
stsMock if err := svc.ReconcileBucket(); err == nil { @@ -297,7 +301,7 @@ func TestReconcileBucket(t *testing.T) { t.Run("creating_bucket_policy_fails", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().CreateBucket(gomock.Any()).Return(nil, nil).Times(1) s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1) @@ -307,6 +311,27 @@ func TestReconcileBucket(t *testing.T) { t.Fatalf("Expected error") } }) + + t.Run("creates_bucket_without_location", func(t *testing.T) { + t.Parallel() + + bucketName := "test" + svc, s3Mock := testService(t, &testServiceInput{ + Region: "us-east-1", + Bucket: &infrav1.S3Bucket{Name: bucketName}, + }) + input := &s3svc.CreateBucketInput{ + Bucket: aws.String(bucketName), + } + + s3Mock.EXPECT().CreateBucket(gomock.Eq(input)).Return(nil, nil).Times(1) + s3Mock.EXPECT().PutBucketTagging(gomock.Any()).Return(nil, nil).Times(1) + s3Mock.EXPECT().PutBucketPolicy(gomock.Any()).Return(nil, nil).Times(1) + + if err := svc.ReconcileBucket(); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + }) }) } @@ -328,8 +353,10 @@ func TestDeleteBucket(t *testing.T) { t.Run("deletes_bucket_with_configured_name", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{ - Name: bucketName, + svc, s3Mock := testService(t, &testServiceInput{ + Bucket: &infrav1.S3Bucket{ + Name: bucketName, + }, }) input := &s3svc.DeleteBucketInput{ @@ -348,7 +375,7 @@ func TestDeleteBucket(t *testing.T) { t.Run("unexpected_error", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, errors.New("err")).Times(1) @@ -360,7 +387,7 @@ func TestDeleteBucket(t *testing.T) { t.Run("unexpected_AWS_error", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, awserr.New("foo", "", nil)).Times(1) @@ -373,7 +400,7 @@ func TestDeleteBucket(t *testing.T) { t.Run("ignores_when_bucket_has_already_been_removed", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchBucket, "", nil)).Times(1) @@ -385,7 +412,7 @@ func TestDeleteBucket(t *testing.T) { t.Run("skips_bucket_removal_when_bucket_is_not_empty", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) s3Mock.EXPECT().DeleteBucket(gomock.Any()).Return(nil, awserr.New("BucketNotEmpty", "", nil)).Times(1) @@ -406,8 +433,10 @@ func TestCreateObject(t *testing.T) { t.Run("for_machine", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{ - Name: bucketName, + svc, s3Mock := testService(t, &testServiceInput{ + Bucket: &infrav1.S3Bucket{ + Name: bucketName, + }, }) machineScope := &scope.MachineScope{ @@ -487,7 +516,7 @@ func TestCreateObject(t *testing.T) { t.Run("is_idempotent", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, 
&testServiceInput{Bucket: &infrav1.S3Bucket{}}) machineScope := &scope.MachineScope{ Machine: &clusterv1.Machine{}, @@ -516,7 +545,7 @@ func TestCreateObject(t *testing.T) { t.Run("object_creation_fails", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) machineScope := &scope.MachineScope{ Machine: &clusterv1.Machine{}, @@ -542,7 +571,7 @@ func TestCreateObject(t *testing.T) { t.Run("given_empty_machine_scope", func(t *testing.T) { t.Parallel() - svc, _ := testService(t, &infrav1.S3Bucket{}) + svc, _ := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) bootstrapDataURL, err := svc.Create(nil, []byte("foo")) if err == nil { @@ -558,7 +587,7 @@ func TestCreateObject(t *testing.T) { t.Run("given_empty_bootstrap_data", func(t *testing.T) { t.Parallel() - svc, _ := testService(t, &infrav1.S3Bucket{}) + svc, _ := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) machineScope := &scope.MachineScope{ Machine: &clusterv1.Machine{}, @@ -615,8 +644,10 @@ func TestDeleteObject(t *testing.T) { expectedBucketName := "foo" - svc, s3Mock := testService(t, &infrav1.S3Bucket{ - Name: expectedBucketName, + svc, s3Mock := testService(t, &testServiceInput{ + Bucket: &infrav1.S3Bucket{ + Name: expectedBucketName, + }, }) machineScope := &scope.MachineScope{ @@ -663,11 +694,9 @@ func TestDeleteObject(t *testing.T) { } }) - t.Run("succeeds_when_bucket_has_already_been_removed", func(t *testing.T) { + t.Run("succeeds_when", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) - machineScope := &scope.MachineScope{ Machine: &clusterv1.Machine{}, AWSMachine: &infrav1.AWSMachine{ @@ -677,11 +706,50 @@ func TestDeleteObject(t *testing.T) { }, } - s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchBucket, "", nil)) + t.Run("bucket_has_already_been_removed", func(t *testing.T) { + t.Parallel() - if err := svc.Delete(machineScope); err != nil { - t.Fatalf("Unexpected error, got: %v", err) - } + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) + s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchBucket, "", nil)) + + if err := svc.Delete(machineScope); err != nil { + t.Fatalf("Unexpected error, got: %v", err) + } + }) + + t.Run("object_has_already_been_removed", func(t *testing.T) { + t.Parallel() + + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) + s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New(s3svc.ErrCodeNoSuchKey, "", nil)) + + if err := svc.Delete(machineScope); err != nil { + t.Fatalf("Unexpected error, got: %v", err) + } + }) + + t.Run("bucket_or_object_not_found", func(t *testing.T) { + t.Parallel() + + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) + s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, awserr.New("NotFound", "Not found", nil)) + + if err := svc.Delete(machineScope); err != nil { + t.Fatalf("Unexpected error, got: %v", err) + } + }) + + t.Run("object_access_denied_and_BestEffortDeleteObjects_is_on", func(t *testing.T) { + t.Parallel() + + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{BestEffortDeleteObjects: aws.Bool(true)}}) + s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, nil) + s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, awserr.New("AccessDenied", "Access Denied", nil)) + + if err := 
svc.Delete(machineScope); err != nil { + t.Fatalf("Unexpected error, got: %v", err) + } + }) }) t.Run("returns_error_when", func(t *testing.T) { @@ -690,7 +758,7 @@ func TestDeleteObject(t *testing.T) { t.Run("object_deletion_fails", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) machineScope := &scope.MachineScope{ Machine: &clusterv1.Machine{}, @@ -712,7 +780,7 @@ func TestDeleteObject(t *testing.T) { t.Run("given_empty_machine_scope", func(t *testing.T) { t.Parallel() - svc, _ := testService(t, &infrav1.S3Bucket{}) + svc, _ := testService(t, nil) if err := svc.Delete(nil); err == nil { t.Fatalf("Expected error") @@ -737,12 +805,33 @@ func TestDeleteObject(t *testing.T) { t.Fatalf("Expected error") } }) + + t.Run("object_access_denied_and_BestEffortDeleteObjects_is_off", func(t *testing.T) { + t.Parallel() + + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) + s3Mock.EXPECT().HeadObject(gomock.Any()).Return(nil, nil) + s3Mock.EXPECT().DeleteObject(gomock.Any()).Return(nil, awserr.New("AccessDenied", "Access Denied", nil)) + + machineScope := &scope.MachineScope{ + Machine: &clusterv1.Machine{}, + AWSMachine: &infrav1.AWSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + }, + } + + if err := svc.Delete(machineScope); err == nil { + t.Fatalf("Expected error") + } + }) }) t.Run("is_idempotent", func(t *testing.T) { t.Parallel() - svc, s3Mock := testService(t, &infrav1.S3Bucket{}) + svc, s3Mock := testService(t, &testServiceInput{Bucket: &infrav1.S3Bucket{}}) machineScope := &scope.MachineScope{ Machine: &clusterv1.Machine{}, @@ -766,7 +855,14 @@ func TestDeleteObject(t *testing.T) { }) } -func testService(t *testing.T, bucket *infrav1.S3Bucket) (*s3.Service, *mock_s3iface.MockS3API) { +type testServiceInput struct { + Bucket *infrav1.S3Bucket + Region string +} + +const testAWSRegion string = "us-west-2" + +func testService(t *testing.T, si *testServiceInput) (*s3.Service, *mock_s3iface.MockS3API) { t.Helper() mockCtrl := gomock.NewController(t) @@ -780,6 +876,13 @@ func testService(t *testing.T, bucket *infrav1.S3Bucket) (*s3.Service, *mock_s3i _ = infrav1.AddToScheme(scheme) client := fake.NewClientBuilder().WithScheme(scheme).Build() + if si == nil { + si = &testServiceInput{} + } + if si.Region == "" { + si.Region = testAWSRegion + } + scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: client, Cluster: &clusterv1.Cluster{ @@ -790,8 +893,8 @@ func testService(t *testing.T, bucket *infrav1.S3Bucket) (*s3.Service, *mock_s3i }, AWSCluster: &infrav1.AWSCluster{ Spec: infrav1.AWSClusterSpec{ - S3Bucket: bucket, - Region: "us-west-2", + S3Bucket: si.Bucket, + Region: si.Region, AdditionalTags: infrav1.Tags{ "additional": "from-aws-cluster", }, diff --git a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go index 6f9493872e..88f2878984 100644 --- a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go +++ b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_secretsmanageriface provides a mock interface for the SecretsManager API client. // Run go generate to regenerate this mock. 
+// //go:generate ../../../../../hack/tools/bin/mockgen -destination secretsmanagerapi_mock.go -package mock_secretsmanageriface github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface SecretsManagerAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt secretsmanagerapi_mock.go > _secretsmanagerapi_mock.go && mv _secretsmanagerapi_mock.go secretsmanagerapi_mock.go" - package mock_secretsmanageriface //nolint:stylecheck diff --git a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go index c6ebd2110d..638d716da2 100644 --- a/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go +++ b/pkg/cloud/services/secretsmanager/mock_secretsmanageriface/secretsmanagerapi_mock.go @@ -52,6 +52,89 @@ func (m *MockSecretsManagerAPI) EXPECT() *MockSecretsManagerAPIMockRecorder { return m.recorder } +// BatchGetSecretValue mocks base method. +func (m *MockSecretsManagerAPI) BatchGetSecretValue(arg0 *secretsmanager.BatchGetSecretValueInput) (*secretsmanager.BatchGetSecretValueOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchGetSecretValue", arg0) + ret0, _ := ret[0].(*secretsmanager.BatchGetSecretValueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchGetSecretValue indicates an expected call of BatchGetSecretValue. +func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValue", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValue), arg0) +} + +// BatchGetSecretValuePages mocks base method. +func (m *MockSecretsManagerAPI) BatchGetSecretValuePages(arg0 *secretsmanager.BatchGetSecretValueInput, arg1 func(*secretsmanager.BatchGetSecretValueOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchGetSecretValuePages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchGetSecretValuePages indicates an expected call of BatchGetSecretValuePages. +func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValuePages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValuePages", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValuePages), arg0, arg1) +} + +// BatchGetSecretValuePagesWithContext mocks base method. +func (m *MockSecretsManagerAPI) BatchGetSecretValuePagesWithContext(arg0 context.Context, arg1 *secretsmanager.BatchGetSecretValueInput, arg2 func(*secretsmanager.BatchGetSecretValueOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "BatchGetSecretValuePagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchGetSecretValuePagesWithContext indicates an expected call of BatchGetSecretValuePagesWithContext. +func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValuePagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValuePagesWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValuePagesWithContext), varargs...) +} + +// BatchGetSecretValueRequest mocks base method. +func (m *MockSecretsManagerAPI) BatchGetSecretValueRequest(arg0 *secretsmanager.BatchGetSecretValueInput) (*request.Request, *secretsmanager.BatchGetSecretValueOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchGetSecretValueRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.BatchGetSecretValueOutput) + return ret0, ret1 +} + +// BatchGetSecretValueRequest indicates an expected call of BatchGetSecretValueRequest. +func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValueRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValueRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValueRequest), arg0) +} + +// BatchGetSecretValueWithContext mocks base method. +func (m *MockSecretsManagerAPI) BatchGetSecretValueWithContext(arg0 context.Context, arg1 *secretsmanager.BatchGetSecretValueInput, arg2 ...request.Option) (*secretsmanager.BatchGetSecretValueOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "BatchGetSecretValueWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.BatchGetSecretValueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchGetSecretValueWithContext indicates an expected call of BatchGetSecretValueWithContext. +func (mr *MockSecretsManagerAPIMockRecorder) BatchGetSecretValueWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetSecretValueWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).BatchGetSecretValueWithContext), varargs...) +} + // CancelRotateSecret mocks base method. func (m *MockSecretsManagerAPI) CancelRotateSecret(arg0 *secretsmanager.CancelRotateSecretInput) (*secretsmanager.CancelRotateSecretOutput, error) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/secretsmanager/secret_fetch_script.go b/pkg/cloud/services/secretsmanager/secret_fetch_script.go index d7d4accbe7..4e3f09e8fe 100644 --- a/pkg/cloud/services/secretsmanager/secret_fetch_script.go +++ b/pkg/cloud/services/secretsmanager/secret_fetch_script.go @@ -48,6 +48,8 @@ SECRET_PREFIX="{{.SecretPrefix}}" CHUNKS="{{.Chunks}}" FILE="/etc/secret-userdata.txt" FINAL_INDEX=$((CHUNKS - 1)) +MAX_RETRIES=10 +RETRY_DELAY=10 # in seconds # Log an error and exit. # Args: @@ -115,6 +117,7 @@ check_aws_command() { ;; esac } + delete_secret_value() { local id="${SECRET_PREFIX}-${1}" local out @@ -126,19 +129,27 @@ delete_secret_value() { aws secretsmanager ${ENDPOINT} --region ${REGION} delete-secret --force-delete-without-recovery --secret-id "${id}" 2>&1 ) local delete_return=$? 
- set -o errexit - set -o nounset - set -o pipefail check_aws_command "SecretsManager::DeleteSecret" "${delete_return}" "${out}" if [ ${delete_return} -ne 0 ]; then - log::error_exit "Could not delete secret value" 2 + log::error "Could not delete secret value" + return 1 fi } -delete_secrets() { - for i in $(seq 0 ${FINAL_INDEX}); do - delete_secret_value "$i" +retry_delete_secret_value() { + local retries=0 + while [ ${retries} -lt ${MAX_RETRIES} ]; do + delete_secret_value "$1" + local return_code=$? + if [ ${return_code} -eq 0 ]; then + return 0 + else + ((retries++)) + log::info "Retrying in ${RETRY_DELAY} seconds..." + sleep ${RETRY_DELAY} + fi done + return 1 } get_secret_value() { @@ -159,18 +170,33 @@ get_secret_value() { ) local get_return=$? check_aws_command "SecretsManager::GetSecretValue" "${get_return}" "${data}" + if [ ${get_return} -ne 0 ]; then + log::error "could not get secret value" + return 1 + fi set -o errexit set -o nounset set -o pipefail - if [ ${get_return} -ne 0 ]; then - log::error "could not get secret value, deleting secret" - delete_secrets - log::error_exit "could not get secret value, but secret was deleted" 1 - fi log::info "appending data to temporary file ${FILE}.gz" echo "${data}" | base64 -d >>${FILE}.gz } +retry_get_secret_value() { + local retries=0 + while [ ${retries} -lt ${MAX_RETRIES} ]; do + get_secret_value "$1" + local return_code=$? + if [ ${return_code} -eq 0 ]; then + return 0 + else + ((retries++)) + log::info "Retrying in ${RETRY_DELAY} seconds..." + sleep ${RETRY_DELAY} + fi + done + return 1 +} + log::info "aws.cluster.x-k8s.io encrypted cloud-init script $0 started" log::info "secret prefix: ${SECRET_PREFIX}" log::info "secret count: ${CHUNKS}" @@ -181,10 +207,21 @@ if test -f "${FILE}"; then fi for i in $(seq 0 "${FINAL_INDEX}"); do - get_secret_value "$i" + retry_get_secret_value "$i" + return_code=$? + if [ ${return_code} -ne 0 ]; then + log::error "Failed to get secret value after ${MAX_RETRIES} attempts" + fi done -delete_secrets +for i in $(seq 0 ${FINAL_INDEX}); do + retry_delete_secret_value "$i" + return_code=$? + if [ ${return_code} -ne 0 ]; then + log::error "Failed to delete secret value after ${MAX_RETRIES} attempts" + log::error_exit "couldn't delete the secret value, exiting" 1 + fi +done log::info "decompressing userdata to ${FILE}" gunzip "${FILE}.gz" diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index df4976ea4e..87cf7e958a 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -26,7 +26,6 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -281,7 +280,6 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) { func getMachineScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachineScope, error) { return scope.NewMachineScope(scope.MachineScopeParams{ Client: client, - ControlPlane: &unstructured.Unstructured{}, Cluster: clusterScope.Cluster, Machine: &clusterv1.Machine{}, InfraCluster: clusterScope, diff --git a/pkg/cloud/services/secretsmanager/service.go b/pkg/cloud/services/secretsmanager/service.go index 02e844919d..c9a06510f6 100644 --- a/pkg/cloud/services/secretsmanager/service.go +++ b/pkg/cloud/services/secretsmanager/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package secretsmanager provides a way to interact with AWS Secrets Manager. package secretsmanager import ( diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go index 1a3c9440e1..f1f82193cc 100644 --- a/pkg/cloud/services/securitygroup/securitygroups.go +++ b/pkg/cloud/services/securitygroup/securitygroups.go @@ -160,10 +160,12 @@ func (s *Service) ReconcileSecurityGroups() error { } current := sg.IngressRules - want, err := s.getSecurityGroupIngressRules(role) + specRules, err := s.getSecurityGroupIngressRules(role) if err != nil { return err } + // Duplicate rules with multiple cidr blocks/source security groups so that we are comparing similar sets. + want := expandIngressRules(specRules) toRevoke := current.Difference(want) if len(toRevoke) > 0 { @@ -197,6 +199,47 @@ func (s *Service) ReconcileSecurityGroups() error { return nil } +// expandIngressRules expand the given ingress rules so that it's compatible with the list generated by +// ingressRulesFromSDKType. +// We assume that processIngressRulesSGs has been already called on the input, so the SourceSecurityGroupRoles have +// been translated into Security Group IDs. 
+func expandIngressRules(rules infrav1.IngressRules) infrav1.IngressRules { + res := make(infrav1.IngressRules, 0, len(rules)) + for _, rule := range rules { + base := infrav1.IngressRule{ + Description: rule.Description, + Protocol: rule.Protocol, + FromPort: rule.FromPort, + ToPort: rule.ToPort, + } + + // Nothing to expand + if len(rule.CidrBlocks) == 0 && len(rule.IPv6CidrBlocks) == 0 && len(rule.SourceSecurityGroupIDs) == 0 { + res = append(res, base) + continue + } + + for _, src := range rule.CidrBlocks { + rcopy := base + rcopy.CidrBlocks = []string{src} + res = append(res, rcopy) + } + + for _, src := range rule.IPv6CidrBlocks { + rcopy := base + rcopy.IPv6CidrBlocks = []string{src} + res = append(res, rcopy) + } + + for _, src := range rule.SourceSecurityGroupIDs { + rcopy := base + rcopy.SourceSecurityGroupIDs = []string{src} + res = append(res, rcopy) + } + } + return res +} + func (s *Service) securityGroupIsAnOverride(securityGroupID string) bool { for _, overrideID := range s.scope.SecurityGroupOverrides() { if overrideID == securityGroupID { @@ -207,7 +250,7 @@ func (s *Service) securityGroupIsAnOverride(securityGroupID string) bool { } func (s *Service) describeSecurityGroupOverridesByID() (map[infrav1.SecurityGroupRole]*ec2.SecurityGroup, error) { - securityGroupIds := map[infrav1.SecurityGroupRole]*string{} + securityGroupIDs := map[infrav1.SecurityGroupRole]*string{} input := &ec2.DescribeSecurityGroupsInput{} overrides := s.scope.SecurityGroupOverrides() @@ -221,7 +264,7 @@ func (s *Service) describeSecurityGroupOverridesByID() (map[infrav1.SecurityGrou for _, role := range s.roles { securityGroupID, ok := s.scope.SecurityGroupOverrides()[role] if ok { - securityGroupIds[role] = aws.String(securityGroupID) + securityGroupIDs[role] = aws.String(securityGroupID) input.GroupIds = append(input.GroupIds, aws.String(securityGroupID)) } } @@ -235,10 +278,10 @@ func (s *Service) describeSecurityGroupOverridesByID() (map[infrav1.SecurityGrou res := make(map[infrav1.SecurityGroupRole]*ec2.SecurityGroup, len(out.SecurityGroups)) for _, role := range s.roles { for _, ec2sg := range out.SecurityGroups { - if securityGroupIds[role] == nil { + if securityGroupIDs[role] == nil { continue } - if *ec2sg.GroupId == *securityGroupIds[role] { + if *ec2sg.GroupId == *securityGroupIDs[role] { s.scope.Debug("found security group override", "role", role, "security group", *ec2sg.GroupName) res[role] = ec2sg @@ -285,7 +328,7 @@ func (s *Service) DeleteSecurityGroups() error { for i := range clusterGroups { sg := clusterGroups[i] current := sg.IngressRules - if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { + if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) return err } @@ -311,7 +354,7 @@ func (s *Service) deleteSecurityGroup(sg *infrav1.SecurityGroup, typ string) err GroupId: aws.String(sg.ID), } - if _, err := s.EC2Client.DeleteSecurityGroupWithContext(context.TODO(), input); awserrors.IsIgnorableSecurityGroupError(err) != nil { + if _, err := s.EC2Client.DeleteSecurityGroupWithContext(context.TODO(), input); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic record.Warnf(s.scope.InfraCluster(), "FailedDeleteSecurityGroup", "Failed to delete %s SecurityGroup %q with name %q: %v", 
typ, sg.ID, sg.Name, err) return errors.Wrapf(err, "failed to delete security group %q with name %q", sg.ID, sg.Name) } @@ -592,24 +635,12 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) ( rules = append(rules, s.defaultSSHIngressRule(s.scope.SecurityGroups()[infrav1.SecurityGroupBastion].ID)) } - ingressRules := s.scope.AdditionalControlPlaneIngressRules() - for i := range ingressRules { - if len(ingressRules[i].CidrBlocks) != 0 || len(ingressRules[i].IPv6CidrBlocks) != 0 { // don't set source security group if cidr blocks are set - continue - } - - if len(ingressRules[i].SourceSecurityGroupIDs) == 0 && len(ingressRules[i].SourceSecurityGroupRoles) == 0 { // if the rule doesn't have a source security group, use the control plane security group - ingressRules[i].SourceSecurityGroupIDs = []string{s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID} - continue - } - - securityGroupIDs := sets.New[string](ingressRules[i].SourceSecurityGroupIDs...) - for _, sourceSGRole := range ingressRules[i].SourceSecurityGroupRoles { - securityGroupIDs.Insert(s.scope.SecurityGroups()[sourceSGRole].ID) - } - ingressRules[i].SourceSecurityGroupIDs = sets.List[string](securityGroupIDs) + additionalIngressRules, err := s.processIngressRulesSGs(s.scope.AdditionalControlPlaneIngressRules()) + if err != nil { + return nil, err } - rules = append(rules, ingressRules...) + + rules = append(rules, additionalIngressRules...) return append(cniRules, rules...), nil @@ -656,14 +687,22 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) ( return infrav1.IngressRules{}, nil case infrav1.SecurityGroupAPIServerLB: kubeletRules := s.getIngressRulesToAllowKubeletToAccessTheControlPlaneLB() - customIngressRules := s.getControlPlaneLBIngressRules() + customIngressRules, err := s.processIngressRulesSGs(s.getControlPlaneLBIngressRules()) + if err != nil { + return nil, err + } rulesToApply := customIngressRules.Difference(kubeletRules) return append(kubeletRules, rulesToApply...), nil case infrav1.SecurityGroupLB: + rules := infrav1.IngressRules{} + allowedNLBTraffic := false // We hand this group off to the in-cluster cloud provider, so these rules aren't used // Except if the load balancer type is NLB, and we have an AWS Cluster in which case we // need to open port 6443 to the NLB traffic and health check inside the VPC. 
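For reference, when one or both control-plane load balancers are NLBs, the loop below emits the API-server rule only once (guarded by allowedNLBTraffic), and then appends one rule per additional listener on each LB. A sketch of that single rule, assuming the default API server port, PreserveClientIP disabled, IPv6 off, and a 10.0.0.0/16 VPC:

```go
package example

import infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"

// Sketch of the rule emitted for NLB traffic under the assumptions above.
var nlbAPIServerRule = infrav1.IngressRule{
	Description: "Allow NLB traffic to the control plane instances.",
	Protocol:    infrav1.SecurityGroupProtocolTCP,
	FromPort:    6443,
	ToPort:      6443,
	// Client IPs are not preserved, so only in-VPC sources (the NLB nodes and
	// their health checks) need to be admitted.
	CidrBlocks: []string{"10.0.0.0/16"},
}
```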
- if s.scope.ControlPlaneLoadBalancer() != nil && s.scope.ControlPlaneLoadBalancer().LoadBalancerType == infrav1.LoadBalancerTypeNLB { + for _, lb := range s.scope.ControlPlaneLoadBalancers() { + if lb == nil || lb.LoadBalancerType != infrav1.LoadBalancerTypeNLB { + continue + } var ( ipv4CidrBlocks []string ipv6CidrBlocks []string @@ -673,25 +712,26 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) ( if s.scope.VPC().IsIPv6Enabled() { ipv6CidrBlocks = []string{s.scope.VPC().IPv6.CidrBlock} } - if s.scope.ControlPlaneLoadBalancer().PreserveClientIP { + if lb.PreserveClientIP { ipv4CidrBlocks = []string{services.AnyIPv4CidrBlock} if s.scope.VPC().IsIPv6Enabled() { ipv6CidrBlocks = []string{services.AnyIPv6CidrBlock} } } - rules := infrav1.IngressRules{ - { + if !allowedNLBTraffic { + rules = append(rules, infrav1.IngressRule{ Description: "Allow NLB traffic to the control plane instances.", Protocol: infrav1.SecurityGroupProtocolTCP, FromPort: int64(s.scope.APIServerPort()), ToPort: int64(s.scope.APIServerPort()), CidrBlocks: ipv4CidrBlocks, IPv6CidrBlocks: ipv6CidrBlocks, - }, + }) + allowedNLBTraffic = true } - for _, ln := range s.scope.ControlPlaneLoadBalancer().AdditionalListeners { + for _, ln := range lb.AdditionalListeners { rules = append(rules, infrav1.IngressRule{ Description: fmt.Sprintf("Allow NLB traffic to the control plane instances on port %d.", ln.Port), Protocol: infrav1.SecurityGroupProtocolTCP, @@ -701,10 +741,8 @@ func (s *Service) getSecurityGroupIngressRules(role infrav1.SecurityGroupRole) ( IPv6CidrBlocks: ipv6CidrBlocks, }) } - - return rules, nil } - return infrav1.IngressRules{}, nil + return rules, nil } return nil, errors.Errorf("Cannot determine ingress rules for unknown security group role %q", role) @@ -915,8 +953,14 @@ func (s *Service) getIngressRulesToAllowKubeletToAccessTheControlPlaneLB() infra // getControlPlaneLBIngressRules returns the ingress rules for the control plane LB. // We allow all traffic when no other rules are defined. func (s *Service) getControlPlaneLBIngressRules() infrav1.IngressRules { - if s.scope.ControlPlaneLoadBalancer() != nil && len(s.scope.ControlPlaneLoadBalancer().IngressRules) > 0 { - return s.scope.ControlPlaneLoadBalancer().IngressRules + ingressRules := infrav1.IngressRules{} + for _, lb := range s.scope.ControlPlaneLoadBalancers() { + if lb != nil && len(lb.IngressRules) > 0 { + ingressRules = append(ingressRules, lb.IngressRules...) 
+ } + } + if len(ingressRules) > 0 { + return ingressRules } // If no custom ingress rules have been defined we allow all traffic so that the MC can access the WC API @@ -970,3 +1014,45 @@ func (s *Service) getIngressRuleToAllowVPCCidrInTheAPIServer() infrav1.IngressRu }, } } + +func (s *Service) processIngressRulesSGs(ingressRules []infrav1.IngressRule) (infrav1.IngressRules, error) { + output := []infrav1.IngressRule{} + + for _, rule := range ingressRules { + if rule.NatGatewaysIPsSource { // if the rule has NatGatewaysIPsSource set to true, use the NAT Gateway IPs as the source + natGatewaysCidrs := []string{} + natGatewaysIPs := s.scope.GetNatGatewaysIPs() + for _, ip := range natGatewaysIPs { + natGatewaysCidrs = append(natGatewaysCidrs, fmt.Sprintf("%s/32", ip)) + } + if len(natGatewaysIPs) > 0 { + rule.CidrBlocks = natGatewaysCidrs + output = append(output, rule) + continue + } + + return nil, errors.New("NAT Gateway IPs are not available yet") + } + + if len(rule.CidrBlocks) != 0 || len(rule.IPv6CidrBlocks) != 0 { // don't set source security group if cidr blocks are set + output = append(output, rule) + continue + } + + if len(rule.SourceSecurityGroupIDs) == 0 && len(rule.SourceSecurityGroupRoles) == 0 { // if the rule doesn't have a source security group, use the control plane security group + rule.SourceSecurityGroupIDs = []string{s.scope.SecurityGroups()[infrav1.SecurityGroupControlPlane].ID} + output = append(output, rule) + continue + } + + securityGroupIDs := sets.New(rule.SourceSecurityGroupIDs...) + for _, sourceSGRole := range rule.SourceSecurityGroupRoles { + securityGroupIDs.Insert(s.scope.SecurityGroups()[sourceSGRole].ID) + } + rule.SourceSecurityGroupIDs = sets.List(securityGroupIDs) + + output = append(output, rule) + } + + return output, nil +} diff --git a/pkg/cloud/services/securitygroup/securitygroups_test.go b/pkg/cloud/services/securitygroup/securitygroups_test.go index 50fe7007dc..3bdf795ea8 100644 --- a/pkg/cloud/services/securitygroup/securitygroups_test.go +++ b/pkg/cloud/services/securitygroup/securitygroups_test.go @@ -827,6 +827,295 @@ func TestReconcileSecurityGroups(t *testing.T) { Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).AnyTimes() }, }, + { + name: "authorized target ingress rules are not revoked", + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + return acl + }, + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-securitygroups", + InternetGatewayID: aws.String("igw-01"), + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + EmptyRoutesDefaultVPCSecurityGroup: true, + }, + Subnets: infrav1.Subnets{ + infrav1.SubnetSpec{ + ID: "subnet-securitygroups-private", + IsPublic: false, + AvailabilityZone: "us-east-1a", + }, + infrav1.SubnetSpec{ + ID: "subnet-securitygroups-public", + IsPublic: true, + NatGatewayID: aws.String("nat-01"), + AvailabilityZone: "us-east-1a", + }, + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeSecurityGroupsWithContext(context.TODO(), &ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{ + filter.EC2.VPC("vpc-securitygroups"), + filter.EC2.SecurityGroupName("default"), + }, + }). 
+ Return(&ec2.DescribeSecurityGroupsOutput{ + SecurityGroups: []*ec2.SecurityGroup{ + { + Description: aws.String("default VPC security group"), + GroupName: aws.String("default"), + GroupId: aws.String("sg-default"), + }, + }, + }, nil) + + m.RevokeSecurityGroupIngressWithContext(context.TODO(), gomock.Eq(&ec2.RevokeSecurityGroupIngressInput{ + GroupId: aws.String("sg-default"), + IpPermissions: []*ec2.IpPermission{ + { + IpProtocol: aws.String("-1"), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { + GroupId: aws.String("sg-default"), + }, + }, + }, + }, + })).Times(1) + + m.RevokeSecurityGroupEgressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.RevokeSecurityGroupEgressInput{ + GroupId: aws.String("sg-default"), + })) + + securityGroupBastion := &ec2.SecurityGroup{ + Description: aws.String("Kubernetes cluster test-cluster: bastion"), + GroupName: aws.String("test-cluster-bastion"), + GroupId: aws.String("sg-bastion"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-bastion"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("bastion"), + }, + }, + } + + securityGroupLB := &ec2.SecurityGroup{ + Description: aws.String("Kubernetes cluster test-cluster: lb"), + GroupName: aws.String("test-cluster-lb"), + GroupId: aws.String("sg-lb"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-lb"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("lb"), + }, { + Key: aws.String("kubernetes.io/cluster/test-cluster"), + Value: aws.String("owned"), + }, + }, + } + + securityGroupAPIServerLB := &ec2.SecurityGroup{ + Description: aws.String("Kubernetes cluster test-cluster: apiserver-lb"), + GroupName: aws.String("test-cluster-apiserver-lb"), + GroupId: aws.String("sg-apiserver-lb"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-apiserver-lb"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("apiserver-lb"), + }, + }, + IpPermissions: []*ec2.IpPermission{ + { + FromPort: aws.Int64(6443), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String("0.0.0.0/0"), + Description: aws.String("Kubernetes API"), + }, + }, + ToPort: aws.Int64(6443), + }, + // Extra rule to be revoked + { + FromPort: aws.Int64(22), + IpProtocol: aws.String("tcp"), + ToPort: aws.Int64(22), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String("0.0.0.0/0"), + Description: aws.String("SSH"), + }, + }, + }, + }, + } + + securityGroupControl := &ec2.SecurityGroup{ + Description: aws.String("Kubernetes cluster test-cluster: controlplane"), + GroupName: aws.String("test-cluster-controlplane"), + GroupId: aws.String("sg-control"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-controlplane"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("controlplane"), + }, + }, + IpPermissions: []*ec2.IpPermission{ + { + FromPort: 
aws.Int64(6443), + IpProtocol: aws.String("tcp"), + ToPort: aws.Int64(6443), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { + Description: aws.String("Kubernetes API"), + GroupId: aws.String("sg-apiserver-lb"), + }, { + Description: aws.String("Kubernetes API"), + GroupId: aws.String("sg-control"), + }, { + Description: aws.String("Kubernetes API"), + GroupId: aws.String("sg-node"), + }, + }, + }, + { + FromPort: aws.Int64(2379), + IpProtocol: aws.String("tcp"), + ToPort: aws.Int64(2379), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { + Description: aws.String("etcd"), + GroupId: aws.String("sg-control"), + }, + }, + }, + { + FromPort: aws.Int64(2380), + IpProtocol: aws.String("tcp"), + ToPort: aws.Int64(2380), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { + Description: aws.String("etcd peer"), + GroupId: aws.String("sg-control"), + }, + }, + }, + }, + } + + securityGroupNode := &ec2.SecurityGroup{ + Description: aws.String("Kubernetes cluster test-cluster: node"), + GroupName: aws.String("test-cluster-node"), + GroupId: aws.String("sg-node"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String("test-cluster-node"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"), + Value: aws.String("owned"), + }, { + Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"), + Value: aws.String("node"), + }, + }, + IpPermissions: []*ec2.IpPermission{ + { + FromPort: aws.Int64(30000), + ToPort: aws.Int64(32767), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String("0.0.0.0/0"), + Description: aws.String("Node Port Services"), + }, + }, + }, { + FromPort: aws.Int64(10250), + IpProtocol: aws.String("tcp"), + ToPort: aws.Int64(10250), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { + Description: aws.String("Kubelet API"), + GroupId: aws.String("sg-control"), + }, { + Description: aws.String("Kubelet API"), + GroupId: aws.String("sg-node"), + }, + }, + }, + }, + } + + m.DescribeSecurityGroupsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeSecurityGroupsInput{})). + Return(&ec2.DescribeSecurityGroupsOutput{ + SecurityGroups: []*ec2.SecurityGroup{ + securityGroupBastion, + securityGroupLB, + securityGroupAPIServerLB, + securityGroupControl, + securityGroupNode, + }, + }, nil) + + m.RevokeSecurityGroupIngressWithContext(context.TODO(), gomock.Eq(&ec2.RevokeSecurityGroupIngressInput{ + GroupId: aws.String("sg-apiserver-lb"), + IpPermissions: []*ec2.IpPermission{ + { + FromPort: aws.Int64(22), + ToPort: aws.Int64(22), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String("0.0.0.0/0"), + Description: aws.String("SSH"), + }, + }, + }, + }, + })).Times(1) + + m.AuthorizeSecurityGroupIngressWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String("sg-bastion"), + IpPermissions: []*ec2.IpPermission{ + { + ToPort: aws.Int64(22), + FromPort: aws.Int64(22), + IpProtocol: aws.String("tcp"), + }, + }, + })). 
+ Return(&ec2.AuthorizeSecurityGroupIngressOutput{}, nil).AnyTimes() + }, + }, } for _, tc := range testCases { @@ -905,7 +1194,9 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { testCases := []struct { name string networkSpec infrav1.NetworkSpec + networkStatus infrav1.NetworkStatus expectedAdditionalIngresRule infrav1.IngressRule + wantErr bool }{ { name: "default control plane security group is used", @@ -919,6 +1210,16 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { }, }, }, + networkStatus: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "cp-sg-id", + }, + infrav1.SecurityGroupNode: { + ID: "node-sg-id", + }, + }, + }, expectedAdditionalIngresRule: infrav1.IngressRule{ Description: "test", Protocol: infrav1.SecurityGroupProtocolTCP, @@ -940,6 +1241,16 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { }, }, }, + networkStatus: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "cp-sg-id", + }, + infrav1.SecurityGroupNode: { + ID: "node-sg-id", + }, + }, + }, expectedAdditionalIngresRule: infrav1.IngressRule{ Description: "test", Protocol: infrav1.SecurityGroupProtocolTCP, @@ -961,6 +1272,16 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { }, }, }, + networkStatus: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "cp-sg-id", + }, + infrav1.SecurityGroupNode: { + ID: "node-sg-id", + }, + }, + }, expectedAdditionalIngresRule: infrav1.IngressRule{ Description: "test", Protocol: infrav1.SecurityGroupProtocolTCP, @@ -983,6 +1304,16 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { }, }, }, + networkStatus: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "cp-sg-id", + }, + infrav1.SecurityGroupNode: { + ID: "node-sg-id", + }, + }, + }, expectedAdditionalIngresRule: infrav1.IngressRule{ Description: "test", Protocol: infrav1.SecurityGroupProtocolTCP, @@ -1004,6 +1335,16 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { }, }, }, + networkStatus: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "cp-sg-id", + }, + infrav1.SecurityGroupNode: { + ID: "node-sg-id", + }, + }, + }, expectedAdditionalIngresRule: infrav1.IngressRule{ Description: "test", Protocol: infrav1.SecurityGroupProtocolTCP, @@ -1011,6 +1352,53 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { ToPort: 9345, }, }, + { + name: "set nat gateway IPs cidr as source if specified", + networkSpec: infrav1.NetworkSpec{ + AdditionalControlPlaneIngressRules: []infrav1.IngressRule{ + { + Description: "test", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 9345, + ToPort: 9345, + NatGatewaysIPsSource: true, + }, + }, + }, + networkStatus: infrav1.NetworkStatus{ + SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ + infrav1.SecurityGroupControlPlane: { + ID: "cp-sg-id", + }, + infrav1.SecurityGroupNode: { + ID: "node-sg-id", + }, + }, + NatGatewaysIPs: []string{"test-ip"}, + }, + expectedAdditionalIngresRule: infrav1.IngressRule{ + Description: "test", + Protocol: infrav1.SecurityGroupProtocolTCP, + CidrBlocks: []string{"test-ip/32"}, + FromPort: 9345, + ToPort: 9345, + }, + 
}, + { + name: "error if nat gateway IPs cidr as source are specified but not available", + networkSpec: infrav1.NetworkSpec{ + AdditionalControlPlaneIngressRules: []infrav1.IngressRule{ + { + Description: "test", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 9345, + ToPort: 9345, + NatGatewaysIPsSource: true, + }, + }, + }, + wantErr: true, + }, } for _, tc := range testCases { @@ -1025,16 +1413,7 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { NetworkSpec: tc.networkSpec, }, Status: infrav1.AWSClusterStatus{ - Network: infrav1.NetworkStatus{ - SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ - infrav1.SecurityGroupControlPlane: { - ID: "cp-sg-id", - }, - infrav1.SecurityGroupNode: { - ID: "node-sg-id", - }, - }, - }, + Network: tc.networkStatus, }, }, }) @@ -1045,29 +1424,33 @@ func TestAdditionalControlPlaneSecurityGroup(t *testing.T) { s := NewService(cs, testSecurityGroupRoles) rules, err := s.getSecurityGroupIngressRules(infrav1.SecurityGroupControlPlane) if err != nil { - t.Fatalf("Failed to lookup controlplane security group ingress rules: %v", err) + if tc.wantErr { + return + } + t.Fatalf("Failed to lookup controlplane security group ingress rules: %v, wantErr %v", err, tc.wantErr) } found := false for _, r := range rules { - if r.Description == "test" { - found = true + if r.Description != "test" { + continue + } + found = true - if r.Protocol != tc.expectedAdditionalIngresRule.Protocol { - t.Fatalf("Expected protocol %s, got %s", tc.expectedAdditionalIngresRule.Protocol, r.Protocol) - } + if r.Protocol != tc.expectedAdditionalIngresRule.Protocol { + t.Fatalf("Expected protocol %s, got %s", tc.expectedAdditionalIngresRule.Protocol, r.Protocol) + } - if r.FromPort != tc.expectedAdditionalIngresRule.FromPort { - t.Fatalf("Expected from port %d, got %d", tc.expectedAdditionalIngresRule.FromPort, r.FromPort) - } + if r.FromPort != tc.expectedAdditionalIngresRule.FromPort { + t.Fatalf("Expected from port %d, got %d", tc.expectedAdditionalIngresRule.FromPort, r.FromPort) + } - if r.ToPort != tc.expectedAdditionalIngresRule.ToPort { - t.Fatalf("Expected to port %d, got %d", tc.expectedAdditionalIngresRule.ToPort, r.ToPort) - } + if r.ToPort != tc.expectedAdditionalIngresRule.ToPort { + t.Fatalf("Expected to port %d, got %d", tc.expectedAdditionalIngresRule.ToPort, r.ToPort) + } - if !sets.New[string](tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs...).Equal(sets.New[string](tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs...)) { - t.Fatalf("Expected source security group IDs %v, got %v", tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs, r.SourceSecurityGroupIDs) - } + if !sets.New(tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs...).Equal(sets.New(r.SourceSecurityGroupIDs...)) { + t.Fatalf("Expected source security group IDs %v, got %v", tc.expectedAdditionalIngresRule.SourceSecurityGroupIDs, r.SourceSecurityGroupIDs) } } @@ -1316,6 +1699,64 @@ func TestControlPlaneLoadBalancerIngressRules(t *testing.T) { }, }, }, + { + name: "defined rules are used when using internal and external LB", + awsCluster: &infrav1.AWSCluster{ + Spec: infrav1.AWSClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.AWSLoadBalancerSpec{ + IngressRules: []infrav1.IngressRule{ + { + Description: "My custom ingress rule", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 1234, + ToPort: 1234, + CidrBlocks: []string{"172.126.1.1/0"}, + }, + }, + Scheme: &infrav1.ELBSchemeInternal, + }, + SecondaryControlPlaneLoadBalancer: 
&infrav1.AWSLoadBalancerSpec{ + IngressRules: []infrav1.IngressRule{ + { + Description: "Another custom ingress rule", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 2345, + ToPort: 2345, + CidrBlocks: []string{"0.0.0.0/0"}, + }, + }, + }, + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + CidrBlock: "10.0.0.0/16", + }, + }, + }, + }, + expectedIngresRules: infrav1.IngressRules{ + infrav1.IngressRule{ + Description: "Kubernetes API", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 6443, + ToPort: 6443, + CidrBlocks: []string{"10.0.0.0/16"}, + }, + infrav1.IngressRule{ + Description: "My custom ingress rule", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 1234, + ToPort: 1234, + CidrBlocks: []string{"172.126.1.1/0"}, + }, + infrav1.IngressRule{ + Description: "Another custom ingress rule", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 2345, + ToPort: 2345, + CidrBlocks: []string{"0.0.0.0/0"}, + }, + }, + }, } for _, tc := range testCases { @@ -1662,3 +2103,137 @@ var processSecurityGroupsPage = func(ctx context.Context, _, y interface{}, requ }, }, true) } + +func TestExpandIngressRules(t *testing.T) { + tests := []struct { + name string + input infrav1.IngressRules + expected infrav1.IngressRules + }{ + { + name: "nothing to expand, nothing to do", + input: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + }, + }, + expected: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + }, + }, + }, + { + name: "nothing to expand, security group roles is removed", + input: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + SourceSecurityGroupRoles: []infrav1.SecurityGroupRole{ + infrav1.SecurityGroupControlPlane, + }, + }, + }, + expected: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + }, + }, + }, + { + name: "cidr blocks expand", + input: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + CidrBlocks: []string{"0.0.0.0/0", "1.1.1.1/0"}, + IPv6CidrBlocks: []string{"::/0", "::/1"}, + }, + }, + expected: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + CidrBlocks: []string{"0.0.0.0/0"}, + }, + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + CidrBlocks: []string{"1.1.1.1/0"}, + }, + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + IPv6CidrBlocks: []string{"::/0"}, + }, + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + IPv6CidrBlocks: []string{"::/1"}, + }, + }, + }, + { + name: "security group ids expand, security group roles removed", + input: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + SourceSecurityGroupIDs: []string{"sg-1", "sg-2"}, + SourceSecurityGroupRoles: []infrav1.SecurityGroupRole{ + infrav1.SecurityGroupControlPlane, + infrav1.SecurityGroupNode, + }, + }, + }, + expected: infrav1.IngressRules{ + { + Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + SourceSecurityGroupIDs: []string{"sg-1"}, + }, + { + 
Description: "SSH", + Protocol: infrav1.SecurityGroupProtocolTCP, + FromPort: 22, + ToPort: 22, + SourceSecurityGroupIDs: []string{"sg-2"}, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewGomegaWithT(t) + output := expandIngressRules(tc.input) + + g.Expect(output).To(Equal(tc.expected)) + }) + } +} diff --git a/pkg/cloud/services/securitygroup/service.go b/pkg/cloud/services/securitygroup/service.go index 68c82d0752..63231ea260 100644 --- a/pkg/cloud/services/securitygroup/service.go +++ b/pkg/cloud/services/securitygroup/service.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package securitygroup provides a service to manage AWS security group resources. package securitygroup import ( diff --git a/pkg/cloud/services/ssm/cloudinit.go b/pkg/cloud/services/ssm/cloudinit.go index f507d1a1fb..4159238fba 100644 --- a/pkg/cloud/services/ssm/cloudinit.go +++ b/pkg/cloud/services/ssm/cloudinit.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package ssm provides a service to generate userdata for AWS Systems Manager. package ssm import ( diff --git a/pkg/cloud/services/ssm/mock_ssmiface/doc.go b/pkg/cloud/services/ssm/mock_ssmiface/doc.go index e71c785bf9..8188fc99d5 100644 --- a/pkg/cloud/services/ssm/mock_ssmiface/doc.go +++ b/pkg/cloud/services/ssm/mock_ssmiface/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_ssmiface provides a mock interface for the SSM API client. // Run go generate to regenerate this mock. +// //go:generate ../../../../../hack/tools/bin/mockgen -destination ssmapi_mock.go -package mock_ssmiface github.com/aws/aws-sdk-go/service/ssm/ssmiface SSMAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt ssmapi_mock.go > _ssmapi_mock.go && mv _ssmapi_mock.go ssmapi_mock.go" - package mock_ssmiface //nolint:stylecheck diff --git a/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go b/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go index 940d9679c0..68d5d9a82c 100644 --- a/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go +++ b/pkg/cloud/services/ssm/mock_ssmiface/ssmapi_mock.go @@ -952,6 +952,56 @@ func (mr *MockSSMAPIMockRecorder) DeleteMaintenanceWindowWithContext(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMaintenanceWindowWithContext", reflect.TypeOf((*MockSSMAPI)(nil).DeleteMaintenanceWindowWithContext), varargs...) } +// DeleteOpsItem mocks base method. +func (m *MockSSMAPI) DeleteOpsItem(arg0 *ssm.DeleteOpsItemInput) (*ssm.DeleteOpsItemOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOpsItem", arg0) + ret0, _ := ret[0].(*ssm.DeleteOpsItemOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteOpsItem indicates an expected call of DeleteOpsItem. +func (mr *MockSSMAPIMockRecorder) DeleteOpsItem(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpsItem", reflect.TypeOf((*MockSSMAPI)(nil).DeleteOpsItem), arg0) +} + +// DeleteOpsItemRequest mocks base method. 
+func (m *MockSSMAPI) DeleteOpsItemRequest(arg0 *ssm.DeleteOpsItemInput) (*request.Request, *ssm.DeleteOpsItemOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOpsItemRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ssm.DeleteOpsItemOutput) + return ret0, ret1 +} + +// DeleteOpsItemRequest indicates an expected call of DeleteOpsItemRequest. +func (mr *MockSSMAPIMockRecorder) DeleteOpsItemRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpsItemRequest", reflect.TypeOf((*MockSSMAPI)(nil).DeleteOpsItemRequest), arg0) +} + +// DeleteOpsItemWithContext mocks base method. +func (m *MockSSMAPI) DeleteOpsItemWithContext(arg0 context.Context, arg1 *ssm.DeleteOpsItemInput, arg2 ...request.Option) (*ssm.DeleteOpsItemOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteOpsItemWithContext", varargs...) + ret0, _ := ret[0].(*ssm.DeleteOpsItemOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteOpsItemWithContext indicates an expected call of DeleteOpsItemWithContext. +func (mr *MockSSMAPIMockRecorder) DeleteOpsItemWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOpsItemWithContext", reflect.TypeOf((*MockSSMAPI)(nil).DeleteOpsItemWithContext), varargs...) +} + // DeleteOpsMetadata mocks base method. func (m *MockSSMAPI) DeleteOpsMetadata(arg0 *ssm.DeleteOpsMetadataInput) (*ssm.DeleteOpsMetadataOutput, error) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index 4e82494848..04afa9e1d4 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -28,7 +28,6 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -274,7 +273,6 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) { func getMachineScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachineScope, error) { return scope.NewMachineScope(scope.MachineScopeParams{ Client: client, - ControlPlane: &unstructured.Unstructured{}, Cluster: clusterScope.Cluster, Machine: &clusterv1.Machine{}, InfraCluster: clusterScope, diff --git a/pkg/cloud/services/sts/mock_stsiface/doc.go b/pkg/cloud/services/sts/mock_stsiface/doc.go index 900464f08d..1c576fa536 100644 --- a/pkg/cloud/services/sts/mock_stsiface/doc.go +++ b/pkg/cloud/services/sts/mock_stsiface/doc.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mock_stsiface provides a mock implementation for the STSAPI interface. // Run go generate to regenerate this mock. 
+// //go:generate ../../../../../hack/tools/bin/mockgen -destination stsiface_mock.go -package mock_stsiface github.com/aws/aws-sdk-go/service/sts/stsiface STSAPI //go:generate /usr/bin/env bash -c "cat ../../../../../hack/boilerplate/boilerplate.generatego.txt stsiface_mock.go > _stsiface_mock.go && mv _stsiface_mock.go stsiface_mock.go" - package mock_stsiface //nolint:stylecheck diff --git a/pkg/cloud/services/userdata/userdata.go b/pkg/cloud/services/userdata/userdata.go index 6b565dfbc3..f7953b6b09 100644 --- a/pkg/cloud/services/userdata/userdata.go +++ b/pkg/cloud/services/userdata/userdata.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package userdata provides a way to generate user data for cloud instances. package userdata import ( diff --git a/pkg/cloud/services/wait/wait.go b/pkg/cloud/services/wait/wait.go index f9b9bf7a27..b725fa6b14 100644 --- a/pkg/cloud/services/wait/wait.go +++ b/pkg/cloud/services/wait/wait.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package wait provides a set of utilities for polling and waiting. package wait import ( diff --git a/pkg/cloud/tags/tags.go b/pkg/cloud/tags/tags.go index 7f97616c5b..42c8bfd843 100644 --- a/pkg/cloud/tags/tags.go +++ b/pkg/cloud/tags/tags.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package tags provides a way to tag cloud resources. package tags import ( diff --git a/pkg/cloud/throttle/throttle.go b/pkg/cloud/throttle/throttle.go index c0e2321997..77511952b7 100644 --- a/pkg/cloud/throttle/throttle.go +++ b/pkg/cloud/throttle/throttle.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package throttle provides a way to limit the number of requests to AWS services. package throttle import ( @@ -60,7 +61,7 @@ func (o *OperationLimiter) Match(r *request.Request) (bool, error) { return false, err } } - return o.regexp.Match([]byte(r.Operation.Name)), nil + return o.regexp.MatchString(r.Operation.Name), nil } // LimitRequest will limit a request. diff --git a/pkg/cloudtest/cloudtest.go b/pkg/cloudtest/cloudtest.go index 482fd54f5f..3264405784 100644 --- a/pkg/cloudtest/cloudtest.go +++ b/pkg/cloudtest/cloudtest.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package cloudtest provides utilities for testing. package cloudtest import ( @@ -42,23 +43,24 @@ func RuntimeRawExtension(t *testing.T, p interface{}) *runtime.RawExtension { // test log messages. type Log struct{} -func (l *Log) Init(info logr.RuntimeInfo) { +// Init initializes the logger. +func (l *Log) Init(_ logr.RuntimeInfo) { } // Error implements Log errors. -func (l *Log) Error(err error, msg string, keysAndValues ...interface{}) {} +func (l *Log) Error(_ error, _ string, _ ...interface{}) {} // V returns the Logger's log level. -func (l *Log) V(level int) logr.LogSink { return l } +func (l *Log) V(_ int) logr.LogSink { return l } // WithValues returns logs with specific values. -func (l *Log) WithValues(keysAndValues ...interface{}) logr.LogSink { return l } +func (l *Log) WithValues(_ ...interface{}) logr.LogSink { return l } // WithName returns the logger with a specific name. 
-func (l *Log) WithName(name string) logr.LogSink { return l } +func (l *Log) WithName(_ string) logr.LogSink { return l } // Info implements info messages for the logger. -func (l *Log) Info(level int, msg string, keysAndValues ...interface{}) {} +func (l *Log) Info(_ int, _ string, _ ...interface{}) {} // Enabled returns the state of the logger. -func (l *Log) Enabled(level int) bool { return false } +func (l *Log) Enabled(_ int) bool { return false } diff --git a/pkg/eks/addons/plan.go b/pkg/eks/addons/plan.go index ae4425dc1f..22d46e2ab8 100644 --- a/pkg/eks/addons/plan.go +++ b/pkg/eks/addons/plan.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package addons provides a plan to manage EKS addons. package addons import ( @@ -45,7 +46,7 @@ type plan struct { } // Create will create the plan (i.e. list of procedures) for managing EKS addons. -func (a *plan) Create(ctx context.Context) ([]planner.Procedure, error) { +func (a *plan) Create(_ context.Context) ([]planner.Procedure, error) { procedures := []planner.Procedure{} // Handle create and update @@ -54,8 +55,10 @@ func (a *plan) Create(ctx context.Context) ([]planner.Procedure, error) { installed := a.getInstalled(*desired.Name) if installed == nil { // Need to add the addon - procedures = append(procedures, &CreateAddonProcedure{plan: a, name: *desired.Name}) - procedures = append(procedures, &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true}) + procedures = append(procedures, + &CreateAddonProcedure{plan: a, name: *desired.Name}, + &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true}, + ) } else { // Check if its just the tags that need updating diffTags := desired.Tags.Difference(installed.Tags) @@ -64,8 +67,10 @@ func (a *plan) Create(ctx context.Context) ([]planner.Procedure, error) { } // Check if we also need to update the addon if !desired.IsEqual(installed, false) { - procedures = append(procedures, &UpdateAddonProcedure{plan: a, name: *installed.Name}) - procedures = append(procedures, &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true}) + procedures = append(procedures, + &UpdateAddonProcedure{plan: a, name: *installed.Name}, + &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true}, + ) } else if *installed.Status != eks.AddonStatusActive { // If the desired and installed are the same make sure its active procedures = append(procedures, &WaitAddonActiveProcedure{plan: a, name: *desired.Name, includeDegraded: true}) diff --git a/pkg/eks/addons/procedures.go b/pkg/eks/addons/procedures.go index a57435f014..82f24f56ac 100644 --- a/pkg/eks/addons/procedures.go +++ b/pkg/eks/addons/procedures.go @@ -43,7 +43,7 @@ type DeleteAddonProcedure struct { } // Do implements the logic for the procedure. -func (p *DeleteAddonProcedure) Do(ctx context.Context) error { +func (p *DeleteAddonProcedure) Do(_ context.Context) error { input := &eks.DeleteAddonInput{ AddonName: aws.String(p.name), ClusterName: aws.String(p.plan.clusterName), @@ -68,7 +68,7 @@ type UpdateAddonProcedure struct { } // Do implements the logic for the procedure. -func (p *UpdateAddonProcedure) Do(ctx context.Context) error { +func (p *UpdateAddonProcedure) Do(_ context.Context) error { desired := p.plan.getDesired(p.name) if desired == nil { @@ -103,7 +103,7 @@ type UpdateAddonTagsProcedure struct { } // Do implements the logic for the procedure. 
-func (p *UpdateAddonTagsProcedure) Do(ctx context.Context) error { +func (p *UpdateAddonTagsProcedure) Do(_ context.Context) error { desired := p.plan.getDesired(p.name) installed := p.plan.getInstalled(p.name) @@ -138,7 +138,7 @@ type CreateAddonProcedure struct { } // Do implements the logic for the procedure. -func (p *CreateAddonProcedure) Do(ctx context.Context) error { +func (p *CreateAddonProcedure) Do(_ context.Context) error { desired := p.plan.getDesired(p.name) if desired == nil { return fmt.Errorf("getting desired addon %s: %w", p.name, ErrAddonNotFound) @@ -181,7 +181,7 @@ type WaitAddonActiveProcedure struct { } // Do implements the logic for the procedure. -func (p *WaitAddonActiveProcedure) Do(ctx context.Context) error { +func (p *WaitAddonActiveProcedure) Do(_ context.Context) error { input := &eks.DescribeAddonInput{ AddonName: aws.String(p.name), ClusterName: aws.String(p.plan.clusterName), @@ -222,7 +222,7 @@ type WaitAddonDeleteProcedure struct { } // Do implements the logic for the procedure. -func (p *WaitAddonDeleteProcedure) Do(ctx context.Context) error { +func (p *WaitAddonDeleteProcedure) Do(_ context.Context) error { input := &eks.DescribeAddonInput{ AddonName: aws.String(p.name), ClusterName: aws.String(p.plan.clusterName), diff --git a/pkg/eks/eks.go b/pkg/eks/eks.go index ebbe442ef5..df25b1b42e 100644 --- a/pkg/eks/eks.go +++ b/pkg/eks/eks.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package eks contains the EKS API implementation. package eks import ( diff --git a/pkg/eks/identityprovider/plan.go b/pkg/eks/identityprovider/plan.go index 1aeaaf125d..fa7975ed1a 100644 --- a/pkg/eks/identityprovider/plan.go +++ b/pkg/eks/identityprovider/plan.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package identityprovider provides a plan to manage EKS OIDC identity provider association. package identityprovider import ( @@ -46,7 +47,8 @@ type plan struct { clusterName string } -func (p *plan) Create(ctx context.Context) ([]planner.Procedure, error) { +// Create will create the plan (i.e. list of procedures) for managing EKS OIDC identity provider association. +func (p *plan) Create(_ context.Context) ([]planner.Procedure, error) { procedures := []planner.Procedure{} if p.desiredIdentityProvider == nil && p.currentIdentityProvider == nil { diff --git a/pkg/eks/identityprovider/procedures.go b/pkg/eks/identityprovider/procedures.go index 20f01ebf6f..ee12f9f9ed 100644 --- a/pkg/eks/identityprovider/procedures.go +++ b/pkg/eks/identityprovider/procedures.go @@ -28,14 +28,17 @@ import ( var oidcType = aws.String("oidc") +// WaitIdentityProviderAssociatedProcedure waits for the identity provider to be associated. type WaitIdentityProviderAssociatedProcedure struct { plan *plan } +// Name returns the name of the procedure. func (w *WaitIdentityProviderAssociatedProcedure) Name() string { return "wait_identity_provider_association" } +// Do waits for the identity provider to be associated. 
func (w *WaitIdentityProviderAssociatedProcedure) Do(ctx context.Context) error { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { out, err := w.plan.eksClient.DescribeIdentityProviderConfigWithContext(ctx, &eks.DescribeIdentityProviderConfigInput{ @@ -62,14 +65,17 @@ func (w *WaitIdentityProviderAssociatedProcedure) Do(ctx context.Context) error return nil } +// DisassociateIdentityProviderConfig disassociates the identity provider. type DisassociateIdentityProviderConfig struct { plan *plan } +// Name returns the name of the procedure. func (d *DisassociateIdentityProviderConfig) Name() string { return "dissociate_identity_provider" } +// Do disassociates the identity provider. func (d *DisassociateIdentityProviderConfig) Do(ctx context.Context) error { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { _, err := d.plan.eksClient.DisassociateIdentityProviderConfigWithContext(ctx, &eks.DisassociateIdentityProviderConfigInput{ @@ -92,14 +98,17 @@ func (d *DisassociateIdentityProviderConfig) Do(ctx context.Context) error { return nil } +// AssociateIdentityProviderProcedure associates the identity provider. type AssociateIdentityProviderProcedure struct { plan *plan } +// Name returns the name of the procedure. func (a *AssociateIdentityProviderProcedure) Name() string { return "associate_identity_provider" } +// Do associates the identity provider. func (a *AssociateIdentityProviderProcedure) Do(ctx context.Context) error { oidc := a.plan.desiredIdentityProvider input := &eks.AssociateIdentityProviderConfigInput{ @@ -128,15 +137,18 @@ func (a *AssociateIdentityProviderProcedure) Do(ctx context.Context) error { return nil } +// UpdatedIdentityProviderTagsProcedure updates the tags for the identity provider. type UpdatedIdentityProviderTagsProcedure struct { plan *plan } +// Name returns the name of the procedure. func (u *UpdatedIdentityProviderTagsProcedure) Name() string { return "update_identity_provider_tags" } -func (u *UpdatedIdentityProviderTagsProcedure) Do(ctx context.Context) error { +// Do updates the tags for the identity provider. +func (u *UpdatedIdentityProviderTagsProcedure) Do(_ context.Context) error { arn := u.plan.currentIdentityProvider.IdentityProviderConfigArn _, err := u.plan.eksClient.TagResource(&eks.TagResourceInput{ ResourceArn: &arn, @@ -150,15 +162,18 @@ func (u *UpdatedIdentityProviderTagsProcedure) Do(ctx context.Context) error { return nil } +// RemoveIdentityProviderTagsProcedure removes the tags from the identity provider. type RemoveIdentityProviderTagsProcedure struct { plan *plan } +// Name returns the name of the procedure. func (r *RemoveIdentityProviderTagsProcedure) Name() string { return "remove_identity_provider_tags" } -func (r *RemoveIdentityProviderTagsProcedure) Do(ctx context.Context) error { +// Do removes the tags from the identity provider. +func (r *RemoveIdentityProviderTagsProcedure) Do(_ context.Context) error { keys := make([]*string, 0, len(r.plan.currentIdentityProvider.Tags)) for key := range r.plan.currentIdentityProvider.Tags { diff --git a/pkg/eks/identityprovider/types.go b/pkg/eks/identityprovider/types.go index e7e5868f95..940e8870e5 100644 --- a/pkg/eks/identityprovider/types.go +++ b/pkg/eks/identityprovider/types.go @@ -39,6 +39,7 @@ type OidcIdentityProviderConfig struct { UsernamePrefix string } +// IsEqual returns true if the OidcIdentityProviderConfig is equal to the supplied one. 
func (o *OidcIdentityProviderConfig) IsEqual(other *OidcIdentityProviderConfig) bool { if o == other { return true diff --git a/pkg/hash/base36.go b/pkg/hash/base36.go index 386b5adfc5..f03f515001 100644 --- a/pkg/hash/base36.go +++ b/pkg/hash/base36.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package hash provides a consistent hash function using blake2b. package hash import ( diff --git a/pkg/internal/bytes/bytes.go b/pkg/internal/bytes/bytes.go index 401a194d57..a9aa86df6e 100644 --- a/pkg/internal/bytes/bytes.go +++ b/pkg/internal/bytes/bytes.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package bytes provides utilities for working with byte arrays. package bytes import ( diff --git a/pkg/internal/cidr/cidr.go b/pkg/internal/cidr/cidr.go index dd56ee5e75..30f0ee4596 100644 --- a/pkg/internal/cidr/cidr.go +++ b/pkg/internal/cidr/cidr.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package cidr provides utilities for working with CIDR blocks. package cidr import ( diff --git a/pkg/internal/cmp/slice.go b/pkg/internal/cmp/slice.go index b2ff2d50db..6d36faa626 100644 --- a/pkg/internal/cmp/slice.go +++ b/pkg/internal/cmp/slice.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package cmp provides a set of comparison functions. package cmp import ( @@ -22,20 +23,25 @@ import ( "k8s.io/utils/ptr" ) +// ByPtrValue is a type to sort a slice of pointers to strings. type ByPtrValue []*string +// Len returns the length of the slice. func (s ByPtrValue) Len() int { return len(s) } +// Swap swaps the elements with indexes i and j. func (s ByPtrValue) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// Less returns true if the element with index i should sort before the element with index j. func (s ByPtrValue) Less(i, j int) bool { return *s[i] < *s[j] } +// Equals returns true if the two slices of pointers to strings are equal. func Equals(slice1, slice2 []*string) bool { sort.Sort(ByPtrValue(slice1)) sort.Sort(ByPtrValue(slice2)) diff --git a/pkg/internal/mime/mime.go b/pkg/internal/mime/mime.go index 1324482f9f..7f7b23aa8b 100644 --- a/pkg/internal/mime/mime.go +++ b/pkg/internal/mime/mime.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mime provides a function to generate a multipart MIME document. package mime import ( diff --git a/pkg/internal/rate/rate.go b/pkg/internal/rate/rate.go index 7528cfcad2..607f13f799 100644 --- a/pkg/internal/rate/rate.go +++ b/pkg/internal/rate/rate.go @@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if prevEvent.After(now) { r.lim.lastEvent = prevEvent } } diff --git a/pkg/internal/tristate/tristate.go b/pkg/internal/tristate/tristate.go index 6aafa52dc4..eeaae0ed86 100644 --- a/pkg/internal/tristate/tristate.go +++ b/pkg/internal/tristate/tristate.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package tristate provides a helper for working with bool pointers. 
package tristate // withDefault evaluates a pointer to a bool with a default value. diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index c2cd0ebde2..fa05ff5427 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package logger +// Package logger provides a convenient interface to use to log. package logger import ( @@ -69,35 +69,42 @@ func FromContext(ctx context.Context) *Logger { var _ Wrapper = &Logger{} +// Info logs a message at the info level. func (c *Logger) Info(msg string, keysAndValues ...any) { c.callStackHelper() c.logger.Info(msg, keysAndValues...) } +// Debug logs a message at the debug level. func (c *Logger) Debug(msg string, keysAndValues ...any) { c.callStackHelper() c.logger.V(logLevelDebug).Info(msg, keysAndValues...) } +// Warn logs a message at the warn level. func (c *Logger) Warn(msg string, keysAndValues ...any) { c.callStackHelper() c.logger.V(logLevelWarn).Info(msg, keysAndValues...) } +// Trace logs a message at the trace level. func (c *Logger) Trace(msg string, keysAndValues ...any) { c.callStackHelper() c.logger.V(logLevelTrace).Info(msg, keysAndValues...) } +// Error logs a message at the error level. func (c *Logger) Error(err error, msg string, keysAndValues ...any) { c.callStackHelper() c.logger.Error(err, msg, keysAndValues...) } +// GetLogger returns the underlying logr.Logger. func (c *Logger) GetLogger() logr.Logger { return c.logger } +// WithValues adds some key-value pairs of context to a logger. func (c *Logger) WithValues(keysAndValues ...any) *Logger { return &Logger{ callStackHelper: c.callStackHelper, @@ -105,6 +112,7 @@ func (c *Logger) WithValues(keysAndValues ...any) *Logger { } } +// WithName adds a new element to the logger's name. func (c *Logger) WithName(name string) *Logger { return &Logger{ callStackHelper: c.callStackHelper, diff --git a/pkg/planner/planner.go b/pkg/planner/planner.go index 9010b31edb..74ea078e2d 100644 --- a/pkg/planner/planner.go +++ b/pkg/planner/planner.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package planner provides a simple interface for creating and executing plans. package planner import "context" diff --git a/pkg/record/recorder.go b/pkg/record/recorder.go index 7591249a9b..df9a299264 100644 --- a/pkg/record/recorder.go +++ b/pkg/record/recorder.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package record provides a way to record Kubernetes events. package record import ( diff --git a/pkg/rosa/client.go b/pkg/rosa/client.go index cbb9793d82..36c9ae333b 100644 --- a/pkg/rosa/client.go +++ b/pkg/rosa/client.go @@ -1,3 +1,4 @@ +// Package rosa provides a way to interact with the Red Hat OpenShift Service on AWS (ROSA) API. package rosa import ( @@ -6,6 +7,9 @@ import ( "os" sdk "github.com/openshift-online/ocm-sdk-go" + ocmcfg "github.com/openshift/rosa/pkg/config" + "github.com/openshift/rosa/pkg/ocm" + "github.com/sirupsen/logrus" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" @@ -16,27 +20,50 @@ const ( ocmAPIURLKey = "ocmApiUrl" ) -type RosaClient struct { - ocm *sdk.Connection - rosaScope *scope.ROSAControlPlaneScope +// NewOCMClient creates a new OCM client. 
+func NewOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*ocm.Client, error) { + token, url, err := ocmCredentials(ctx, rosaScope) + if err != nil { + return nil, err + } + return ocm.NewClient().Logger(logrus.New()).Config(&ocmcfg.Config{ + AccessToken: token, + URL: url, + }).Build() } -// NewRosaClientWithConnection creates a client with a preexisting connection for testing purposes. -func NewRosaClientWithConnection(connection *sdk.Connection, rosaScope *scope.ROSAControlPlaneScope) *RosaClient { - return &RosaClient{ - ocm: connection, - rosaScope: rosaScope, +func newOCMRawConnection(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*sdk.Connection, error) { + logger, err := sdk.NewGoLoggerBuilder(). + Debug(false). + Build() + if err != nil { + return nil, fmt.Errorf("failed to build logger: %w", err) + } + token, url, err := ocmCredentials(ctx, rosaScope) + if err != nil { + return nil, err } + + connection, err := sdk.NewConnectionBuilder(). + Logger(logger). + Tokens(token). + URL(url). + Build() + if err != nil { + return nil, fmt.Errorf("failed to create ocm connection: %w", err) + } + + return connection, nil } -func NewRosaClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*RosaClient, error) { +func ocmCredentials(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (string, string, error) { var token string var ocmAPIUrl string secret := rosaScope.CredentialsSecret() if secret != nil { if err := rosaScope.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { - return nil, fmt.Errorf("failed to get credentials secret: %w", err) + return "", "", fmt.Errorf("failed to get credentials secret: %w", err) } token = string(secret.Data[ocmTokenKey]) @@ -50,40 +77,7 @@ func NewRosaClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) } if token == "" { - return nil, fmt.Errorf("token is not provided, be sure to set OCM_TOKEN env variable or reference a credentials secret with key %s", ocmTokenKey) - } - - // Create a logger that has the debug level enabled: - logger, err := sdk.NewGoLoggerBuilder(). - Debug(true). - Build() - if err != nil { - return nil, fmt.Errorf("failed to build logger: %w", err) + return "", "", fmt.Errorf("token is not provided, be sure to set OCM_TOKEN env variable or reference a credentials secret with key %s", ocmTokenKey) } - - connection, err := sdk.NewConnectionBuilder(). - Logger(logger). - Tokens(token). - URL(ocmAPIUrl). - Build() - if err != nil { - return nil, fmt.Errorf("failed to create ocm connection: %w", err) - } - - return &RosaClient{ - ocm: connection, - rosaScope: rosaScope, - }, nil -} - -func (c *RosaClient) Close() error { - return c.ocm.Close() -} - -func (c *RosaClient) GetConnectionURL() string { - return c.ocm.URL() -} - -func (c *RosaClient) GetConnectionTokens() (string, string, error) { - return c.ocm.Tokens() + return token, ocmAPIUrl, nil } diff --git a/pkg/rosa/clusters.go b/pkg/rosa/clusters.go deleted file mode 100644 index 98dc0c5d2c..0000000000 --- a/pkg/rosa/clusters.go +++ /dev/null @@ -1,77 +0,0 @@ -package rosa - -import ( - "fmt" - - cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" -) - -const ( - rosaCreatorArnProperty = "rosa_creator_arn" -) - -// CreateCluster creates a new ROSA cluster using the specified spec. -func (c *RosaClient) CreateCluster(spec *cmv1.Cluster) (*cmv1.Cluster, error) { - cluster, err := c.ocm.ClustersMgmt().V1().Clusters(). - Add(). - Body(spec). 
- Send() - if err != nil { - return nil, handleErr(cluster.Error(), err) - } - - clusterObject := cluster.Body() - return clusterObject, nil -} - -// DeleteCluster deletes the ROSA cluster. -func (c *RosaClient) DeleteCluster(clusterID string) error { - response, err := c.ocm.ClustersMgmt().V1().Clusters(). - Cluster(clusterID). - Delete(). - BestEffort(true). - Send() - if err != nil { - return handleErr(response.Error(), err) - } - - return nil -} - -// GetCluster retrieves the ROSA/OCM cluster object. -func (c *RosaClient) GetCluster() (*cmv1.Cluster, error) { - clusterKey := c.rosaScope.RosaClusterName() - query := fmt.Sprintf("%s AND (id = '%s' OR name = '%s' OR external_id = '%s')", - getClusterFilter(c.rosaScope.Identity.Arn), - clusterKey, clusterKey, clusterKey, - ) - response, err := c.ocm.ClustersMgmt().V1().Clusters().List(). - Search(query). - Page(1). - Size(1). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - - switch response.Total() { - case 0: - return nil, nil - case 1: - return response.Items().Slice()[0], nil - default: - return nil, fmt.Errorf("there are %d clusters with identifier or name '%s'", response.Total(), clusterKey) - } -} - -// Generate a query that filters clusters running on the current AWS session account. -func getClusterFilter(creatorArn *string) string { - filter := "product.id = 'rosa'" - if creatorArn != nil { - filter = fmt.Sprintf("%s AND (properties.%s = '%s')", - filter, - rosaCreatorArnProperty, - *creatorArn) - } - return filter -} diff --git a/pkg/rosa/externalauthproviders.go b/pkg/rosa/externalauthproviders.go new file mode 100644 index 0000000000..04573ff392 --- /dev/null +++ b/pkg/rosa/externalauthproviders.go @@ -0,0 +1,136 @@ +package rosa + +import ( + "context" + "fmt" + "time" + + sdk "github.com/openshift-online/ocm-sdk-go" + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" +) + +// ExternalAuthClient handles externalAuth operations. +type ExternalAuthClient struct { + ocm *sdk.Connection +} + +// NewExternalAuthClient creates and returns a new client to handle externalAuth operations. +func NewExternalAuthClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*ExternalAuthClient, error) { + ocmConnection, err := newOCMRawConnection(ctx, rosaScope) + if err != nil { + return nil, err + } + return &ExternalAuthClient{ + ocm: ocmConnection, + }, nil +} + +// Close closes the underlying ocm connection. +func (c *ExternalAuthClient) Close() error { + return c.ocm.Close() +} + +// CreateExternalAuth creates a new external auth provider. +func (c *ExternalAuthClient) CreateExternalAuth(clusterID string, externalAuth *cmv1.ExternalAuth) (*cmv1.ExternalAuth, error) { + response, err := c.ocm.ClustersMgmt().V1(). + Clusters().Cluster(clusterID). + ExternalAuthConfig().ExternalAuths().Add().Body(externalAuth).Send() + if err != nil { + return nil, handleErr(response.Error(), err) + } + return response.Body(), nil +} + +// UpdateExternalAuth updates an existing external auth provider. +func (c *ExternalAuthClient) UpdateExternalAuth(clusterID string, externalAuth *cmv1.ExternalAuth) (*cmv1.ExternalAuth, error) { + response, err := c.ocm.ClustersMgmt().V1(). + Clusters().Cluster(clusterID). + ExternalAuthConfig().ExternalAuths(). + ExternalAuth(externalAuth.ID()). 
+ Update().Body(externalAuth).Send() + if err != nil { + return nil, handleErr(response.Error(), err) + } + return response.Body(), nil +} + +// GetExternalAuth retrieves the specified external auth provider. +func (c *ExternalAuthClient) GetExternalAuth(clusterID string, externalAuthID string) (*cmv1.ExternalAuth, bool, error) { + response, err := c.ocm.ClustersMgmt().V1(). + Clusters().Cluster(clusterID).ExternalAuthConfig(). + ExternalAuths().ExternalAuth(externalAuthID). + Get(). + Send() + if response.Status() == 404 { + return nil, false, nil + } + if err != nil { + return nil, false, handleErr(response.Error(), err) + } + return response.Body(), true, nil +} + +// ListExternalAuths lists all external auth providers for the cluster. +func (c *ExternalAuthClient) ListExternalAuths(clusterID string) ([]*cmv1.ExternalAuth, error) { + response, err := c.ocm.ClustersMgmt().V1(). + Clusters().Cluster(clusterID). + ExternalAuthConfig(). + ExternalAuths(). + List().Page(1).Size(-1). + Send() + if err != nil { + return nil, handleErr(response.Error(), err) + } + return response.Items().Slice(), nil +} + +// DeleteExternalAuth deletes the specified external auth provider. +func (c *ExternalAuthClient) DeleteExternalAuth(clusterID string, externalAuthID string) error { + response, err := c.ocm.ClustersMgmt().V1(). + Clusters().Cluster(clusterID). + ExternalAuthConfig().ExternalAuths(). + ExternalAuth(externalAuthID). + Delete(). + Send() + if err != nil { + return handleErr(response.Error(), err) + } + return nil +} + +// CreateBreakGlassCredential creates a break glass credential. +func (c *ExternalAuthClient) CreateBreakGlassCredential(clusterID string, breakGlassCredential *cmv1.BreakGlassCredential) (*cmv1.BreakGlassCredential, error) { + response, err := c.ocm.ClustersMgmt().V1(). + Clusters().Cluster(clusterID).BreakGlassCredentials(). + Add().Body(breakGlassCredential).Send() + if err != nil { + return nil, handleErr(response.Error(), err) + } + return response.Body(), nil +} + +const pollInterval = 15 * time.Second + +// PollKubeconfig continuously polls for the kubeconfig of the provided break glass credential. +func (c *ExternalAuthClient) PollKubeconfig(ctx context.Context, clusterID string, credentialID string) (kubeconfig string, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Minute*5) + defer cancel() + + credentialClient := c.ocm.ClustersMgmt().V1().Clusters(). + Cluster(clusterID).BreakGlassCredentials().BreakGlassCredential(credentialID) + response, err := credentialClient.Poll(). + Interval(pollInterval). + Predicate(func(bgcgr *cmv1.BreakGlassCredentialGetResponse) bool { + return bgcgr.Body().Status() == cmv1.BreakGlassCredentialStatusIssued && bgcgr.Body().Kubeconfig() != "" + }). + StartContext(ctx) + if err != nil { + err = fmt.Errorf("failed to poll kubeconfig for cluster '%s' with break glass credential '%s': %v", + clusterID, credentialID, err) + return + } + + return response.Body().Kubeconfig(), nil +} diff --git a/pkg/rosa/helpers.go b/pkg/rosa/helpers.go new file mode 100644 index 0000000000..f5f8cd1817 --- /dev/null +++ b/pkg/rosa/helpers.go @@ -0,0 +1,40 @@ +package rosa + +import ( + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + ocmerrors "github.com/openshift-online/ocm-sdk-go/errors" + errors "github.com/zgalor/weberr" +) + +// IsNodePoolReady checks whether the nodepool is provisioned and all replicas are available. +// If autoscaling is enabled, NodePool must have replicas >= autoscaling.MinReplica to be considered ready. 
+func IsNodePoolReady(nodePool *cmv1.NodePool) bool { + if nodePool.Status().Message() != "" { + return false + } + + if nodePool.Replicas() != 0 { + return nodePool.Replicas() == nodePool.Status().CurrentReplicas() + } + + if nodePool.Autoscaling() != nil { + return nodePool.Status().CurrentReplicas() >= nodePool.Autoscaling().MinReplica() + } + + return false +} + +func handleErr(res *ocmerrors.Error, err error) error { + msg := res.Reason() + if msg == "" { + msg = err.Error() + } + // Hack to always display the correct terms and conditions message + if res.Code() == "CLUSTERS-MGMT-451" { + msg = "You must accept the Terms and Conditions in order to continue.\n" + + "Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" + + "Once you accept the terms, you will need to retry the action that was blocked." + } + errType := errors.ErrorType(res.Status()) + return errType.Set(errors.Errorf("%s", msg)) +} diff --git a/pkg/rosa/idps.go b/pkg/rosa/idps.go index 8bd6d01f39..bfa9fce65e 100644 --- a/pkg/rosa/idps.go +++ b/pkg/rosa/idps.go @@ -2,64 +2,11 @@ package rosa import ( "fmt" - "net/http" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift/rosa/pkg/ocm" ) -// ListIdentityProviders retrieves the list of identity providers. -func (c *RosaClient) ListIdentityProviders(clusterID string) ([]*cmv1.IdentityProvider, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - IdentityProviders(). - List().Page(1).Size(-1). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - - return response.Items().Slice(), nil -} - -// CreateIdentityProvider adds a new identity provider to the cluster. -func (c *RosaClient) CreateIdentityProvider(clusterID string, idp *cmv1.IdentityProvider) (*cmv1.IdentityProvider, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - IdentityProviders(). - Add().Body(idp). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - return response.Body(), nil -} - -// GetHTPasswdUserList retrieves the list of users of the provided _HTPasswd_ identity provider. -func (c *RosaClient) GetHTPasswdUserList(clusterID, htpasswdIDPId string) (*cmv1.HTPasswdUserList, error) { - listResponse, err := c.ocm.ClustersMgmt().V1().Clusters().Cluster(clusterID). - IdentityProviders().IdentityProvider(htpasswdIDPId).HtpasswdUsers().List().Send() - if err != nil { - if listResponse.Error().Status() == http.StatusNotFound { - return nil, nil - } - return nil, handleErr(listResponse.Error(), err) - } - - return listResponse.Items(), nil -} - -// AddHTPasswdUser adds a new user to the provided _HTPasswd_ identity provider. -func (c *RosaClient) AddHTPasswdUser(username, password, clusterID, idpID string) error { - htpasswdUser, _ := cmv1.NewHTPasswdUser().Username(username).Password(password).Build() - response, err := c.ocm.ClustersMgmt().V1().Clusters().Cluster(clusterID). - IdentityProviders().IdentityProvider(idpID).HtpasswdUsers().Add().Body(htpasswdUser).Send() - if err != nil { - return handleErr(response.Error(), err) - } - - return nil -} - const ( clusterAdminUserGroup = "cluster-admins" clusterAdminIDPname = "cluster-admin" @@ -67,8 +14,8 @@ const ( // CreateAdminUserIfNotExist creates a new admin user withe username/password in the cluster if username doesn't already exist. // the user is granted admin privileges by being added to a special IDP called `cluster-admin` which will be created if it doesn't already exist. 
-func (c *RosaClient) CreateAdminUserIfNotExist(clusterID, username, password string) error { - existingClusterAdminIDP, userList, err := c.findExistingClusterAdminIDP(clusterID) +func CreateAdminUserIfNotExist(client *ocm.Client, clusterID, username, password string) error { + existingClusterAdminIDP, userList, err := findExistingClusterAdminIDP(client, clusterID) if err != nil { return fmt.Errorf("failed to find existing cluster admin IDP: %w", err) } @@ -80,7 +27,7 @@ func (c *RosaClient) CreateAdminUserIfNotExist(clusterID, username, password str } // Add admin user to the cluster-admins group: - user, err := c.CreateUserIfNotExist(clusterID, clusterAdminUserGroup, username) + user, err := CreateUserIfNotExist(client, clusterID, clusterAdminUserGroup, username) if err != nil { return fmt.Errorf("failed to add user '%s' to cluster '%s': %s", username, clusterID, err) @@ -88,7 +35,7 @@ func (c *RosaClient) CreateAdminUserIfNotExist(clusterID, username, password str if existingClusterAdminIDP != nil { // add htpasswd user to existing idp - err := c.AddHTPasswdUser(username, password, clusterID, existingClusterAdminIDP.ID()) + err := client.AddHTPasswdUser(username, password, clusterID, existingClusterAdminIDP.ID()) if err != nil { return fmt.Errorf("failed to add htpassawoed user cluster-admin to existing idp: %s", existingClusterAdminIDP.ID()) } @@ -114,10 +61,10 @@ func (c *RosaClient) CreateAdminUserIfNotExist(clusterID, username, password str } // Add HTPasswd IDP to cluster - _, err = c.CreateIdentityProvider(clusterID, clusterAdminIDP) + _, err = client.CreateIdentityProvider(clusterID, clusterAdminIDP) if err != nil { // since we could not add the HTPasswd IDP to the cluster, roll back and remove the cluster admin - if err := c.DeleteUser(clusterID, clusterAdminUserGroup, user.ID()); err != nil { + if err := client.DeleteUser(clusterID, clusterAdminUserGroup, user.ID()); err != nil { return fmt.Errorf("failed to revert the admin user for cluster '%s': %w", clusterID, err) } @@ -127,26 +74,42 @@ func (c *RosaClient) CreateAdminUserIfNotExist(clusterID, username, password str return nil } -func (c *RosaClient) findExistingClusterAdminIDP(clusterID string) ( +// CreateUserIfNotExist creates a new user with `username` and adds it to the group if it doesn't already exist. 
+func CreateUserIfNotExist(client *ocm.Client, clusterID string, group, username string) (*cmv1.User, error) { + user, err := client.GetUser(clusterID, group, username) + if user != nil || err != nil { + return user, err + } + + userCfg, err := cmv1.NewUser().ID(username).Build() + if err != nil { + return nil, fmt.Errorf("failed to create user '%s' for cluster '%s': %w", username, clusterID, err) + } + return client.CreateUser(clusterID, group, userCfg) +} + +func findExistingClusterAdminIDP(client *ocm.Client, clusterID string) ( htpasswdIDP *cmv1.IdentityProvider, userList *cmv1.HTPasswdUserList, reterr error) { - idps, err := c.ListIdentityProviders(clusterID) + idps, err := client.GetIdentityProviders(clusterID) if err != nil { reterr = fmt.Errorf("failed to get identity providers for cluster '%s': %v", clusterID, err) return } for _, idp := range idps { - if idp.Name() == clusterAdminIDPname { - itemUserList, err := c.GetHTPasswdUserList(clusterID, idp.ID()) - if err != nil { - reterr = fmt.Errorf("failed to get user list of the HTPasswd IDP of '%s: %s': %v", idp.Name(), clusterID, err) - return - } - - htpasswdIDP = idp - userList = itemUserList + if idp.Name() != clusterAdminIDPname { + continue + } + + itemUserList, err := client.GetHTPasswdUserList(clusterID, idp.ID()) + if err != nil { + reterr = fmt.Errorf("failed to get user list of the HTPasswd IDP of '%s: %s': %v", idp.Name(), clusterID, err) return } + + htpasswdIDP = idp + userList = itemUserList + return } return diff --git a/pkg/rosa/nodepools.go b/pkg/rosa/nodepools.go deleted file mode 100644 index 0b59bb9869..0000000000 --- a/pkg/rosa/nodepools.go +++ /dev/null @@ -1,146 +0,0 @@ -package rosa - -import ( - "time" - - cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" -) - -// CreateNodePool adds a new node pool to the cluster. -func (c *RosaClient) CreateNodePool(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - NodePools(). - Add().Body(nodePool). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - return response.Body(), nil -} - -// GetNodePools retrieves the list of node pools in the cluster. -func (c *RosaClient) GetNodePools(clusterID string) ([]*cmv1.NodePool, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - NodePools(). - List().Page(1).Size(-1). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - return response.Items().Slice(), nil -} - -// GetNodePool retrieves the details of the specified node pool. -func (c *RosaClient) GetNodePool(clusterID string, nodePoolID string) (*cmv1.NodePool, bool, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - NodePools(). - NodePool(nodePoolID). - Get(). - Send() - if response.Status() == 404 { - return nil, false, nil - } - if err != nil { - return nil, false, handleErr(response.Error(), err) - } - return response.Body(), true, nil -} - -// UpdateNodePool updates the specified node pool. -func (c *RosaClient) UpdateNodePool(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - NodePools().NodePool(nodePool.ID()). - Update().Body(nodePool). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - return response.Body(), nil -} - -// DeleteNodePool deletes the specified node pool. 
-func (c *RosaClient) DeleteNodePool(clusterID string, nodePoolID string) error { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - NodePools().NodePool(nodePoolID). - Delete(). - Send() - if err != nil { - return handleErr(response.Error(), err) - } - return nil -} - -// CheckNodePoolExistingScheduledUpgrade checks and returns the current upgrade schedule for the nodePool if any. -func (c *RosaClient) CheckNodePoolExistingScheduledUpgrade(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePoolUpgradePolicy, error) { - upgradePolicies, err := c.getNodePoolUpgradePolicies(clusterID, nodePool.ID()) - if err != nil { - return nil, err - } - for _, upgradePolicy := range upgradePolicies { - if upgradePolicy.UpgradeType() == cmv1.UpgradeTypeNodePool { - return upgradePolicy, nil - } - } - return nil, nil -} - -// ScheduleNodePoolUpgrade schedules a new nodePool upgrade to the specified version at the specified time. -func (c *RosaClient) ScheduleNodePoolUpgrade(clusterID string, nodePool *cmv1.NodePool, version string, nextRun time.Time) (*cmv1.NodePoolUpgradePolicy, error) { - // earliestNextRun is set to at least 5 min from now by the OCM API. - // we set it to 6 min here to account for latencty. - earliestNextRun := time.Now().Add(time.Minute * 6) - if nextRun.Before(earliestNextRun) { - nextRun = earliestNextRun - } - - upgradePolicy, err := cmv1.NewNodePoolUpgradePolicy(). - UpgradeType(cmv1.UpgradeTypeNodePool). - NodePoolID(nodePool.ID()). - ScheduleType(cmv1.ScheduleTypeManual). - Version(version). - NextRun(nextRun). - Build() - if err != nil { - return nil, err - } - - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - NodePools(). - NodePool(nodePool.ID()).UpgradePolicies(). - Add().Body(upgradePolicy). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - - return response.Body(), nil -} - -func (c *RosaClient) getNodePoolUpgradePolicies(clusterID string, nodePoolID string) (nodePoolUpgradePolicies []*cmv1.NodePoolUpgradePolicy, err error) { - collection := c.ocm.ClustersMgmt().V1(). - Clusters(). - Cluster(clusterID).NodePools().NodePool(nodePoolID).UpgradePolicies() - page := 1 - size := 100 - for { - response, err := collection.List(). - Page(page). - Size(size). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - nodePoolUpgradePolicies = append(nodePoolUpgradePolicies, response.Items().Slice()...) - if response.Size() < size { - break - } - page++ - } - return -} diff --git a/pkg/rosa/oauth.go b/pkg/rosa/oauth.go index 110f638392..299dfb01d3 100644 --- a/pkg/rosa/oauth.go +++ b/pkg/rosa/oauth.go @@ -14,6 +14,7 @@ import ( restclient "k8s.io/client-go/rest" ) +// TokenResponse contains the access token and the duration until it expires. 
type TokenResponse struct { AccessToken string ExpiresIn time.Duration @@ -29,7 +30,7 @@ func RequestToken(ctx context.Context, apiURL, username, password string, config } tokenReqURL := fmt.Sprintf("%s/oauth/authorize?response_type=token&client_id=%s", oauthURL, clientID) - request, err := http.NewRequestWithContext(ctx, http.MethodGet, tokenReqURL, nil) + request, err := http.NewRequestWithContext(ctx, http.MethodGet, tokenReqURL, http.NoBody) if err != nil { return nil, err } diff --git a/pkg/rosa/users.go b/pkg/rosa/users.go deleted file mode 100644 index 38203536f2..0000000000 --- a/pkg/rosa/users.go +++ /dev/null @@ -1,58 +0,0 @@ -package rosa - -import ( - "fmt" - "net/http" - - cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" -) - -// CreateUserIfNotExist creates a new user with `username` and adds it to the group if it doesn't already exist. -func (c *RosaClient) CreateUserIfNotExist(clusterID string, group, username string) (*cmv1.User, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - Groups().Group(group). - Users().User(username). - Get(). - Send() - if err == nil { - return response.Body(), nil - } else if response.Error().Status() != http.StatusNotFound { - return nil, handleErr(response.Error(), err) - } - - user, err := cmv1.NewUser().ID(username).Build() - if err != nil { - return nil, fmt.Errorf("failed to create user '%s' for cluster '%s'", username, clusterID) - } - - return c.CreateUser(clusterID, group, user) -} - -// CreateUser adds a new user to the group. -func (c *RosaClient) CreateUser(clusterID string, group string, user *cmv1.User) (*cmv1.User, error) { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - Groups().Group(group). - Users(). - Add().Body(user). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - return response.Body(), nil -} - -// DeleteUser deletes the user from the cluster. -func (c *RosaClient) DeleteUser(clusterID string, group string, username string) error { - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(clusterID). - Groups().Group(group). - Users().User(username). - Delete(). - Send() - if err != nil { - return handleErr(response.Error(), err) - } - return nil -} diff --git a/pkg/rosa/util.go b/pkg/rosa/util.go deleted file mode 100644 index 37f75bc25e..0000000000 --- a/pkg/rosa/util.go +++ /dev/null @@ -1,60 +0,0 @@ -package rosa - -import ( - "crypto/rand" - "fmt" - "math/big" - - ocmerrors "github.com/openshift-online/ocm-sdk-go/errors" -) - -func handleErr(res *ocmerrors.Error, err error) error { - msg := res.Reason() - if msg == "" { - msg = err.Error() - } - // Hack to always display the correct terms and conditions message - if res.Code() == "CLUSTERS-MGMT-451" { - msg = "You must accept the Terms and Conditions in order to continue.\n" + - "Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" + - "Once you accept the terms, you will need to retry the action that was blocked." - } - return fmt.Errorf(msg) -} - -// GenerateRandomPassword generates a random password which satisfies OCM requiremts for passwords. 
-func GenerateRandomPassword() (string, error) { - const ( - maxPasswordLength = 23 - lowerLetters = "abcdefghijkmnopqrstuvwxyz" - upperLetters = "ABCDEFGHIJKLMNPQRSTUVWXYZ" - digits = "23456789" - all = lowerLetters + upperLetters + digits - ) - var password string - for i := 0; i < maxPasswordLength; i++ { - n, err := rand.Int(rand.Reader, big.NewInt(int64(len(all)))) - if err != nil { - return "", err - } - newchar := string(all[n.Int64()]) - if password == "" { - password = newchar - } - if i < maxPasswordLength-1 { - n, err = rand.Int(rand.Reader, big.NewInt(int64(len(password)+1))) - if err != nil { - return "", err - } - j := n.Int64() - password = password[0:j] + newchar + password[j:] - } - } - - pw := []rune(password) - for _, replace := range []int{5, 11, 17} { - pw[replace] = '-' - } - - return string(pw), nil -} diff --git a/pkg/rosa/versions.go b/pkg/rosa/versions.go index 1bdeee8033..d300adbf96 100644 --- a/pkg/rosa/versions.go +++ b/pkg/rosa/versions.go @@ -2,47 +2,19 @@ package rosa import ( "fmt" - "strings" "time" "github.com/blang/semver" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift/rosa/pkg/ocm" ) +// MinSupportedVersion is the minimum supported version for ROSA. var MinSupportedVersion = semver.MustParse("4.14.0") -// IsVersionSupported checks whether the input version is supported for ROSA clusters. -func (c *RosaClient) IsVersionSupported(versionID string) (bool, error) { - parsedVersion, err := semver.Parse(versionID) - if err != nil { - return false, err - } - if parsedVersion.LT(MinSupportedVersion) { - return false, nil - } - - filter := fmt.Sprintf("raw_id='%s' AND channel_group = '%s'", versionID, "stable") - response, err := c.ocm.ClustersMgmt().V1(). - Versions(). - List(). - Search(filter). - Page(1).Size(1). - Parameter("product", "hcp"). - Send() - if err != nil { - return false, handleErr(response.Error(), err) - } - if response.Total() == 0 { - return false, nil - } - - version := response.Items().Get(0) - return version.ROSAEnabled() && version.HostedControlPlaneEnabled(), nil -} - // CheckExistingScheduledUpgrade checks and returns the current upgrade schedule if any. -func (c *RosaClient) CheckExistingScheduledUpgrade(cluster *cmv1.Cluster) (*cmv1.ControlPlaneUpgradePolicy, error) { - upgradePolicies, err := c.getControlPlaneUpgradePolicies(cluster.ID()) +func CheckExistingScheduledUpgrade(client *ocm.Client, cluster *cmv1.Cluster) (*cmv1.ControlPlaneUpgradePolicy, error) { + upgradePolicies, err := client.GetControlPlaneUpgradePolicies(cluster.ID()) if err != nil { return nil, err } @@ -55,9 +27,10 @@ func (c *RosaClient) CheckExistingScheduledUpgrade(cluster *cmv1.Cluster) (*cmv1 } // ScheduleControlPlaneUpgrade schedules a new control plane upgrade to the specified version at the specified time. -func (c *RosaClient) ScheduleControlPlaneUpgrade(cluster *cmv1.Cluster, version string, nextRun time.Time) (*cmv1.ControlPlaneUpgradePolicy, error) { +func ScheduleControlPlaneUpgrade(client *ocm.Client, cluster *cmv1.Cluster, version string, nextRun time.Time) (*cmv1.ControlPlaneUpgradePolicy, error) { // earliestNextRun is set to at least 5 min from now by the OCM API. - // we set it to 6 min here to account for latencty. + // Set our next run request to something slightly longer than 5min to make sure we account for the latency between when we send this + // request and when the server processes it. 
earliestNextRun := time.Now().Add(time.Minute * 6) if nextRun.Before(earliestNextRun) { nextRun = earliestNextRun @@ -72,48 +45,43 @@ func (c *RosaClient) ScheduleControlPlaneUpgrade(cluster *cmv1.Cluster, version if err != nil { return nil, err } + return client.ScheduleHypershiftControlPlaneUpgrade(cluster.ID(), upgradePolicy) +} - response, err := c.ocm.ClustersMgmt().V1(). - Clusters().Cluster(cluster.ID()). - ControlPlane(). - UpgradePolicies(). - Add().Body(upgradePolicy). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) +// ScheduleNodePoolUpgrade schedules a new nodePool upgrade to the specified version at the specified time. +func ScheduleNodePoolUpgrade(client *ocm.Client, clusterID string, nodePool *cmv1.NodePool, version string, nextRun time.Time) (*cmv1.NodePoolUpgradePolicy, error) { + // earliestNextRun is set to at least 5 min from now by the OCM API. + // Set our next run request to something slightly longer than 5min to make sure we account for the latency between when we send this + // request and when the server processes it. + earliestNextRun := time.Now().Add(time.Minute * 6) + if nextRun.Before(earliestNextRun) { + nextRun = earliestNextRun } - return response.Body(), nil -} + upgradePolicy, err := cmv1.NewNodePoolUpgradePolicy(). + UpgradeType(cmv1.UpgradeTypeNodePool). + NodePoolID(nodePool.ID()). + ScheduleType(cmv1.ScheduleTypeManual). + Version(version). + NextRun(nextRun). + Build() + if err != nil { + return nil, err + } -func (c *RosaClient) getControlPlaneUpgradePolicies(clusterID string) (controlPlaneUpgradePolicies []*cmv1.ControlPlaneUpgradePolicy, err error) { - collection := c.ocm.ClustersMgmt().V1(). - Clusters(). - Cluster(clusterID). - ControlPlane(). - UpgradePolicies() - page := 1 - size := 100 - for { - response, err := collection.List(). - Page(page). - Size(size). - Send() - if err != nil { - return nil, handleErr(response.Error(), err) - } - controlPlaneUpgradePolicies = append(controlPlaneUpgradePolicies, response.Items().Slice()...) - if response.Size() < size { - break - } - page++ + scheduledUpgrade, err := client.ScheduleNodePoolUpgrade(clusterID, nodePool.ID(), upgradePolicy) + if err != nil { + return nil, fmt.Errorf("failed to schedule nodePool upgrade to version %s: %w", version, err) } - return + + return scheduledUpgrade, nil } // machinepools can be created with a minimal of two minor versions from the control plane. const minorVersionsAllowedDeviation = 2 +// MachinePoolSupportedVersionsRange returns the supported range of versions +// for a machine pool based on the control plane version. func MachinePoolSupportedVersionsRange(controlPlaneVersion string) (*semver.Version, *semver.Version, error) { maxVersion, err := semver.Parse(controlPlaneVersion) if err != nil { @@ -133,8 +101,6 @@ func MachinePoolSupportedVersionsRange(controlPlaneVersion string) (*semver.Vers return &minVersion, &maxVersion, nil } -const versionPrefix = "openshift-v" - // RawVersionID returns the rawID from the provided OCM version object. func RawVersionID(version *cmv1.Version) string { rawID := version.RawID() @@ -142,15 +108,5 @@ func RawVersionID(version *cmv1.Version) string { return rawID } - rawID = strings.TrimPrefix(version.ID(), versionPrefix) - channelSeparator := strings.LastIndex(rawID, "-") - if channelSeparator > 0 { - return rawID[:channelSeparator] - } - return rawID -} - -// VersionID construcuts and returns an OCM versionID from the provided rawVersionID. 
-func VersionID(rawVersionID string) string { - return fmt.Sprintf("%s%s", versionPrefix, rawVersionID) + return ocm.GetRawVersionId(version.ID()) } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go new file mode 100644 index 0000000000..9bad7e0a79 --- /dev/null +++ b/pkg/utils/utils.go @@ -0,0 +1,27 @@ +// Package utils has the common functions that can be used for cluster-api-provider-aws repo. +package utils + +import ( + "context" + "fmt" + + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +// GetMachinePools belong to a cluster. +func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]expclusterv1.MachinePool, error) { + machinePoolList := expclusterv1.MachinePoolList{} + listOptions := []crclient.ListOption{ + crclient.InNamespace(clusterNS), + crclient.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), + } + + if err := client.List(ctx, &machinePoolList, listOptions...); err != nil { + return []expclusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) + } + + return machinePoolList.Items, nil +} diff --git a/scripts/ci-docker-build.sh b/scripts/ci-docker-build.sh new file mode 100755 index 0000000000..cff6212bc8 --- /dev/null +++ b/scripts/ci-docker-build.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
+# shellcheck source=../hack/ensure-go.sh +source "${REPO_ROOT}/hack/ensure-go.sh" + +cd "${REPO_ROOT}" && make docker-build-all release-binaries diff --git a/scripts/go_install.sh b/scripts/go_install.sh index 12ce444224..a07b8e0f11 100755 --- a/scripts/go_install.sh +++ b/scripts/go_install.sh @@ -37,7 +37,7 @@ if [ -z "${GOBIN}" ]; then exit 1 fi -rm "${GOBIN}/${2}"* || true +rm -f "${GOBIN}/${2}"* || true # install the golang module specified as the first argument go install "${1}@${3}" diff --git a/templates/cluster-template-flatcar.yaml b/templates/cluster-template-flatcar.yaml index fa1e346c9b..058e9d48c6 100644 --- a/templates/cluster-template-flatcar.yaml +++ b/templates/cluster-template-flatcar.yaml @@ -2,7 +2,10 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: - name: "${CLUSTER_NAME}" + labels: + ccm: external + csi: external + name: ${CLUSTER_NAME} spec: clusterNetwork: pods: @@ -165,3 +168,855 @@ spec: preKubeadmCommands: - envsubst < /etc/kubeadm.yml > /etc/kubeadm.yml.tmp - mv /etc/kubeadm.yml.tmp /etc/kubeadm.yml +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: crs-ccm +spec: + clusterSelector: + matchLabels: + ccm: external + resources: + - kind: ConfigMap + name: cloud-controller-manager-addon + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: crs-csi +spec: + clusterSelector: + matchLabels: + csi: external + resources: + - kind: ConfigMap + name: aws-ebs-csi-driver-addon + strategy: ApplyOnce +--- +apiVersion: v1 +data: + aws-ccm-external.yaml: | + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: aws-cloud-controller-manager + spec: + selector: + matchLabels: + k8s-app: aws-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: aws-cloud-controller-manager + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + serviceAccountName: cloud-controller-manager + containers: + - name: aws-cloud-controller-manager + image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.3 + args: + - --v=2 + - --cloud-provider=aws + - --use-service-account-credentials=true + - --configure-cloud-routes=false + resources: + requests: + cpu: 200m + hostNetwork: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cloud-controller-manager:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - 
apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cloud-controller-manager-addon +--- +apiVersion: v1 +data: + aws-ebs-csi-external.yaml: |- + apiVersion: v1 + kind: Secret + metadata: + name: aws-secret + namespace: kube-system + stringData: + key_id: "" + access_key: "" + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-node-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-attacher-role + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - csi.storage.k8s.io + resources: + - csinodeinfos + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-provisioner-role + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list + - apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: 
+ - get + - watch + - list + - delete + - update + - create + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-resizer-role + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-external-snapshotter-role + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-attacher-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-attacher-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-provisioner-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-provisioner-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-resizer-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-resizer-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-snapshotter-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-snapshotter-role + subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-controller + namespace: kube-system + spec: + replicas: 2 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-controller + app.kubernetes.io/name: 
aws-ebs-csi-driver + spec: + containers: + - args: + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: key_id + name: aws-secret + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: access_key + name: aws-secret + optional: true + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.25.0 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=2 + - --feature-gates=Topology=true + - --extra-create-metadata + - --leader-election=true + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.2 + name: csi-provisioner + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=2 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.4.2 + name: csi-attacher + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=2 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.9.2 + imagePullPolicy: Always + name: csi-resizer + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.11.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: ebs-csi-controller-sa + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + tolerationSeconds: 300 + - key: node-role.kubernetes.io/master + effect: NoSchedule + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + volumes: + - emptyDir: {} + name: socket-dir + --- + apiVersion: policy/v1beta1 + kind: PodDisruptionBudget + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-controller + namespace: kube-system + spec: + maxUnavailable: 1 + selector: + matchLabels: + app: ebs-csi-controller + 
app.kubernetes.io/name: aws-ebs-csi-driver + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + containers: + - args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.25.0 + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.2 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.11.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: ebs-csi-node-sa + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + tolerationSeconds: 300 + volumes: + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate + --- + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + name: ebs.csi.aws.com + spec: + attachRequired: true + podInfoOnMount: false +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: aws-ebs-csi-driver-addon diff --git a/templates/cluster-template-rosa-machinepool.yaml b/templates/cluster-template-rosa-machinepool.yaml index ce5d04a728..67cdac8050 100644 --- a/templates/cluster-template-rosa-machinepool.yaml +++ b/templates/cluster-template-rosa-machinepool.yaml @@ -27,12 +27,11 @@ kind: ROSAControlPlane metadata: name: "${CLUSTER_NAME}-control-plane" spec: - rosaClusterName: ${CLUSTER_NAME:0:15} + rosaClusterName: ${CLUSTER_NAME:0:54} version: "${OPENSHIFT_VERSION}" region: "${AWS_REGION}" - accountID: "${AWS_ACCOUNT_ID}" - creatorARN: 
"${AWS_CREATOR_ARN}" - machineCIDR: "10.0.0.0/16" + network: + machineCIDR: "10.0.0.0/16" rolesRef: ingressARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-ingress-operator-cloud-credentials" imageRegistryARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-image-registry-installer-cloud-credentials" @@ -44,7 +43,7 @@ spec: kmsProviderARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-kms-provider" oidcID: "${OIDC_CONFIG_ID}" subnets: - - "${PUBLIC_SUBNET_ID}" + - "${PUBLIC_SUBNET_ID}" # remove if creating a private cluster - "${PRIVATE_SUBNET_ID}" availabilityZones: - "${AWS_AVAILABILITY_ZONE}" @@ -78,5 +77,3 @@ spec: instanceType: "m5.xlarge" subnet: "${PRIVATE_SUBNET_ID}" version: "${OPENSHIFT_VERSION}" - - diff --git a/templates/cluster-template-rosa.yaml b/templates/cluster-template-rosa.yaml index 8fd60b9a5f..f9ece3a42f 100644 --- a/templates/cluster-template-rosa.yaml +++ b/templates/cluster-template-rosa.yaml @@ -27,12 +27,11 @@ kind: ROSAControlPlane metadata: name: "${CLUSTER_NAME}-control-plane" spec: - rosaClusterName: ${CLUSTER_NAME:0:15} + rosaClusterName: ${CLUSTER_NAME:0:54} version: "${OPENSHIFT_VERSION}" region: "${AWS_REGION}" - accountID: "${AWS_ACCOUNT_ID}" - creatorARN: "${AWS_CREATOR_ARN}" - machineCIDR: "10.0.0.0/16" + network: + machineCIDR: "10.0.0.0/16" rolesRef: ingressARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-ingress-operator-cloud-credentials" imageRegistryARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-openshift-image-registry-installer-cloud-credentials" @@ -44,7 +43,7 @@ spec: kmsProviderARN: "arn:aws:iam::${AWS_ACCOUNT_ID}:role/${OPERATOR_ROLES_PREFIX}-kube-system-kms-provider" oidcID: "${OIDC_CONFIG_ID}" subnets: - - "${PUBLIC_SUBNET_ID}" + - "${PUBLIC_SUBNET_ID}" # remove if creating a private cluster - "${PRIVATE_SUBNET_ID}" availabilityZones: - "${AWS_AVAILABILITY_ZONE}" diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml index d469adf3ea..b272860f6b 100644 --- a/test/e2e/data/e2e_conf.yaml +++ b/test/e2e/data/e2e_conf.yaml @@ -20,17 +20,17 @@ images: ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS # Cluster API v1beta1 Preloads - - name: quay.io/jetstack/cert-manager-cainjector:v1.12.2 + - name: quay.io/jetstack/cert-manager-cainjector:v1.14.4 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-webhook:v1.12.2 + - name: quay.io/jetstack/cert-manager-webhook:v1.14.4 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-controller:v1.12.2 + - name: quay.io/jetstack/cert-manager-controller:v1.14.4 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.6.1 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.7.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.6.1 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.7.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.6.1 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.7.1 loadBehavior: tryLoad providers: @@ -48,8 +48,8 @@ providers: new: "imagePullPolicy: IfNotPresent" - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: v1.6.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. 
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/core-components.yaml" + - name: v1.7.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -75,8 +75,8 @@ providers: new: "imagePullPolicy: IfNotPresent" - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: v1.6.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/bootstrap-components.yaml" + - name: v1.7.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/bootstrap-components.yaml" type: "url" contract: v1beta1 files: @@ -102,8 +102,8 @@ providers: new: "imagePullPolicy: IfNotPresent" - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: v1.6.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/control-plane-components.yaml" + - name: v1.7.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/control-plane-components.yaml" type: "url" contract: v1beta1 files: @@ -176,7 +176,7 @@ variables: # allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation. # The following Kubernetes versions should be the latest versions with already published kindest/node images. # This avoids building node images in the default case which improves the test duration significantly. 
- KUBERNETES_VERSION_MANAGEMENT: "v1.28.0" + KUBERNETES_VERSION_MANAGEMENT: "v1.29.0" KUBERNETES_VERSION: "v1.26.6" KUBERNETES_VERSION_UPGRADE_TO: "v1.26.6" KUBERNETES_VERSION_UPGRADE_FROM: "v1.25.3" diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml index dd481096a5..8e238124f4 100644 --- a/test/e2e/data/e2e_eks_conf.yaml +++ b/test/e2e/data/e2e_eks_conf.yaml @@ -17,25 +17,25 @@ images: loadBehavior: mustLoad ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS - - name: quay.io/jetstack/cert-manager-cainjector:v1.12.2 + - name: quay.io/jetstack/cert-manager-cainjector:v1.14.4 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-webhook:v1.12.2 + - name: quay.io/jetstack/cert-manager-webhook:v1.14.4 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-controller:v1.12.2 + - name: quay.io/jetstack/cert-manager-controller:v1.14.4 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.6.1 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.7.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.6.1 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.7.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.6.1 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.7.1 loadBehavior: tryLoad providers: - name: cluster-api type: CoreProvider versions: - - name: v1.6.1 - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/core-components.yaml" + - name: v1.7.1 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -50,8 +50,8 @@ providers: files: - sourcePath: "./shared/v1beta1/metadata.yaml" versions: - - name: v1.6.1 - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/bootstrap-components.yaml" + - name: v1.7.1 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/bootstrap-components.yaml" type: "url" contract: v1beta1 files: @@ -66,8 +66,8 @@ providers: files: - sourcePath: "./shared/v1beta1/metadata.yaml" versions: - - name: v1.6.1 - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/control-plane-components.yaml" + - name: v1.7.1 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.1/control-plane-components.yaml" type: "url" contract: v1beta1 files: @@ -117,7 +117,7 @@ providers: variables: KUBERNETES_VERSION: "v1.29.1" - KUBERNETES_VERSION_MANAGEMENT: "v1.28.0" # Kind bootstrap + KUBERNETES_VERSION_MANAGEMENT: "v1.29.0" # Kind bootstrap EXP_MACHINE_POOL: "true" EXP_CLUSTER_RESOURCE_SET: "true" EVENT_BRIDGE_INSTANCE_STATE: "true" diff --git a/test/e2e/data/shared/v1beta1/metadata.yaml b/test/e2e/data/shared/v1beta1/metadata.yaml index 9feb6a6eb2..7f3d15522d 100644 --- a/test/e2e/data/shared/v1beta1/metadata.yaml +++ b/test/e2e/data/shared/v1beta1/metadata.yaml @@ -5,6 +5,9 @@ # update this file only when a new major or minor version is released apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 releaseSeries: + - major: 1 + minor: 7 + contract: v1beta1 - major: 1 minor: 6 contract: v1beta1 diff --git a/test/e2e/shared/aws.go b/test/e2e/shared/aws.go index 95b469780e..31a6ac283a 100644 --- a/test/e2e/shared/aws.go +++ b/test/e2e/shared/aws.go @@ -509,7 +509,7 @@ func deleteResourcesInCloudFormation(prov client.ConfigProvider, t *cfn_bootstra } code, ok := 
awserrors.Code(err) return err == nil || (ok && code == iam.ErrCodeNoSuchEntityException) - }, 5*time.Minute, 5*time.Second).Should(BeTrue()) + }, 5*time.Minute, 5*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed deleting the following role: %q", role.RoleName)) } for _, profile := range instanceProfiles { By(fmt.Sprintf("cleanup for profile with name '%s'", profile.InstanceProfileName)) @@ -522,7 +522,7 @@ func deleteResourcesInCloudFormation(prov client.ConfigProvider, t *cfn_bootstra } code, ok := awserrors.Code(err) return err == nil || (ok && code == iam.ErrCodeNoSuchEntityException) - }, 5*time.Minute, 5*time.Second).Should(BeTrue()) + }, 5*time.Minute, 5*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed cleaning up profile with name %q", profile.InstanceProfileName)) } for _, group := range groups { repeat := false @@ -534,7 +534,7 @@ func deleteResourcesInCloudFormation(prov client.ConfigProvider, t *cfn_bootstra } code, ok := awserrors.Code(err) return err == nil || (ok && code == iam.ErrCodeNoSuchEntityException) - }, 5*time.Minute, 5*time.Second).Should(BeTrue()) + }, 5*time.Minute, 5*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed deleting group %q", group.GroupName)) } for _, policy := range policies { policies, err := iamSvc.ListPolicies(&iam.ListPoliciesInput{}) @@ -554,7 +554,7 @@ func deleteResourcesInCloudFormation(prov client.ConfigProvider, t *cfn_bootstra } code, ok := awserrors.Code(err) return err == nil || (ok && code == iam.ErrCodeNoSuchEntityException) - }, 5*time.Minute, 5*time.Second).Should(BeTrue()) + }, 5*time.Minute, 5*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed to delete policy %q", p.String())) // TODO: why is there a break here? Don't we want to clean up everything? break } @@ -953,7 +953,8 @@ func (s *ServiceQuota) updateServiceQuotaRequestStatus(serviceQuotasClient *serv } } -func DumpEKSClusters(ctx context.Context, e2eCtx *E2EContext) { +// DumpEKSClusters dumps the EKS clusters in the environment. +func DumpEKSClusters(_ context.Context, e2eCtx *E2EContext) { name := "no-bootstrap-cluster" if e2eCtx.Environment.BootstrapClusterProxy != nil { name = e2eCtx.Environment.BootstrapClusterProxy.GetName() @@ -1014,7 +1015,7 @@ func dumpEKSCluster(cluster *eks.Cluster, logPath string) { } // To calculate how much resources a test consumes, these helper functions below can be used. -// ListVpcInternetGateways, ListNATGateways, ListRunningEC2, ListVPC +// ListVpcInternetGateways, ListNATGateways, ListRunningEC2, ListVPC. func ListVpcInternetGateways(e2eCtx *E2EContext) ([]*ec2.InternetGateway, error) { ec2Svc := ec2.New(e2eCtx.AWSSession) @@ -1052,7 +1053,8 @@ func ListNATGateways(e2eCtx *E2EContext) (map[string]*ec2.NatGateway, error) { return gateways, nil } -func ListRunningEC2(e2eCtx *E2EContext) ([]instance, error) { +// listRunningEC2 returns a list of running EC2 instances. 
+func listRunningEC2(e2eCtx *E2EContext) ([]instance, error) { //nolint:unused ec2Svc := ec2.New(e2eCtx.AWSSession) resp, err := ec2Svc.DescribeInstancesWithContext(context.TODO(), &ec2.DescribeInstancesInput{ @@ -1128,7 +1130,7 @@ func WaitForInstanceState(e2eCtx *E2EContext, clusterName string, state string) return true } return false - }, 5*time.Minute, 5*time.Second).Should(BeTrue()) + }, 5*time.Minute, 5*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for all cluster's EC2 instance to be in %q state", state)) return false } @@ -1545,7 +1547,7 @@ func WaitForNatGatewayState(e2eCtx *E2EContext, gatewayID string, state string) gw, _ := GetNatGateway(e2eCtx, gatewayID) gwState := *gw.State return gwState == state - }, 3*time.Minute, 5*time.Second).Should(BeTrue()) + }, 3*time.Minute, 5*time.Second).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for NAT Gateway to be in %q state", state)) return false } diff --git a/test/e2e/shared/common.go b/test/e2e/shared/common.go index 8cc2d9a6b4..c11e1d82f3 100644 --- a/test/e2e/shared/common.go +++ b/test/e2e/shared/common.go @@ -92,12 +92,13 @@ func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, namespace delete(e2eCtx.Environment.Namespaces, namespace) } +// AWSStackLogCollector collects logs from the AWS stack. type AWSStackLogCollector struct { E2EContext *E2EContext } // CollectInfrastructureLogs collects log from the infrastructure. -func (k AWSStackLogCollector) CollectInfrastructureLogs(ctx context.Context, managementClusterClient crclient.Client, c *clusterv1.Cluster, outputPath string) error { +func (k AWSStackLogCollector) CollectInfrastructureLogs(_ context.Context, _ crclient.Client, _ *clusterv1.Cluster, _ string) error { return nil } diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index c67c5538a5..13e77c84f7 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -73,37 +73,51 @@ const ( MultiTenancy = "MULTI_TENANCY_" ) +// ResourceQuotaFilePath is the path to the file that contains the resource usage. var ResourceQuotaFilePath = "/tmp/capa-e2e-resource-usage.lock" + var ( + // MultiTenancySimpleRole is the simple role for multi-tenancy test. MultiTenancySimpleRole = MultitenancyRole("Simple") - MultiTenancyJumpRole = MultitenancyRole("Jump") + // MultiTenancyJumpRole is the jump role for multi-tenancy test. + MultiTenancyJumpRole = MultitenancyRole("Jump") + // MultiTenancyNestedRole is the nested role for multi-tenancy test. MultiTenancyNestedRole = MultitenancyRole("Nested") - MultiTenancyRoles = []MultitenancyRole{MultiTenancySimpleRole, MultiTenancyJumpRole, MultiTenancyNestedRole} - roleLookupCache = make(map[string]string) + + // MultiTenancyRoles is the list of multi-tenancy roles. + MultiTenancyRoles = []MultitenancyRole{MultiTenancySimpleRole, MultiTenancyJumpRole, MultiTenancyNestedRole} + roleLookupCache = make(map[string]string) ) +// MultitenancyRole is the role of the test. type MultitenancyRole string +// EnvVarARN returns the environment variable name for the role ARN. func (m MultitenancyRole) EnvVarARN() string { return MultiTenancy + strings.ToUpper(string(m)) + "_ROLE_ARN" } +// EnvVarName returns the environment variable name for the role name. func (m MultitenancyRole) EnvVarName() string { return MultiTenancy + strings.ToUpper(string(m)) + "_ROLE_NAME" } +// EnvVarIdentity returns the environment variable name for the identity name. 
func (m MultitenancyRole) EnvVarIdentity() string { return MultiTenancy + strings.ToUpper(string(m)) + "_IDENTITY_NAME" } +// IdentityName returns the identity name. func (m MultitenancyRole) IdentityName() string { return strings.ToLower(m.RoleName()) } +// RoleName returns the role name. func (m MultitenancyRole) RoleName() string { return "CAPAMultiTenancy" + string(m) } +// SetEnvVars sets the environment variables for the role. func (m MultitenancyRole) SetEnvVars(prov client.ConfigProvider) error { arn, err := m.RoleARN(prov) if err != nil { @@ -115,6 +129,7 @@ func (m MultitenancyRole) SetEnvVars(prov client.ConfigProvider) error { return nil } +// RoleARN returns the role ARN. func (m MultitenancyRole) RoleARN(prov client.ConfigProvider) (string, error) { if roleARN, ok := roleLookupCache[m.RoleName()]; ok { return roleARN, nil diff --git a/test/e2e/shared/gpu.go b/test/e2e/shared/gpu.go index 3bbdeed267..b871b2f010 100644 --- a/test/e2e/shared/gpu.go +++ b/test/e2e/shared/gpu.go @@ -103,7 +103,7 @@ type jobsClientAdapter struct { } // Get fetches the job named by the key and updates the provided object. -func (c jobsClientAdapter) Get(ctx context.Context, key crclient.ObjectKey, obj crclient.Object, opts ...crclient.GetOption) error { +func (c jobsClientAdapter) Get(ctx context.Context, key crclient.ObjectKey, obj crclient.Object, _ ...crclient.GetOption) error { job, err := c.client.Get(ctx, key.Name, metav1.GetOptions{}) if jobObj, ok := obj.(*batchv1.Job); ok { job.DeepCopyInto(jobObj) diff --git a/test/e2e/shared/suite.go b/test/e2e/shared/suite.go index 07e83fe0da..bf1212e4c2 100644 --- a/test/e2e/shared/suite.go +++ b/test/e2e/shared/suite.go @@ -17,6 +17,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package shared provides common utilities, setup and teardown for the e2e tests. package shared import ( @@ -118,8 +119,11 @@ func Node1BeforeSuite(e2eCtx *E2EContext) []byte { if prov.Name != "aws" { continue } - e2eCtx.E2EConfig.Providers[i].Files = append(e2eCtx.E2EConfig.Providers[i].Files, clusterctlCITemplate) - e2eCtx.E2EConfig.Providers[i].Files = append(e2eCtx.E2EConfig.Providers[i].Files, clusterctlCITemplateForUpgrade) + e2eCtx.E2EConfig.Providers[i].Files = append( + e2eCtx.E2EConfig.Providers[i].Files, + clusterctlCITemplate, + clusterctlCITemplateForUpgrade, + ) } } @@ -136,11 +140,12 @@ func Node1BeforeSuite(e2eCtx *E2EContext) []byte { By(fmt.Sprintf("Trying to create CloudFormation stack... 
attempt %d", count)) success := true if err := createCloudFormationStack(e2eCtx.AWSSession, bootstrapTemplate, bootstrapTags); err != nil { + By(fmt.Sprintf("Failed to create CloudFormation stack in attempt %d: %s", count, err.Error())) deleteCloudFormationStack(e2eCtx.AWSSession, bootstrapTemplate) success = false } return success - }, 10*time.Minute, 5*time.Second).Should(BeTrue()) + }, 10*time.Minute, 5*time.Second).Should(BeTrue(), "Should've eventually succeeded creating an AWS CloudFormation stack") } ensureStackTags(e2eCtx.AWSSession, bootstrapTemplate.Spec.StackName, bootstrapTags) diff --git a/test/e2e/shared/template.go b/test/e2e/shared/template.go index caa16917e9..72c4884412 100644 --- a/test/e2e/shared/template.go +++ b/test/e2e/shared/template.go @@ -154,7 +154,7 @@ func renderCustomCloudFormation(t *cfn_bootstrap.Template) *cloudformation.Templ return cloudformationTemplate } -func appendMultiTenancyRoles(t *cfn_bootstrap.Template, cfnt *cloudformation.Template) { +func appendMultiTenancyRoles(_ *cfn_bootstrap.Template, cfnt *cloudformation.Template) { controllersPolicy := cfnt.Resources[string(cfn_bootstrap.ControllersPolicy)].(*cfn_iam.ManagedPolicy) controllersPolicy.Roles = append( controllersPolicy.Roles, diff --git a/test/e2e/suites/managed/addon_helpers.go b/test/e2e/suites/managed/addon_helpers.go index 5f3940bcea..c8e55e42e1 100644 --- a/test/e2e/suites/managed/addon_helpers.go +++ b/test/e2e/suites/managed/addon_helpers.go @@ -65,7 +65,7 @@ func waitForEKSAddonToHaveStatus(input waitForEKSAddonToHaveStatusInput, interva } return false, nil - }, intervals...).Should(BeTrue()) + }, intervals...).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for EKS addon %q to have status %q for EKS cluster %q", input.AddonName, input.AddonStatus, input.ControlPlane.Spec.EKSClusterName)) } type checkEKSAddonConfigurationInput struct { @@ -102,5 +102,5 @@ func checkEKSAddonConfiguration(input checkEKSAddonConfigurationInput, intervals } return false, nil - }, intervals...).Should(BeTrue()) + }, intervals...).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for EKS addon %q to have config %q for EKS cluster %q", input.AddonName, input.AddonConfiguration, input.ControlPlane.Spec.EKSClusterName)) } diff --git a/test/e2e/suites/managed/cluster.go b/test/e2e/suites/managed/cluster.go index 1edae3adec..46829a2bcb 100644 --- a/test/e2e/suites/managed/cluster.go +++ b/test/e2e/suites/managed/cluster.go @@ -17,6 +17,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package managed implements a test for creating a managed cluster using CAPA. 
package managed import ( diff --git a/test/e2e/suites/managed/control_plane_helpers.go b/test/e2e/suites/managed/control_plane_helpers.go index 65f2ee2da3..b42c17e495 100644 --- a/test/e2e/suites/managed/control_plane_helpers.go +++ b/test/e2e/suites/managed/control_plane_helpers.go @@ -68,7 +68,7 @@ func waitForControlPlaneToBeUpgraded(input waitForControlPlaneToBeUpgradedInput, default: return false, nil } - }, intervals...).Should(BeTrue()) + }, intervals...).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for EKS control-plane to be upgraded to kubernetes version %q", input.UpgradeVersion)) } type GetControlPlaneByNameInput struct { @@ -89,7 +89,7 @@ func GetControlPlaneByName(ctx context.Context, input GetControlPlaneByNameInput return err } return nil - }, 2*time.Minute, 5*time.Second).Should(Succeed()) + }, 2*time.Minute, 5*time.Second).Should(Succeed(), fmt.Sprintf("Eventually failed to get AWSManagedControlPlane object '%s/%s'", input.Namespace, input.Name)) Expect(input.Getter.Get(ctx, key, cp)).To(Succeed(), "Failed to get AWSManagedControlPlane object %s/%s", input.Namespace, input.Name) return cp } diff --git a/test/e2e/suites/managed/machine_deployment.go b/test/e2e/suites/managed/machine_deployment.go index 89a17c772a..79d26d3355 100644 --- a/test/e2e/suites/managed/machine_deployment.go +++ b/test/e2e/suites/managed/machine_deployment.go @@ -100,10 +100,6 @@ func MachineDeploymentSpec(ctx context.Context, inputGetter func() MachineDeploy Deleter: input.BootstrapClusterProxy.GetClient(), MachineDeployment: md[0], }) - // deleteMachine(ctx, deleteMachineInput{ - // Deleter: input.BootstrapClusterProxy.GetClient(), - // Machine: &workerMachines[0], - // }) waitForMachineDeploymentDeleted(ctx, waitForMachineDeploymentDeletedInput{ Getter: input.BootstrapClusterProxy.GetClient(), diff --git a/test/e2e/suites/managed/machine_deployment_helpers.go b/test/e2e/suites/managed/machine_deployment_helpers.go index ca22403f33..e156b4ac51 100644 --- a/test/e2e/suites/managed/machine_deployment_helpers.go +++ b/test/e2e/suites/managed/machine_deployment_helpers.go @@ -58,7 +58,7 @@ func waitForMachineDeploymentDeleted(ctx context.Context, input waitForMachineDe err := input.Getter.Get(ctx, key, mp) notFound := apierrors.IsNotFound(err) return notFound - }, intervals...).Should(BeTrue()) + }, intervals...).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for MachineDeployment %q to be deleted", input.MachineDeployment.GetName())) } type waitForMachineDeletedInput struct { @@ -77,5 +77,5 @@ func waitForMachineDeleted(ctx context.Context, input waitForMachineDeletedInput err := input.Getter.Get(ctx, key, mp) notFound := apierrors.IsNotFound(err) return notFound - }, intervals...).Should(BeTrue()) + }, intervals...).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for Machine %q to be deleted", input.Machine.GetName())) } diff --git a/test/e2e/suites/managed/machine_pool_helpers.go b/test/e2e/suites/managed/machine_pool_helpers.go index 47eca2b850..b34eb7b1b8 100644 --- a/test/e2e/suites/managed/machine_pool_helpers.go +++ b/test/e2e/suites/managed/machine_pool_helpers.go @@ -58,5 +58,5 @@ func waitForMachinePoolDeleted(ctx context.Context, input waitForMachinePoolDele err := input.Getter.Get(ctx, key, mp) notFound := apierrors.IsNotFound(err) return notFound - }, intervals...).Should(BeTrue()) + }, intervals...).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for machine pool %q to be deleted", input.MachinePool.GetName())) } diff --git 
a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index 39457e481c..19a1394088 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -211,7 +211,7 @@ func createPVC(statefulsetinfo statefulSetInfo) corev1.PersistentVolumeClaim { Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, StorageClassName: &statefulsetinfo.storageClassName, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("4Gi"), }, @@ -418,7 +418,7 @@ func getSubnetID(filterKey, filterValue, clusterName string) *string { return subnetOutput.Subnets[0].SubnetId } -func getVolumeIds(info statefulSetInfo, k8sclient crclient.Client) []*string { +func getVolumeIDs(info statefulSetInfo, k8sclient crclient.Client) []*string { ginkgo.By("Retrieving IDs of dynamically provisioned volumes.") statefulset := &appsv1.StatefulSet{} err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: info.name}, statefulset) @@ -683,11 +683,11 @@ func verifyElbExists(elbName string, exists bool) { } } -func verifyVolumesExists(awsVolumeIds []*string) { +func verifyVolumesExists(awsVolumeIDs []*string) { ginkgo.By("Ensuring dynamically provisioned volumes exists") ec2Client := ec2.New(e2eCtx.AWSSession) input := &ec2.DescribeVolumesInput{ - VolumeIds: awsVolumeIds, + VolumeIds: awsVolumeIDs, } _, err := ec2Client.DescribeVolumes(input) Expect(err).NotTo(HaveOccurred()) @@ -703,7 +703,7 @@ func waitForStatefulSetRunning(info statefulSetInfo, k8sclient crclient.Client) } return *statefulset.Spec.Replicas == statefulset.Status.ReadyReplicas, nil }, 10*time.Minute, 30*time.Second, - ).Should(BeTrue()) + ).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for StatefulSet %s to be running", info.name)) } // LatestCIReleaseForVersion returns the latest ci release of a specific version. 
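The e2e helper changes above repeatedly apply one pattern: attaching a human-readable failure description to a Gomega `Eventually(...).Should(BeTrue(), ...)` assertion so that a timeout reports what was being waited on instead of a bare `false`. Below is a minimal sketch of that pattern; the `waitForReady` helper, its arguments, and the intervals are hypothetical and only illustrate how the optional description arguments to `Should` are used, not code from this PR.

```go
package example

import (
	"fmt"
	"time"

	. "github.com/onsi/gomega"
)

// waitForReady is a hypothetical helper showing the assertion pattern used in
// the e2e suite: the extra argument passed to Should() becomes the failure
// message printed when the Eventually block times out, which makes CI logs
// state what the test was waiting for.
func waitForReady(name string, isReady func() bool) {
	Eventually(func() bool {
		return isReady()
	}, 5*time.Minute, 5*time.Second).Should(BeTrue(),
		fmt.Sprintf("Eventually failed waiting for %q to become ready", name))
}
```

Without the description, a timed-out poll only reports that `false` was not `true`; with it, the failure names the resource and state being polled, which is the motivation for the repeated `.Should(BeTrue(), fmt.Sprintf(...))` edits in this diff.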
@@ -835,7 +835,7 @@ func createPVCForEFS(storageClassName string, clusterClient crclient.Client) { corev1.ReadWriteMany, }, StorageClassName: &storageClassName, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), }, diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index aed9e02309..2b9ab782c8 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -319,8 +319,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient() createStatefulSet(nginxStatefulsetInfo, clusterClient) - awsVolIds := getVolumeIds(nginxStatefulsetInfo, clusterClient) - verifyVolumesExists(awsVolIds) + awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient) + verifyVolumesExists(awsVolIDs) kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer) configCluster.KubernetesVersion = kubernetesUgradeVersion @@ -348,8 +348,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23") createStatefulSet(nginxStatefulsetInfo2, clusterClient) - awsVolIds = getVolumeIds(nginxStatefulsetInfo2, clusterClient) - verifyVolumesExists(awsVolIds) + awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient) + verifyVolumesExists(awsVolIDs) ginkgo.By("Deleting LB service") deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) @@ -358,7 +358,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { deleteCluster(ctx, cluster2) ginkgo.By("Deleting retained dynamically provisioned volumes") - deleteRetainedVolumes(awsVolIds) + deleteRetainedVolumes(awsVolIDs) ginkgo.By("PASSED!") }) }) @@ -388,8 +388,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient() createStatefulSet(nginxStatefulsetInfo, clusterClient) - awsVolIds := getVolumeIds(nginxStatefulsetInfo, clusterClient) - verifyVolumesExists(awsVolIds) + awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient) + verifyVolumesExists(awsVolIDs) kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer) @@ -418,8 +418,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23") createStatefulSet(nginxStatefulsetInfo2, clusterClient) - awsVolIds = getVolumeIds(nginxStatefulsetInfo2, clusterClient) - verifyVolumesExists(awsVolIds) + awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient) + verifyVolumesExists(awsVolIDs) ginkgo.By("Deleting LB service") deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) @@ -428,7 +428,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { deleteCluster(ctx, cluster2) ginkgo.By("Deleting retained dynamically provisioned volumes") - deleteRetainedVolumes(awsVolIds) + deleteRetainedVolumes(awsVolIDs) ginkgo.By("PASSED!") }) }) @@ -459,8 +459,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient() 
createStatefulSet(nginxStatefulsetInfo, clusterClient) - awsVolIds := getVolumeIds(nginxStatefulsetInfo, clusterClient) - verifyVolumesExists(awsVolIds) + awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient) + verifyVolumesExists(awsVolIDs) kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer) configCluster.KubernetesVersion = kubernetesUgradeVersion @@ -488,8 +488,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23") createStatefulSet(nginxStatefulsetInfo2, clusterClient) - awsVolIds = getVolumeIds(nginxStatefulsetInfo2, clusterClient) - verifyVolumesExists(awsVolIds) + awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient) + verifyVolumesExists(awsVolIDs) ginkgo.By("Deleting LB service") deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) @@ -498,7 +498,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { deleteCluster(ctx, cluster2) ginkgo.By("Deleting retained dynamically provisioned volumes") - deleteRetainedVolumes(awsVolIds) + deleteRetainedVolumes(awsVolIDs) ginkgo.By("PASSED!") }) }) @@ -566,7 +566,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { subnetError := "Failed to create instance: failed to run instance: InvalidSubnetID.NotFound: " + "The subnet ID '%s' does not exist" return isErrorEventExists(namespace.Name, md1Name, "FailedCreate", fmt.Sprintf(subnetError, "invalid-subnet"), eventList) - }, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...).Should(BeTrue()) + }, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...).Should(BeTrue(), "Eventually failed waiting for 'invalid subnet ID' event to be reported") ginkgo.By("Creating Machine Deployment in non-configured Availability Zone") md2Name := clusterName + "-md-2" @@ -584,7 +584,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { eventList := getEvents(namespace.Name) azError := "Failed to create instance: no subnets available in availability zone \"%s\"" return isErrorEventExists(namespace.Name, md2Name, "FailedCreate", fmt.Sprintf(azError, *invalidAz), eventList) - }, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...).Should(BeTrue()) + }, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...).Should(BeTrue(), "Eventually failed waiting for 'no subnet available in AZ' event to be reported") }) }) @@ -705,7 +705,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { machineList := getAWSMachinesForDeployment(ns2.Name, *md2[0]) labels := machineList.Items[0].GetLabels() return labels[instancestate.Ec2InstanceStateLabelKey] == string(infrav1.InstanceStateTerminated) - }, e2eCtx.E2EConfig.GetIntervals("", "wait-machine-status")...).Should(BeTrue()) + }, e2eCtx.E2EConfig.GetIntervals("", "wait-machine-status")...).Should(BeTrue(), "Eventually failed waiting for AWSMachine to be labelled as terminated") ginkgo.By("Waiting for machine to reach Failed state") statusChecks := []framework.MachineStatusCheck{framework.MachinePhaseCheck(string(clusterv1.MachinePhaseFailed))} @@ -878,7 +878,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { } wlClusterInfra.Peering = aPeering return aPeering != nil - }, 60*time.Second).Should(BeTrue()) + }, 60*time.Second).Should(BeTrue(), "Eventually failed waiting for peering to be accepted") ginkgo.By("Creating security groups") mgmtSG, _ := shared.CreateSecurityGroup(e2eCtx, mgmtClusterName+"-all", mgmtClusterName+"-all", *mgmtClusterInfra.VPC.VpcId) @@ -1135,7 +1135,8 
@@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { Expect(err).To(BeNil()) return conditions.IsFalse(awsCluster, infrav1.VpcEndpointsReadyCondition) && conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1.DeletedReason - }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...).Should(BeTrue()) + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...).Should(BeTrue(), + "Eventually failed waiting for AWSCluster to show VPC endpoint as deleted in conditions") }) }) }) diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 098a280d69..43f0618b0c 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package helpers provides a set of utilities for testing controllers. package helpers import ( @@ -83,7 +84,7 @@ func init() { utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) // Get the root of the current file to use in CRD paths. - _, filename, _, _ := goruntime.Caller(0) //nolint + _, filename, _, _ := goruntime.Caller(0) //nolint:dogsled root = path.Join(path.Dir(filename), "..", "..") } @@ -237,7 +238,7 @@ func buildModifiedWebhook(tag string, relativeFilePath string) (admissionv1.Muta if o.GetKind() == mutatingWebhookKind { // update the name in metadata if o.GetName() == defaultMutatingWebhookName { - o.SetName(strings.Join([]string{defaultMutatingWebhookName, "-", tag}, "")) + o.SetName(defaultMutatingWebhookName + "-" + tag) if err := scheme.Scheme.Convert(&o, &mutatingWebhook, nil); err != nil { klog.Fatalf("failed to convert MutatingWebhookConfiguration %s", o.GetName()) } @@ -246,7 +247,7 @@ func buildModifiedWebhook(tag string, relativeFilePath string) (admissionv1.Muta if o.GetKind() == validatingWebhookKind { // update the name in metadata if o.GetName() == defaultValidatingWebhookName { - o.SetName(strings.Join([]string{defaultValidatingWebhookName, "-", tag}, "")) + o.SetName(defaultValidatingWebhookName + "-" + tag) if err := scheme.Scheme.Convert(&o, &validatingWebhook, nil); err != nil { klog.Fatalf("failed to convert ValidatingWebhookConfiguration %s", o.GetName()) } diff --git a/test/helpers/external/cluster.go b/test/helpers/external/cluster.go index 051fb88391..524c775e0d 100644 --- a/test/helpers/external/cluster.go +++ b/test/helpers/external/cluster.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package external provides mock CRDs for use in tests. package external import ( diff --git a/test/helpers/matchers.go b/test/helpers/matchers.go new file mode 100644 index 0000000000..202ae22c27 --- /dev/null +++ b/test/helpers/matchers.go @@ -0,0 +1,66 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helpers + +import ( + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/golang/mock/gomock" +) + +// PartialMatchCreateTargetGroupInput matches a partial CreateTargetGroupInput struct based on fuzzy matching rules. +func PartialMatchCreateTargetGroupInput(t *testing.T, i *elbv2.CreateTargetGroupInput) gomock.Matcher { + t.Helper() + return &createTargetGroupInputPartialMatcher{ + in: i, + t: t, + } +} + +// createTargetGroupInputPartialMatcher conforms to the gomock.Matcher interface in order to implement a match against a partial +// CreateTargetGroupInput expected value. +// In particular, the TargetGroupName expected value is used as a prefix, in order to support generated names. +type createTargetGroupInputPartialMatcher struct { + in *elbv2.CreateTargetGroupInput + t *testing.T +} + +func (m *createTargetGroupInputPartialMatcher) Matches(x interface{}) bool { + actual, ok := x.(*elbv2.CreateTargetGroupInput) + if !ok { + return false + } + + // Check for a perfect match across all fields first. + eq := gomock.Eq(m.in).Matches(actual) + + if !eq && (actual.Name != nil && m.in.Name != nil) { + // If the actual name is prefixed with the expected value, then it matches + if (*actual.Name != *m.in.Name) && strings.HasPrefix(*actual.Name, *m.in.Name) { + return true + } + } + + return eq +} + +func (m *createTargetGroupInputPartialMatcher) String() string { + return fmt.Sprintf("%v (%T)", m.in, m.in) +} diff --git a/test/mocks/aws_ec2api_mock.go b/test/mocks/aws_ec2api_mock.go index f23dc5e004..3942392c8c 100644 --- a/test/mocks/aws_ec2api_mock.go +++ b/test/mocks/aws_ec2api_mock.go @@ -1102,6 +1102,56 @@ func (mr *MockEC2APIMockRecorder) AssociateInstanceEventWindowWithContext(arg0, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateInstanceEventWindowWithContext", reflect.TypeOf((*MockEC2API)(nil).AssociateInstanceEventWindowWithContext), varargs...) } +// AssociateIpamByoasn mocks base method. +func (m *MockEC2API) AssociateIpamByoasn(arg0 *ec2.AssociateIpamByoasnInput) (*ec2.AssociateIpamByoasnOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssociateIpamByoasn", arg0) + ret0, _ := ret[0].(*ec2.AssociateIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AssociateIpamByoasn indicates an expected call of AssociateIpamByoasn. +func (mr *MockEC2APIMockRecorder) AssociateIpamByoasn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateIpamByoasn", reflect.TypeOf((*MockEC2API)(nil).AssociateIpamByoasn), arg0) +} + +// AssociateIpamByoasnRequest mocks base method. +func (m *MockEC2API) AssociateIpamByoasnRequest(arg0 *ec2.AssociateIpamByoasnInput) (*request.Request, *ec2.AssociateIpamByoasnOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssociateIpamByoasnRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.AssociateIpamByoasnOutput) + return ret0, ret1 +} + +// AssociateIpamByoasnRequest indicates an expected call of AssociateIpamByoasnRequest. +func (mr *MockEC2APIMockRecorder) AssociateIpamByoasnRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateIpamByoasnRequest", reflect.TypeOf((*MockEC2API)(nil).AssociateIpamByoasnRequest), arg0) +} + +// AssociateIpamByoasnWithContext mocks base method. 
+func (m *MockEC2API) AssociateIpamByoasnWithContext(arg0 context.Context, arg1 *ec2.AssociateIpamByoasnInput, arg2 ...request.Option) (*ec2.AssociateIpamByoasnOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AssociateIpamByoasnWithContext", varargs...) + ret0, _ := ret[0].(*ec2.AssociateIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AssociateIpamByoasnWithContext indicates an expected call of AssociateIpamByoasnWithContext. +func (mr *MockEC2APIMockRecorder) AssociateIpamByoasnWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateIpamByoasnWithContext", reflect.TypeOf((*MockEC2API)(nil).AssociateIpamByoasnWithContext), varargs...) +} + // AssociateIpamResourceDiscovery mocks base method. func (m *MockEC2API) AssociateIpamResourceDiscovery(arg0 *ec2.AssociateIpamResourceDiscoveryInput) (*ec2.AssociateIpamResourceDiscoveryOutput, error) { m.ctrl.T.Helper() @@ -10602,6 +10652,56 @@ func (mr *MockEC2APIMockRecorder) DeprovisionByoipCidrWithContext(arg0, arg1 int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeprovisionByoipCidrWithContext", reflect.TypeOf((*MockEC2API)(nil).DeprovisionByoipCidrWithContext), varargs...) } +// DeprovisionIpamByoasn mocks base method. +func (m *MockEC2API) DeprovisionIpamByoasn(arg0 *ec2.DeprovisionIpamByoasnInput) (*ec2.DeprovisionIpamByoasnOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeprovisionIpamByoasn", arg0) + ret0, _ := ret[0].(*ec2.DeprovisionIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeprovisionIpamByoasn indicates an expected call of DeprovisionIpamByoasn. +func (mr *MockEC2APIMockRecorder) DeprovisionIpamByoasn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeprovisionIpamByoasn", reflect.TypeOf((*MockEC2API)(nil).DeprovisionIpamByoasn), arg0) +} + +// DeprovisionIpamByoasnRequest mocks base method. +func (m *MockEC2API) DeprovisionIpamByoasnRequest(arg0 *ec2.DeprovisionIpamByoasnInput) (*request.Request, *ec2.DeprovisionIpamByoasnOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeprovisionIpamByoasnRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DeprovisionIpamByoasnOutput) + return ret0, ret1 +} + +// DeprovisionIpamByoasnRequest indicates an expected call of DeprovisionIpamByoasnRequest. +func (mr *MockEC2APIMockRecorder) DeprovisionIpamByoasnRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeprovisionIpamByoasnRequest", reflect.TypeOf((*MockEC2API)(nil).DeprovisionIpamByoasnRequest), arg0) +} + +// DeprovisionIpamByoasnWithContext mocks base method. +func (m *MockEC2API) DeprovisionIpamByoasnWithContext(arg0 context.Context, arg1 *ec2.DeprovisionIpamByoasnInput, arg2 ...request.Option) (*ec2.DeprovisionIpamByoasnOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeprovisionIpamByoasnWithContext", varargs...) 
+ ret0, _ := ret[0].(*ec2.DeprovisionIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeprovisionIpamByoasnWithContext indicates an expected call of DeprovisionIpamByoasnWithContext. +func (mr *MockEC2APIMockRecorder) DeprovisionIpamByoasnWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeprovisionIpamByoasnWithContext", reflect.TypeOf((*MockEC2API)(nil).DeprovisionIpamByoasnWithContext), varargs...) +} + // DeprovisionIpamPoolCidr mocks base method. func (m *MockEC2API) DeprovisionIpamPoolCidr(arg0 *ec2.DeprovisionIpamPoolCidrInput) (*ec2.DeprovisionIpamPoolCidrOutput, error) { m.ctrl.T.Helper() @@ -11484,6 +11584,89 @@ func (mr *MockEC2APIMockRecorder) DescribeByoipCidrsWithContext(arg0, arg1 inter return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeByoipCidrsWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeByoipCidrsWithContext), varargs...) } +// DescribeCapacityBlockOfferings mocks base method. +func (m *MockEC2API) DescribeCapacityBlockOfferings(arg0 *ec2.DescribeCapacityBlockOfferingsInput) (*ec2.DescribeCapacityBlockOfferingsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeCapacityBlockOfferings", arg0) + ret0, _ := ret[0].(*ec2.DescribeCapacityBlockOfferingsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeCapacityBlockOfferings indicates an expected call of DescribeCapacityBlockOfferings. +func (mr *MockEC2APIMockRecorder) DescribeCapacityBlockOfferings(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCapacityBlockOfferings", reflect.TypeOf((*MockEC2API)(nil).DescribeCapacityBlockOfferings), arg0) +} + +// DescribeCapacityBlockOfferingsPages mocks base method. +func (m *MockEC2API) DescribeCapacityBlockOfferingsPages(arg0 *ec2.DescribeCapacityBlockOfferingsInput, arg1 func(*ec2.DescribeCapacityBlockOfferingsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeCapacityBlockOfferingsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeCapacityBlockOfferingsPages indicates an expected call of DescribeCapacityBlockOfferingsPages. +func (mr *MockEC2APIMockRecorder) DescribeCapacityBlockOfferingsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCapacityBlockOfferingsPages", reflect.TypeOf((*MockEC2API)(nil).DescribeCapacityBlockOfferingsPages), arg0, arg1) +} + +// DescribeCapacityBlockOfferingsPagesWithContext mocks base method. +func (m *MockEC2API) DescribeCapacityBlockOfferingsPagesWithContext(arg0 context.Context, arg1 *ec2.DescribeCapacityBlockOfferingsInput, arg2 func(*ec2.DescribeCapacityBlockOfferingsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeCapacityBlockOfferingsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeCapacityBlockOfferingsPagesWithContext indicates an expected call of DescribeCapacityBlockOfferingsPagesWithContext. 
+func (mr *MockEC2APIMockRecorder) DescribeCapacityBlockOfferingsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCapacityBlockOfferingsPagesWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeCapacityBlockOfferingsPagesWithContext), varargs...) +} + +// DescribeCapacityBlockOfferingsRequest mocks base method. +func (m *MockEC2API) DescribeCapacityBlockOfferingsRequest(arg0 *ec2.DescribeCapacityBlockOfferingsInput) (*request.Request, *ec2.DescribeCapacityBlockOfferingsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeCapacityBlockOfferingsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DescribeCapacityBlockOfferingsOutput) + return ret0, ret1 +} + +// DescribeCapacityBlockOfferingsRequest indicates an expected call of DescribeCapacityBlockOfferingsRequest. +func (mr *MockEC2APIMockRecorder) DescribeCapacityBlockOfferingsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCapacityBlockOfferingsRequest", reflect.TypeOf((*MockEC2API)(nil).DescribeCapacityBlockOfferingsRequest), arg0) +} + +// DescribeCapacityBlockOfferingsWithContext mocks base method. +func (m *MockEC2API) DescribeCapacityBlockOfferingsWithContext(arg0 context.Context, arg1 *ec2.DescribeCapacityBlockOfferingsInput, arg2 ...request.Option) (*ec2.DescribeCapacityBlockOfferingsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeCapacityBlockOfferingsWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DescribeCapacityBlockOfferingsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeCapacityBlockOfferingsWithContext indicates an expected call of DescribeCapacityBlockOfferingsWithContext. +func (mr *MockEC2APIMockRecorder) DescribeCapacityBlockOfferingsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCapacityBlockOfferingsWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeCapacityBlockOfferingsWithContext), varargs...) +} + // DescribeCapacityReservationFleets mocks base method. func (m *MockEC2API) DescribeCapacityReservationFleets(arg0 *ec2.DescribeCapacityReservationFleetsInput) (*ec2.DescribeCapacityReservationFleetsOutput, error) { m.ctrl.T.Helper() @@ -14491,6 +14674,89 @@ func (mr *MockEC2APIMockRecorder) DescribeInstanceStatusWithContext(arg0, arg1 i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceStatusWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeInstanceStatusWithContext), varargs...) } +// DescribeInstanceTopology mocks base method. +func (m *MockEC2API) DescribeInstanceTopology(arg0 *ec2.DescribeInstanceTopologyInput) (*ec2.DescribeInstanceTopologyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeInstanceTopology", arg0) + ret0, _ := ret[0].(*ec2.DescribeInstanceTopologyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeInstanceTopology indicates an expected call of DescribeInstanceTopology. 
+func (mr *MockEC2APIMockRecorder) DescribeInstanceTopology(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceTopology", reflect.TypeOf((*MockEC2API)(nil).DescribeInstanceTopology), arg0) +} + +// DescribeInstanceTopologyPages mocks base method. +func (m *MockEC2API) DescribeInstanceTopologyPages(arg0 *ec2.DescribeInstanceTopologyInput, arg1 func(*ec2.DescribeInstanceTopologyOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeInstanceTopologyPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeInstanceTopologyPages indicates an expected call of DescribeInstanceTopologyPages. +func (mr *MockEC2APIMockRecorder) DescribeInstanceTopologyPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceTopologyPages", reflect.TypeOf((*MockEC2API)(nil).DescribeInstanceTopologyPages), arg0, arg1) +} + +// DescribeInstanceTopologyPagesWithContext mocks base method. +func (m *MockEC2API) DescribeInstanceTopologyPagesWithContext(arg0 context.Context, arg1 *ec2.DescribeInstanceTopologyInput, arg2 func(*ec2.DescribeInstanceTopologyOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeInstanceTopologyPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeInstanceTopologyPagesWithContext indicates an expected call of DescribeInstanceTopologyPagesWithContext. +func (mr *MockEC2APIMockRecorder) DescribeInstanceTopologyPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceTopologyPagesWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeInstanceTopologyPagesWithContext), varargs...) +} + +// DescribeInstanceTopologyRequest mocks base method. +func (m *MockEC2API) DescribeInstanceTopologyRequest(arg0 *ec2.DescribeInstanceTopologyInput) (*request.Request, *ec2.DescribeInstanceTopologyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeInstanceTopologyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DescribeInstanceTopologyOutput) + return ret0, ret1 +} + +// DescribeInstanceTopologyRequest indicates an expected call of DescribeInstanceTopologyRequest. +func (mr *MockEC2APIMockRecorder) DescribeInstanceTopologyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceTopologyRequest", reflect.TypeOf((*MockEC2API)(nil).DescribeInstanceTopologyRequest), arg0) +} + +// DescribeInstanceTopologyWithContext mocks base method. +func (m *MockEC2API) DescribeInstanceTopologyWithContext(arg0 context.Context, arg1 *ec2.DescribeInstanceTopologyInput, arg2 ...request.Option) (*ec2.DescribeInstanceTopologyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeInstanceTopologyWithContext", varargs...) 
+ ret0, _ := ret[0].(*ec2.DescribeInstanceTopologyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeInstanceTopologyWithContext indicates an expected call of DescribeInstanceTopologyWithContext. +func (mr *MockEC2APIMockRecorder) DescribeInstanceTopologyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInstanceTopologyWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeInstanceTopologyWithContext), varargs...) +} + // DescribeInstanceTypeOfferings mocks base method. func (m *MockEC2API) DescribeInstanceTypeOfferings(arg0 *ec2.DescribeInstanceTypeOfferingsInput) (*ec2.DescribeInstanceTypeOfferingsOutput, error) { m.ctrl.T.Helper() @@ -14823,6 +15089,56 @@ func (mr *MockEC2APIMockRecorder) DescribeInternetGatewaysWithContext(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeInternetGatewaysWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeInternetGatewaysWithContext), varargs...) } +// DescribeIpamByoasn mocks base method. +func (m *MockEC2API) DescribeIpamByoasn(arg0 *ec2.DescribeIpamByoasnInput) (*ec2.DescribeIpamByoasnOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeIpamByoasn", arg0) + ret0, _ := ret[0].(*ec2.DescribeIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeIpamByoasn indicates an expected call of DescribeIpamByoasn. +func (mr *MockEC2APIMockRecorder) DescribeIpamByoasn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeIpamByoasn", reflect.TypeOf((*MockEC2API)(nil).DescribeIpamByoasn), arg0) +} + +// DescribeIpamByoasnRequest mocks base method. +func (m *MockEC2API) DescribeIpamByoasnRequest(arg0 *ec2.DescribeIpamByoasnInput) (*request.Request, *ec2.DescribeIpamByoasnOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeIpamByoasnRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DescribeIpamByoasnOutput) + return ret0, ret1 +} + +// DescribeIpamByoasnRequest indicates an expected call of DescribeIpamByoasnRequest. +func (mr *MockEC2APIMockRecorder) DescribeIpamByoasnRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeIpamByoasnRequest", reflect.TypeOf((*MockEC2API)(nil).DescribeIpamByoasnRequest), arg0) +} + +// DescribeIpamByoasnWithContext mocks base method. +func (m *MockEC2API) DescribeIpamByoasnWithContext(arg0 context.Context, arg1 *ec2.DescribeIpamByoasnInput, arg2 ...request.Option) (*ec2.DescribeIpamByoasnOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeIpamByoasnWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DescribeIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeIpamByoasnWithContext indicates an expected call of DescribeIpamByoasnWithContext. +func (mr *MockEC2APIMockRecorder) DescribeIpamByoasnWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeIpamByoasnWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeIpamByoasnWithContext), varargs...) 
+} + // DescribeIpamPools mocks base method. func (m *MockEC2API) DescribeIpamPools(arg0 *ec2.DescribeIpamPoolsInput) (*ec2.DescribeIpamPoolsOutput, error) { m.ctrl.T.Helper() @@ -16035,6 +16351,139 @@ func (mr *MockEC2APIMockRecorder) DescribeLocalGatewaysWithContext(arg0, arg1 in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLocalGatewaysWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeLocalGatewaysWithContext), varargs...) } +// DescribeLockedSnapshots mocks base method. +func (m *MockEC2API) DescribeLockedSnapshots(arg0 *ec2.DescribeLockedSnapshotsInput) (*ec2.DescribeLockedSnapshotsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeLockedSnapshots", arg0) + ret0, _ := ret[0].(*ec2.DescribeLockedSnapshotsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeLockedSnapshots indicates an expected call of DescribeLockedSnapshots. +func (mr *MockEC2APIMockRecorder) DescribeLockedSnapshots(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLockedSnapshots", reflect.TypeOf((*MockEC2API)(nil).DescribeLockedSnapshots), arg0) +} + +// DescribeLockedSnapshotsRequest mocks base method. +func (m *MockEC2API) DescribeLockedSnapshotsRequest(arg0 *ec2.DescribeLockedSnapshotsInput) (*request.Request, *ec2.DescribeLockedSnapshotsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeLockedSnapshotsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DescribeLockedSnapshotsOutput) + return ret0, ret1 +} + +// DescribeLockedSnapshotsRequest indicates an expected call of DescribeLockedSnapshotsRequest. +func (mr *MockEC2APIMockRecorder) DescribeLockedSnapshotsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLockedSnapshotsRequest", reflect.TypeOf((*MockEC2API)(nil).DescribeLockedSnapshotsRequest), arg0) +} + +// DescribeLockedSnapshotsWithContext mocks base method. +func (m *MockEC2API) DescribeLockedSnapshotsWithContext(arg0 context.Context, arg1 *ec2.DescribeLockedSnapshotsInput, arg2 ...request.Option) (*ec2.DescribeLockedSnapshotsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeLockedSnapshotsWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DescribeLockedSnapshotsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeLockedSnapshotsWithContext indicates an expected call of DescribeLockedSnapshotsWithContext. +func (mr *MockEC2APIMockRecorder) DescribeLockedSnapshotsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeLockedSnapshotsWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeLockedSnapshotsWithContext), varargs...) +} + +// DescribeMacHosts mocks base method. +func (m *MockEC2API) DescribeMacHosts(arg0 *ec2.DescribeMacHostsInput) (*ec2.DescribeMacHostsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeMacHosts", arg0) + ret0, _ := ret[0].(*ec2.DescribeMacHostsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeMacHosts indicates an expected call of DescribeMacHosts. 
+func (mr *MockEC2APIMockRecorder) DescribeMacHosts(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMacHosts", reflect.TypeOf((*MockEC2API)(nil).DescribeMacHosts), arg0) +} + +// DescribeMacHostsPages mocks base method. +func (m *MockEC2API) DescribeMacHostsPages(arg0 *ec2.DescribeMacHostsInput, arg1 func(*ec2.DescribeMacHostsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeMacHostsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeMacHostsPages indicates an expected call of DescribeMacHostsPages. +func (mr *MockEC2APIMockRecorder) DescribeMacHostsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMacHostsPages", reflect.TypeOf((*MockEC2API)(nil).DescribeMacHostsPages), arg0, arg1) +} + +// DescribeMacHostsPagesWithContext mocks base method. +func (m *MockEC2API) DescribeMacHostsPagesWithContext(arg0 context.Context, arg1 *ec2.DescribeMacHostsInput, arg2 func(*ec2.DescribeMacHostsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeMacHostsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeMacHostsPagesWithContext indicates an expected call of DescribeMacHostsPagesWithContext. +func (mr *MockEC2APIMockRecorder) DescribeMacHostsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMacHostsPagesWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeMacHostsPagesWithContext), varargs...) +} + +// DescribeMacHostsRequest mocks base method. +func (m *MockEC2API) DescribeMacHostsRequest(arg0 *ec2.DescribeMacHostsInput) (*request.Request, *ec2.DescribeMacHostsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeMacHostsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DescribeMacHostsOutput) + return ret0, ret1 +} + +// DescribeMacHostsRequest indicates an expected call of DescribeMacHostsRequest. +func (mr *MockEC2APIMockRecorder) DescribeMacHostsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMacHostsRequest", reflect.TypeOf((*MockEC2API)(nil).DescribeMacHostsRequest), arg0) +} + +// DescribeMacHostsWithContext mocks base method. +func (m *MockEC2API) DescribeMacHostsWithContext(arg0 context.Context, arg1 *ec2.DescribeMacHostsInput, arg2 ...request.Option) (*ec2.DescribeMacHostsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeMacHostsWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DescribeMacHostsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeMacHostsWithContext indicates an expected call of DescribeMacHostsWithContext. +func (mr *MockEC2APIMockRecorder) DescribeMacHostsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMacHostsWithContext", reflect.TypeOf((*MockEC2API)(nil).DescribeMacHostsWithContext), varargs...) +} + // DescribeManagedPrefixLists mocks base method. func (m *MockEC2API) DescribeManagedPrefixLists(arg0 *ec2.DescribeManagedPrefixListsInput) (*ec2.DescribeManagedPrefixListsOutput, error) { m.ctrl.T.Helper() @@ -22365,6 +22814,71 @@ func (mr *MockEC2APIMockRecorder) DisableFastSnapshotRestoresWithContext(arg0, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableFastSnapshotRestoresWithContext", reflect.TypeOf((*MockEC2API)(nil).DisableFastSnapshotRestoresWithContext), varargs...) } +// DisableImage mocks base method. +func (m *MockEC2API) DisableImage(arg0 *ec2.DisableImageInput) (*ec2.DisableImageOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisableImage", arg0) + ret0, _ := ret[0].(*ec2.DisableImageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableImage indicates an expected call of DisableImage. +func (mr *MockEC2APIMockRecorder) DisableImage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImage", reflect.TypeOf((*MockEC2API)(nil).DisableImage), arg0) +} + +// DisableImageBlockPublicAccess mocks base method. +func (m *MockEC2API) DisableImageBlockPublicAccess(arg0 *ec2.DisableImageBlockPublicAccessInput) (*ec2.DisableImageBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisableImageBlockPublicAccess", arg0) + ret0, _ := ret[0].(*ec2.DisableImageBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableImageBlockPublicAccess indicates an expected call of DisableImageBlockPublicAccess. +func (mr *MockEC2APIMockRecorder) DisableImageBlockPublicAccess(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImageBlockPublicAccess", reflect.TypeOf((*MockEC2API)(nil).DisableImageBlockPublicAccess), arg0) +} + +// DisableImageBlockPublicAccessRequest mocks base method. +func (m *MockEC2API) DisableImageBlockPublicAccessRequest(arg0 *ec2.DisableImageBlockPublicAccessInput) (*request.Request, *ec2.DisableImageBlockPublicAccessOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisableImageBlockPublicAccessRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DisableImageBlockPublicAccessOutput) + return ret0, ret1 +} + +// DisableImageBlockPublicAccessRequest indicates an expected call of DisableImageBlockPublicAccessRequest. +func (mr *MockEC2APIMockRecorder) DisableImageBlockPublicAccessRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImageBlockPublicAccessRequest", reflect.TypeOf((*MockEC2API)(nil).DisableImageBlockPublicAccessRequest), arg0) +} + +// DisableImageBlockPublicAccessWithContext mocks base method. +func (m *MockEC2API) DisableImageBlockPublicAccessWithContext(arg0 context.Context, arg1 *ec2.DisableImageBlockPublicAccessInput, arg2 ...request.Option) (*ec2.DisableImageBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableImageBlockPublicAccessWithContext", varargs...) 
+ ret0, _ := ret[0].(*ec2.DisableImageBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableImageBlockPublicAccessWithContext indicates an expected call of DisableImageBlockPublicAccessWithContext. +func (mr *MockEC2APIMockRecorder) DisableImageBlockPublicAccessWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImageBlockPublicAccessWithContext", reflect.TypeOf((*MockEC2API)(nil).DisableImageBlockPublicAccessWithContext), varargs...) +} + // DisableImageDeprecation mocks base method. func (m *MockEC2API) DisableImageDeprecation(arg0 *ec2.DisableImageDeprecationInput) (*ec2.DisableImageDeprecationOutput, error) { m.ctrl.T.Helper() @@ -22415,6 +22929,41 @@ func (mr *MockEC2APIMockRecorder) DisableImageDeprecationWithContext(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImageDeprecationWithContext", reflect.TypeOf((*MockEC2API)(nil).DisableImageDeprecationWithContext), varargs...) } +// DisableImageRequest mocks base method. +func (m *MockEC2API) DisableImageRequest(arg0 *ec2.DisableImageInput) (*request.Request, *ec2.DisableImageOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisableImageRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DisableImageOutput) + return ret0, ret1 +} + +// DisableImageRequest indicates an expected call of DisableImageRequest. +func (mr *MockEC2APIMockRecorder) DisableImageRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImageRequest", reflect.TypeOf((*MockEC2API)(nil).DisableImageRequest), arg0) +} + +// DisableImageWithContext mocks base method. +func (m *MockEC2API) DisableImageWithContext(arg0 context.Context, arg1 *ec2.DisableImageInput, arg2 ...request.Option) (*ec2.DisableImageOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableImageWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DisableImageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableImageWithContext indicates an expected call of DisableImageWithContext. +func (mr *MockEC2APIMockRecorder) DisableImageWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableImageWithContext", reflect.TypeOf((*MockEC2API)(nil).DisableImageWithContext), varargs...) +} + // DisableIpamOrganizationAdminAccount mocks base method. func (m *MockEC2API) DisableIpamOrganizationAdminAccount(arg0 *ec2.DisableIpamOrganizationAdminAccountInput) (*ec2.DisableIpamOrganizationAdminAccountOutput, error) { m.ctrl.T.Helper() @@ -22515,6 +23064,56 @@ func (mr *MockEC2APIMockRecorder) DisableSerialConsoleAccessWithContext(arg0, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableSerialConsoleAccessWithContext", reflect.TypeOf((*MockEC2API)(nil).DisableSerialConsoleAccessWithContext), varargs...) } +// DisableSnapshotBlockPublicAccess mocks base method. 
+func (m *MockEC2API) DisableSnapshotBlockPublicAccess(arg0 *ec2.DisableSnapshotBlockPublicAccessInput) (*ec2.DisableSnapshotBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisableSnapshotBlockPublicAccess", arg0) + ret0, _ := ret[0].(*ec2.DisableSnapshotBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableSnapshotBlockPublicAccess indicates an expected call of DisableSnapshotBlockPublicAccess. +func (mr *MockEC2APIMockRecorder) DisableSnapshotBlockPublicAccess(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableSnapshotBlockPublicAccess", reflect.TypeOf((*MockEC2API)(nil).DisableSnapshotBlockPublicAccess), arg0) +} + +// DisableSnapshotBlockPublicAccessRequest mocks base method. +func (m *MockEC2API) DisableSnapshotBlockPublicAccessRequest(arg0 *ec2.DisableSnapshotBlockPublicAccessInput) (*request.Request, *ec2.DisableSnapshotBlockPublicAccessOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisableSnapshotBlockPublicAccessRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DisableSnapshotBlockPublicAccessOutput) + return ret0, ret1 +} + +// DisableSnapshotBlockPublicAccessRequest indicates an expected call of DisableSnapshotBlockPublicAccessRequest. +func (mr *MockEC2APIMockRecorder) DisableSnapshotBlockPublicAccessRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableSnapshotBlockPublicAccessRequest", reflect.TypeOf((*MockEC2API)(nil).DisableSnapshotBlockPublicAccessRequest), arg0) +} + +// DisableSnapshotBlockPublicAccessWithContext mocks base method. +func (m *MockEC2API) DisableSnapshotBlockPublicAccessWithContext(arg0 context.Context, arg1 *ec2.DisableSnapshotBlockPublicAccessInput, arg2 ...request.Option) (*ec2.DisableSnapshotBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableSnapshotBlockPublicAccessWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DisableSnapshotBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableSnapshotBlockPublicAccessWithContext indicates an expected call of DisableSnapshotBlockPublicAccessWithContext. +func (mr *MockEC2APIMockRecorder) DisableSnapshotBlockPublicAccessWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableSnapshotBlockPublicAccessWithContext", reflect.TypeOf((*MockEC2API)(nil).DisableSnapshotBlockPublicAccessWithContext), varargs...) +} + // DisableTransitGatewayRouteTablePropagation mocks base method. func (m *MockEC2API) DisableTransitGatewayRouteTablePropagation(arg0 *ec2.DisableTransitGatewayRouteTablePropagationInput) (*ec2.DisableTransitGatewayRouteTablePropagationOutput, error) { m.ctrl.T.Helper() @@ -22965,6 +23564,56 @@ func (mr *MockEC2APIMockRecorder) DisassociateInstanceEventWindowWithContext(arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateInstanceEventWindowWithContext", reflect.TypeOf((*MockEC2API)(nil).DisassociateInstanceEventWindowWithContext), varargs...) } +// DisassociateIpamByoasn mocks base method. 
+func (m *MockEC2API) DisassociateIpamByoasn(arg0 *ec2.DisassociateIpamByoasnInput) (*ec2.DisassociateIpamByoasnOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisassociateIpamByoasn", arg0) + ret0, _ := ret[0].(*ec2.DisassociateIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisassociateIpamByoasn indicates an expected call of DisassociateIpamByoasn. +func (mr *MockEC2APIMockRecorder) DisassociateIpamByoasn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateIpamByoasn", reflect.TypeOf((*MockEC2API)(nil).DisassociateIpamByoasn), arg0) +} + +// DisassociateIpamByoasnRequest mocks base method. +func (m *MockEC2API) DisassociateIpamByoasnRequest(arg0 *ec2.DisassociateIpamByoasnInput) (*request.Request, *ec2.DisassociateIpamByoasnOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DisassociateIpamByoasnRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.DisassociateIpamByoasnOutput) + return ret0, ret1 +} + +// DisassociateIpamByoasnRequest indicates an expected call of DisassociateIpamByoasnRequest. +func (mr *MockEC2APIMockRecorder) DisassociateIpamByoasnRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateIpamByoasnRequest", reflect.TypeOf((*MockEC2API)(nil).DisassociateIpamByoasnRequest), arg0) +} + +// DisassociateIpamByoasnWithContext mocks base method. +func (m *MockEC2API) DisassociateIpamByoasnWithContext(arg0 context.Context, arg1 *ec2.DisassociateIpamByoasnInput, arg2 ...request.Option) (*ec2.DisassociateIpamByoasnOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisassociateIpamByoasnWithContext", varargs...) + ret0, _ := ret[0].(*ec2.DisassociateIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisassociateIpamByoasnWithContext indicates an expected call of DisassociateIpamByoasnWithContext. +func (mr *MockEC2APIMockRecorder) DisassociateIpamByoasnWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateIpamByoasnWithContext", reflect.TypeOf((*MockEC2API)(nil).DisassociateIpamByoasnWithContext), varargs...) +} + // DisassociateIpamResourceDiscovery mocks base method. func (m *MockEC2API) DisassociateIpamResourceDiscovery(arg0 *ec2.DisassociateIpamResourceDiscoveryInput) (*ec2.DisassociateIpamResourceDiscoveryOutput, error) { m.ctrl.T.Helper() @@ -23665,6 +24314,71 @@ func (mr *MockEC2APIMockRecorder) EnableFastSnapshotRestoresWithContext(arg0, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableFastSnapshotRestoresWithContext", reflect.TypeOf((*MockEC2API)(nil).EnableFastSnapshotRestoresWithContext), varargs...) } +// EnableImage mocks base method. +func (m *MockEC2API) EnableImage(arg0 *ec2.EnableImageInput) (*ec2.EnableImageOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableImage", arg0) + ret0, _ := ret[0].(*ec2.EnableImageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableImage indicates an expected call of EnableImage. 
+func (mr *MockEC2APIMockRecorder) EnableImage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImage", reflect.TypeOf((*MockEC2API)(nil).EnableImage), arg0) +} + +// EnableImageBlockPublicAccess mocks base method. +func (m *MockEC2API) EnableImageBlockPublicAccess(arg0 *ec2.EnableImageBlockPublicAccessInput) (*ec2.EnableImageBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableImageBlockPublicAccess", arg0) + ret0, _ := ret[0].(*ec2.EnableImageBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableImageBlockPublicAccess indicates an expected call of EnableImageBlockPublicAccess. +func (mr *MockEC2APIMockRecorder) EnableImageBlockPublicAccess(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImageBlockPublicAccess", reflect.TypeOf((*MockEC2API)(nil).EnableImageBlockPublicAccess), arg0) +} + +// EnableImageBlockPublicAccessRequest mocks base method. +func (m *MockEC2API) EnableImageBlockPublicAccessRequest(arg0 *ec2.EnableImageBlockPublicAccessInput) (*request.Request, *ec2.EnableImageBlockPublicAccessOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableImageBlockPublicAccessRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.EnableImageBlockPublicAccessOutput) + return ret0, ret1 +} + +// EnableImageBlockPublicAccessRequest indicates an expected call of EnableImageBlockPublicAccessRequest. +func (mr *MockEC2APIMockRecorder) EnableImageBlockPublicAccessRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImageBlockPublicAccessRequest", reflect.TypeOf((*MockEC2API)(nil).EnableImageBlockPublicAccessRequest), arg0) +} + +// EnableImageBlockPublicAccessWithContext mocks base method. +func (m *MockEC2API) EnableImageBlockPublicAccessWithContext(arg0 context.Context, arg1 *ec2.EnableImageBlockPublicAccessInput, arg2 ...request.Option) (*ec2.EnableImageBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableImageBlockPublicAccessWithContext", varargs...) + ret0, _ := ret[0].(*ec2.EnableImageBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableImageBlockPublicAccessWithContext indicates an expected call of EnableImageBlockPublicAccessWithContext. +func (mr *MockEC2APIMockRecorder) EnableImageBlockPublicAccessWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImageBlockPublicAccessWithContext", reflect.TypeOf((*MockEC2API)(nil).EnableImageBlockPublicAccessWithContext), varargs...) +} + // EnableImageDeprecation mocks base method. func (m *MockEC2API) EnableImageDeprecation(arg0 *ec2.EnableImageDeprecationInput) (*ec2.EnableImageDeprecationOutput, error) { m.ctrl.T.Helper() @@ -23715,6 +24429,41 @@ func (mr *MockEC2APIMockRecorder) EnableImageDeprecationWithContext(arg0, arg1 i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImageDeprecationWithContext", reflect.TypeOf((*MockEC2API)(nil).EnableImageDeprecationWithContext), varargs...) } +// EnableImageRequest mocks base method. 
+func (m *MockEC2API) EnableImageRequest(arg0 *ec2.EnableImageInput) (*request.Request, *ec2.EnableImageOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableImageRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.EnableImageOutput) + return ret0, ret1 +} + +// EnableImageRequest indicates an expected call of EnableImageRequest. +func (mr *MockEC2APIMockRecorder) EnableImageRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImageRequest", reflect.TypeOf((*MockEC2API)(nil).EnableImageRequest), arg0) +} + +// EnableImageWithContext mocks base method. +func (m *MockEC2API) EnableImageWithContext(arg0 context.Context, arg1 *ec2.EnableImageInput, arg2 ...request.Option) (*ec2.EnableImageOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableImageWithContext", varargs...) + ret0, _ := ret[0].(*ec2.EnableImageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableImageWithContext indicates an expected call of EnableImageWithContext. +func (mr *MockEC2APIMockRecorder) EnableImageWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableImageWithContext", reflect.TypeOf((*MockEC2API)(nil).EnableImageWithContext), varargs...) +} + // EnableIpamOrganizationAdminAccount mocks base method. func (m *MockEC2API) EnableIpamOrganizationAdminAccount(arg0 *ec2.EnableIpamOrganizationAdminAccountInput) (*ec2.EnableIpamOrganizationAdminAccountOutput, error) { m.ctrl.T.Helper() @@ -23865,6 +24614,56 @@ func (mr *MockEC2APIMockRecorder) EnableSerialConsoleAccessWithContext(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableSerialConsoleAccessWithContext", reflect.TypeOf((*MockEC2API)(nil).EnableSerialConsoleAccessWithContext), varargs...) } +// EnableSnapshotBlockPublicAccess mocks base method. +func (m *MockEC2API) EnableSnapshotBlockPublicAccess(arg0 *ec2.EnableSnapshotBlockPublicAccessInput) (*ec2.EnableSnapshotBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableSnapshotBlockPublicAccess", arg0) + ret0, _ := ret[0].(*ec2.EnableSnapshotBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableSnapshotBlockPublicAccess indicates an expected call of EnableSnapshotBlockPublicAccess. +func (mr *MockEC2APIMockRecorder) EnableSnapshotBlockPublicAccess(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableSnapshotBlockPublicAccess", reflect.TypeOf((*MockEC2API)(nil).EnableSnapshotBlockPublicAccess), arg0) +} + +// EnableSnapshotBlockPublicAccessRequest mocks base method. +func (m *MockEC2API) EnableSnapshotBlockPublicAccessRequest(arg0 *ec2.EnableSnapshotBlockPublicAccessInput) (*request.Request, *ec2.EnableSnapshotBlockPublicAccessOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableSnapshotBlockPublicAccessRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.EnableSnapshotBlockPublicAccessOutput) + return ret0, ret1 +} + +// EnableSnapshotBlockPublicAccessRequest indicates an expected call of EnableSnapshotBlockPublicAccessRequest. 
+func (mr *MockEC2APIMockRecorder) EnableSnapshotBlockPublicAccessRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableSnapshotBlockPublicAccessRequest", reflect.TypeOf((*MockEC2API)(nil).EnableSnapshotBlockPublicAccessRequest), arg0) +} + +// EnableSnapshotBlockPublicAccessWithContext mocks base method. +func (m *MockEC2API) EnableSnapshotBlockPublicAccessWithContext(arg0 context.Context, arg1 *ec2.EnableSnapshotBlockPublicAccessInput, arg2 ...request.Option) (*ec2.EnableSnapshotBlockPublicAccessOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableSnapshotBlockPublicAccessWithContext", varargs...) + ret0, _ := ret[0].(*ec2.EnableSnapshotBlockPublicAccessOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableSnapshotBlockPublicAccessWithContext indicates an expected call of EnableSnapshotBlockPublicAccessWithContext. +func (mr *MockEC2APIMockRecorder) EnableSnapshotBlockPublicAccessWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableSnapshotBlockPublicAccessWithContext", reflect.TypeOf((*MockEC2API)(nil).EnableSnapshotBlockPublicAccessWithContext), varargs...) +} + // EnableTransitGatewayRouteTablePropagation mocks base method. func (m *MockEC2API) EnableTransitGatewayRouteTablePropagation(arg0 *ec2.EnableTransitGatewayRouteTablePropagationInput) (*ec2.EnableTransitGatewayRouteTablePropagationOutput, error) { m.ctrl.T.Helper() @@ -25064,6 +25863,106 @@ func (mr *MockEC2APIMockRecorder) GetHostReservationPurchasePreviewWithContext(a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostReservationPurchasePreviewWithContext", reflect.TypeOf((*MockEC2API)(nil).GetHostReservationPurchasePreviewWithContext), varargs...) } +// GetImageBlockPublicAccessState mocks base method. +func (m *MockEC2API) GetImageBlockPublicAccessState(arg0 *ec2.GetImageBlockPublicAccessStateInput) (*ec2.GetImageBlockPublicAccessStateOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetImageBlockPublicAccessState", arg0) + ret0, _ := ret[0].(*ec2.GetImageBlockPublicAccessStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetImageBlockPublicAccessState indicates an expected call of GetImageBlockPublicAccessState. +func (mr *MockEC2APIMockRecorder) GetImageBlockPublicAccessState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageBlockPublicAccessState", reflect.TypeOf((*MockEC2API)(nil).GetImageBlockPublicAccessState), arg0) +} + +// GetImageBlockPublicAccessStateRequest mocks base method. +func (m *MockEC2API) GetImageBlockPublicAccessStateRequest(arg0 *ec2.GetImageBlockPublicAccessStateInput) (*request.Request, *ec2.GetImageBlockPublicAccessStateOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetImageBlockPublicAccessStateRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.GetImageBlockPublicAccessStateOutput) + return ret0, ret1 +} + +// GetImageBlockPublicAccessStateRequest indicates an expected call of GetImageBlockPublicAccessStateRequest. 
+func (mr *MockEC2APIMockRecorder) GetImageBlockPublicAccessStateRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageBlockPublicAccessStateRequest", reflect.TypeOf((*MockEC2API)(nil).GetImageBlockPublicAccessStateRequest), arg0) +} + +// GetImageBlockPublicAccessStateWithContext mocks base method. +func (m *MockEC2API) GetImageBlockPublicAccessStateWithContext(arg0 context.Context, arg1 *ec2.GetImageBlockPublicAccessStateInput, arg2 ...request.Option) (*ec2.GetImageBlockPublicAccessStateOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetImageBlockPublicAccessStateWithContext", varargs...) + ret0, _ := ret[0].(*ec2.GetImageBlockPublicAccessStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetImageBlockPublicAccessStateWithContext indicates an expected call of GetImageBlockPublicAccessStateWithContext. +func (mr *MockEC2APIMockRecorder) GetImageBlockPublicAccessStateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageBlockPublicAccessStateWithContext", reflect.TypeOf((*MockEC2API)(nil).GetImageBlockPublicAccessStateWithContext), varargs...) +} + +// GetInstanceMetadataDefaults mocks base method. +func (m *MockEC2API) GetInstanceMetadataDefaults(arg0 *ec2.GetInstanceMetadataDefaultsInput) (*ec2.GetInstanceMetadataDefaultsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInstanceMetadataDefaults", arg0) + ret0, _ := ret[0].(*ec2.GetInstanceMetadataDefaultsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetInstanceMetadataDefaults indicates an expected call of GetInstanceMetadataDefaults. +func (mr *MockEC2APIMockRecorder) GetInstanceMetadataDefaults(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceMetadataDefaults", reflect.TypeOf((*MockEC2API)(nil).GetInstanceMetadataDefaults), arg0) +} + +// GetInstanceMetadataDefaultsRequest mocks base method. +func (m *MockEC2API) GetInstanceMetadataDefaultsRequest(arg0 *ec2.GetInstanceMetadataDefaultsInput) (*request.Request, *ec2.GetInstanceMetadataDefaultsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInstanceMetadataDefaultsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.GetInstanceMetadataDefaultsOutput) + return ret0, ret1 +} + +// GetInstanceMetadataDefaultsRequest indicates an expected call of GetInstanceMetadataDefaultsRequest. +func (mr *MockEC2APIMockRecorder) GetInstanceMetadataDefaultsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceMetadataDefaultsRequest", reflect.TypeOf((*MockEC2API)(nil).GetInstanceMetadataDefaultsRequest), arg0) +} + +// GetInstanceMetadataDefaultsWithContext mocks base method. +func (m *MockEC2API) GetInstanceMetadataDefaultsWithContext(arg0 context.Context, arg1 *ec2.GetInstanceMetadataDefaultsInput, arg2 ...request.Option) (*ec2.GetInstanceMetadataDefaultsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetInstanceMetadataDefaultsWithContext", varargs...) 
+ ret0, _ := ret[0].(*ec2.GetInstanceMetadataDefaultsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetInstanceMetadataDefaultsWithContext indicates an expected call of GetInstanceMetadataDefaultsWithContext. +func (mr *MockEC2APIMockRecorder) GetInstanceMetadataDefaultsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceMetadataDefaultsWithContext", reflect.TypeOf((*MockEC2API)(nil).GetInstanceMetadataDefaultsWithContext), varargs...) +} + // GetInstanceTypesFromInstanceRequirements mocks base method. func (m *MockEC2API) GetInstanceTypesFromInstanceRequirements(arg0 *ec2.GetInstanceTypesFromInstanceRequirementsInput) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error) { m.ctrl.T.Helper() @@ -25363,6 +26262,56 @@ func (mr *MockEC2APIMockRecorder) GetIpamDiscoveredAccountsWithContext(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIpamDiscoveredAccountsWithContext", reflect.TypeOf((*MockEC2API)(nil).GetIpamDiscoveredAccountsWithContext), varargs...) } +// GetIpamDiscoveredPublicAddresses mocks base method. +func (m *MockEC2API) GetIpamDiscoveredPublicAddresses(arg0 *ec2.GetIpamDiscoveredPublicAddressesInput) (*ec2.GetIpamDiscoveredPublicAddressesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIpamDiscoveredPublicAddresses", arg0) + ret0, _ := ret[0].(*ec2.GetIpamDiscoveredPublicAddressesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIpamDiscoveredPublicAddresses indicates an expected call of GetIpamDiscoveredPublicAddresses. +func (mr *MockEC2APIMockRecorder) GetIpamDiscoveredPublicAddresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIpamDiscoveredPublicAddresses", reflect.TypeOf((*MockEC2API)(nil).GetIpamDiscoveredPublicAddresses), arg0) +} + +// GetIpamDiscoveredPublicAddressesRequest mocks base method. +func (m *MockEC2API) GetIpamDiscoveredPublicAddressesRequest(arg0 *ec2.GetIpamDiscoveredPublicAddressesInput) (*request.Request, *ec2.GetIpamDiscoveredPublicAddressesOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIpamDiscoveredPublicAddressesRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.GetIpamDiscoveredPublicAddressesOutput) + return ret0, ret1 +} + +// GetIpamDiscoveredPublicAddressesRequest indicates an expected call of GetIpamDiscoveredPublicAddressesRequest. +func (mr *MockEC2APIMockRecorder) GetIpamDiscoveredPublicAddressesRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIpamDiscoveredPublicAddressesRequest", reflect.TypeOf((*MockEC2API)(nil).GetIpamDiscoveredPublicAddressesRequest), arg0) +} + +// GetIpamDiscoveredPublicAddressesWithContext mocks base method. +func (m *MockEC2API) GetIpamDiscoveredPublicAddressesWithContext(arg0 context.Context, arg1 *ec2.GetIpamDiscoveredPublicAddressesInput, arg2 ...request.Option) (*ec2.GetIpamDiscoveredPublicAddressesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetIpamDiscoveredPublicAddressesWithContext", varargs...) 
+ ret0, _ := ret[0].(*ec2.GetIpamDiscoveredPublicAddressesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIpamDiscoveredPublicAddressesWithContext indicates an expected call of GetIpamDiscoveredPublicAddressesWithContext. +func (mr *MockEC2APIMockRecorder) GetIpamDiscoveredPublicAddressesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIpamDiscoveredPublicAddressesWithContext", reflect.TypeOf((*MockEC2API)(nil).GetIpamDiscoveredPublicAddressesWithContext), varargs...) +} + // GetIpamDiscoveredResourceCidrs mocks base method. func (m *MockEC2API) GetIpamDiscoveredResourceCidrs(arg0 *ec2.GetIpamDiscoveredResourceCidrsInput) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error) { m.ctrl.T.Helper() @@ -26144,6 +27093,89 @@ func (mr *MockEC2APIMockRecorder) GetReservedInstancesExchangeQuoteWithContext(a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReservedInstancesExchangeQuoteWithContext", reflect.TypeOf((*MockEC2API)(nil).GetReservedInstancesExchangeQuoteWithContext), varargs...) } +// GetSecurityGroupsForVpc mocks base method. +func (m *MockEC2API) GetSecurityGroupsForVpc(arg0 *ec2.GetSecurityGroupsForVpcInput) (*ec2.GetSecurityGroupsForVpcOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecurityGroupsForVpc", arg0) + ret0, _ := ret[0].(*ec2.GetSecurityGroupsForVpcOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSecurityGroupsForVpc indicates an expected call of GetSecurityGroupsForVpc. +func (mr *MockEC2APIMockRecorder) GetSecurityGroupsForVpc(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecurityGroupsForVpc", reflect.TypeOf((*MockEC2API)(nil).GetSecurityGroupsForVpc), arg0) +} + +// GetSecurityGroupsForVpcPages mocks base method. +func (m *MockEC2API) GetSecurityGroupsForVpcPages(arg0 *ec2.GetSecurityGroupsForVpcInput, arg1 func(*ec2.GetSecurityGroupsForVpcOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecurityGroupsForVpcPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetSecurityGroupsForVpcPages indicates an expected call of GetSecurityGroupsForVpcPages. +func (mr *MockEC2APIMockRecorder) GetSecurityGroupsForVpcPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecurityGroupsForVpcPages", reflect.TypeOf((*MockEC2API)(nil).GetSecurityGroupsForVpcPages), arg0, arg1) +} + +// GetSecurityGroupsForVpcPagesWithContext mocks base method. +func (m *MockEC2API) GetSecurityGroupsForVpcPagesWithContext(arg0 context.Context, arg1 *ec2.GetSecurityGroupsForVpcInput, arg2 func(*ec2.GetSecurityGroupsForVpcOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSecurityGroupsForVpcPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetSecurityGroupsForVpcPagesWithContext indicates an expected call of GetSecurityGroupsForVpcPagesWithContext. +func (mr *MockEC2APIMockRecorder) GetSecurityGroupsForVpcPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecurityGroupsForVpcPagesWithContext", reflect.TypeOf((*MockEC2API)(nil).GetSecurityGroupsForVpcPagesWithContext), varargs...) +} + +// GetSecurityGroupsForVpcRequest mocks base method. +func (m *MockEC2API) GetSecurityGroupsForVpcRequest(arg0 *ec2.GetSecurityGroupsForVpcInput) (*request.Request, *ec2.GetSecurityGroupsForVpcOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecurityGroupsForVpcRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.GetSecurityGroupsForVpcOutput) + return ret0, ret1 +} + +// GetSecurityGroupsForVpcRequest indicates an expected call of GetSecurityGroupsForVpcRequest. +func (mr *MockEC2APIMockRecorder) GetSecurityGroupsForVpcRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecurityGroupsForVpcRequest", reflect.TypeOf((*MockEC2API)(nil).GetSecurityGroupsForVpcRequest), arg0) +} + +// GetSecurityGroupsForVpcWithContext mocks base method. +func (m *MockEC2API) GetSecurityGroupsForVpcWithContext(arg0 context.Context, arg1 *ec2.GetSecurityGroupsForVpcInput, arg2 ...request.Option) (*ec2.GetSecurityGroupsForVpcOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSecurityGroupsForVpcWithContext", varargs...) + ret0, _ := ret[0].(*ec2.GetSecurityGroupsForVpcOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSecurityGroupsForVpcWithContext indicates an expected call of GetSecurityGroupsForVpcWithContext. +func (mr *MockEC2APIMockRecorder) GetSecurityGroupsForVpcWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecurityGroupsForVpcWithContext", reflect.TypeOf((*MockEC2API)(nil).GetSecurityGroupsForVpcWithContext), varargs...) +} + // GetSerialConsoleAccessStatus mocks base method. func (m *MockEC2API) GetSerialConsoleAccessStatus(arg0 *ec2.GetSerialConsoleAccessStatusInput) (*ec2.GetSerialConsoleAccessStatusOutput, error) { m.ctrl.T.Helper() @@ -26194,6 +27226,56 @@ func (mr *MockEC2APIMockRecorder) GetSerialConsoleAccessStatusWithContext(arg0, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSerialConsoleAccessStatusWithContext", reflect.TypeOf((*MockEC2API)(nil).GetSerialConsoleAccessStatusWithContext), varargs...) } +// GetSnapshotBlockPublicAccessState mocks base method. +func (m *MockEC2API) GetSnapshotBlockPublicAccessState(arg0 *ec2.GetSnapshotBlockPublicAccessStateInput) (*ec2.GetSnapshotBlockPublicAccessStateOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnapshotBlockPublicAccessState", arg0) + ret0, _ := ret[0].(*ec2.GetSnapshotBlockPublicAccessStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSnapshotBlockPublicAccessState indicates an expected call of GetSnapshotBlockPublicAccessState. +func (mr *MockEC2APIMockRecorder) GetSnapshotBlockPublicAccessState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotBlockPublicAccessState", reflect.TypeOf((*MockEC2API)(nil).GetSnapshotBlockPublicAccessState), arg0) +} + +// GetSnapshotBlockPublicAccessStateRequest mocks base method. 
+func (m *MockEC2API) GetSnapshotBlockPublicAccessStateRequest(arg0 *ec2.GetSnapshotBlockPublicAccessStateInput) (*request.Request, *ec2.GetSnapshotBlockPublicAccessStateOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnapshotBlockPublicAccessStateRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.GetSnapshotBlockPublicAccessStateOutput) + return ret0, ret1 +} + +// GetSnapshotBlockPublicAccessStateRequest indicates an expected call of GetSnapshotBlockPublicAccessStateRequest. +func (mr *MockEC2APIMockRecorder) GetSnapshotBlockPublicAccessStateRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotBlockPublicAccessStateRequest", reflect.TypeOf((*MockEC2API)(nil).GetSnapshotBlockPublicAccessStateRequest), arg0) +} + +// GetSnapshotBlockPublicAccessStateWithContext mocks base method. +func (m *MockEC2API) GetSnapshotBlockPublicAccessStateWithContext(arg0 context.Context, arg1 *ec2.GetSnapshotBlockPublicAccessStateInput, arg2 ...request.Option) (*ec2.GetSnapshotBlockPublicAccessStateOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSnapshotBlockPublicAccessStateWithContext", varargs...) + ret0, _ := ret[0].(*ec2.GetSnapshotBlockPublicAccessStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSnapshotBlockPublicAccessStateWithContext indicates an expected call of GetSnapshotBlockPublicAccessStateWithContext. +func (mr *MockEC2APIMockRecorder) GetSnapshotBlockPublicAccessStateWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotBlockPublicAccessStateWithContext", reflect.TypeOf((*MockEC2API)(nil).GetSnapshotBlockPublicAccessStateWithContext), varargs...) +} + // GetSpotPlacementScores mocks base method. func (m *MockEC2API) GetSpotPlacementScores(arg0 *ec2.GetSpotPlacementScoresInput) (*ec2.GetSpotPlacementScoresOutput, error) { m.ctrl.T.Helper() @@ -27624,6 +28706,56 @@ func (mr *MockEC2APIMockRecorder) ListSnapshotsInRecycleBinWithContext(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshotsInRecycleBinWithContext", reflect.TypeOf((*MockEC2API)(nil).ListSnapshotsInRecycleBinWithContext), varargs...) } +// LockSnapshot mocks base method. +func (m *MockEC2API) LockSnapshot(arg0 *ec2.LockSnapshotInput) (*ec2.LockSnapshotOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockSnapshot", arg0) + ret0, _ := ret[0].(*ec2.LockSnapshotOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LockSnapshot indicates an expected call of LockSnapshot. +func (mr *MockEC2APIMockRecorder) LockSnapshot(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockSnapshot", reflect.TypeOf((*MockEC2API)(nil).LockSnapshot), arg0) +} + +// LockSnapshotRequest mocks base method. +func (m *MockEC2API) LockSnapshotRequest(arg0 *ec2.LockSnapshotInput) (*request.Request, *ec2.LockSnapshotOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LockSnapshotRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.LockSnapshotOutput) + return ret0, ret1 +} + +// LockSnapshotRequest indicates an expected call of LockSnapshotRequest. 
+func (mr *MockEC2APIMockRecorder) LockSnapshotRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockSnapshotRequest", reflect.TypeOf((*MockEC2API)(nil).LockSnapshotRequest), arg0) +} + +// LockSnapshotWithContext mocks base method. +func (m *MockEC2API) LockSnapshotWithContext(arg0 context.Context, arg1 *ec2.LockSnapshotInput, arg2 ...request.Option) (*ec2.LockSnapshotOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "LockSnapshotWithContext", varargs...) + ret0, _ := ret[0].(*ec2.LockSnapshotOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LockSnapshotWithContext indicates an expected call of LockSnapshotWithContext. +func (mr *MockEC2APIMockRecorder) LockSnapshotWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockSnapshotWithContext", reflect.TypeOf((*MockEC2API)(nil).LockSnapshotWithContext), varargs...) +} + // ModifyAddressAttribute mocks base method. func (m *MockEC2API) ModifyAddressAttribute(arg0 *ec2.ModifyAddressAttributeInput) (*ec2.ModifyAddressAttributeOutput, error) { m.ctrl.T.Helper() @@ -28574,6 +29706,56 @@ func (mr *MockEC2APIMockRecorder) ModifyInstanceMaintenanceOptionsWithContext(ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyInstanceMaintenanceOptionsWithContext", reflect.TypeOf((*MockEC2API)(nil).ModifyInstanceMaintenanceOptionsWithContext), varargs...) } +// ModifyInstanceMetadataDefaults mocks base method. +func (m *MockEC2API) ModifyInstanceMetadataDefaults(arg0 *ec2.ModifyInstanceMetadataDefaultsInput) (*ec2.ModifyInstanceMetadataDefaultsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ModifyInstanceMetadataDefaults", arg0) + ret0, _ := ret[0].(*ec2.ModifyInstanceMetadataDefaultsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ModifyInstanceMetadataDefaults indicates an expected call of ModifyInstanceMetadataDefaults. +func (mr *MockEC2APIMockRecorder) ModifyInstanceMetadataDefaults(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyInstanceMetadataDefaults", reflect.TypeOf((*MockEC2API)(nil).ModifyInstanceMetadataDefaults), arg0) +} + +// ModifyInstanceMetadataDefaultsRequest mocks base method. +func (m *MockEC2API) ModifyInstanceMetadataDefaultsRequest(arg0 *ec2.ModifyInstanceMetadataDefaultsInput) (*request.Request, *ec2.ModifyInstanceMetadataDefaultsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ModifyInstanceMetadataDefaultsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.ModifyInstanceMetadataDefaultsOutput) + return ret0, ret1 +} + +// ModifyInstanceMetadataDefaultsRequest indicates an expected call of ModifyInstanceMetadataDefaultsRequest. +func (mr *MockEC2APIMockRecorder) ModifyInstanceMetadataDefaultsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyInstanceMetadataDefaultsRequest", reflect.TypeOf((*MockEC2API)(nil).ModifyInstanceMetadataDefaultsRequest), arg0) +} + +// ModifyInstanceMetadataDefaultsWithContext mocks base method. 
+func (m *MockEC2API) ModifyInstanceMetadataDefaultsWithContext(arg0 context.Context, arg1 *ec2.ModifyInstanceMetadataDefaultsInput, arg2 ...request.Option) (*ec2.ModifyInstanceMetadataDefaultsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ModifyInstanceMetadataDefaultsWithContext", varargs...) + ret0, _ := ret[0].(*ec2.ModifyInstanceMetadataDefaultsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ModifyInstanceMetadataDefaultsWithContext indicates an expected call of ModifyInstanceMetadataDefaultsWithContext. +func (mr *MockEC2APIMockRecorder) ModifyInstanceMetadataDefaultsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyInstanceMetadataDefaultsWithContext", reflect.TypeOf((*MockEC2API)(nil).ModifyInstanceMetadataDefaultsWithContext), varargs...) +} + // ModifyInstanceMetadataOptions mocks base method. func (m *MockEC2API) ModifyInstanceMetadataOptions(arg0 *ec2.ModifyInstanceMetadataOptionsInput) (*ec2.ModifyInstanceMetadataOptionsOutput, error) { m.ctrl.T.Helper() @@ -31024,6 +32206,56 @@ func (mr *MockEC2APIMockRecorder) ProvisionByoipCidrWithContext(arg0, arg1 inter return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionByoipCidrWithContext", reflect.TypeOf((*MockEC2API)(nil).ProvisionByoipCidrWithContext), varargs...) } +// ProvisionIpamByoasn mocks base method. +func (m *MockEC2API) ProvisionIpamByoasn(arg0 *ec2.ProvisionIpamByoasnInput) (*ec2.ProvisionIpamByoasnOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProvisionIpamByoasn", arg0) + ret0, _ := ret[0].(*ec2.ProvisionIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProvisionIpamByoasn indicates an expected call of ProvisionIpamByoasn. +func (mr *MockEC2APIMockRecorder) ProvisionIpamByoasn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionIpamByoasn", reflect.TypeOf((*MockEC2API)(nil).ProvisionIpamByoasn), arg0) +} + +// ProvisionIpamByoasnRequest mocks base method. +func (m *MockEC2API) ProvisionIpamByoasnRequest(arg0 *ec2.ProvisionIpamByoasnInput) (*request.Request, *ec2.ProvisionIpamByoasnOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProvisionIpamByoasnRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.ProvisionIpamByoasnOutput) + return ret0, ret1 +} + +// ProvisionIpamByoasnRequest indicates an expected call of ProvisionIpamByoasnRequest. +func (mr *MockEC2APIMockRecorder) ProvisionIpamByoasnRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionIpamByoasnRequest", reflect.TypeOf((*MockEC2API)(nil).ProvisionIpamByoasnRequest), arg0) +} + +// ProvisionIpamByoasnWithContext mocks base method. +func (m *MockEC2API) ProvisionIpamByoasnWithContext(arg0 context.Context, arg1 *ec2.ProvisionIpamByoasnInput, arg2 ...request.Option) (*ec2.ProvisionIpamByoasnOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ProvisionIpamByoasnWithContext", varargs...) 
+ ret0, _ := ret[0].(*ec2.ProvisionIpamByoasnOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProvisionIpamByoasnWithContext indicates an expected call of ProvisionIpamByoasnWithContext. +func (mr *MockEC2APIMockRecorder) ProvisionIpamByoasnWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionIpamByoasnWithContext", reflect.TypeOf((*MockEC2API)(nil).ProvisionIpamByoasnWithContext), varargs...) +} + // ProvisionIpamPoolCidr mocks base method. func (m *MockEC2API) ProvisionIpamPoolCidr(arg0 *ec2.ProvisionIpamPoolCidrInput) (*ec2.ProvisionIpamPoolCidrOutput, error) { m.ctrl.T.Helper() @@ -31124,6 +32356,56 @@ func (mr *MockEC2APIMockRecorder) ProvisionPublicIpv4PoolCidrWithContext(arg0, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionPublicIpv4PoolCidrWithContext", reflect.TypeOf((*MockEC2API)(nil).ProvisionPublicIpv4PoolCidrWithContext), varargs...) } +// PurchaseCapacityBlock mocks base method. +func (m *MockEC2API) PurchaseCapacityBlock(arg0 *ec2.PurchaseCapacityBlockInput) (*ec2.PurchaseCapacityBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PurchaseCapacityBlock", arg0) + ret0, _ := ret[0].(*ec2.PurchaseCapacityBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PurchaseCapacityBlock indicates an expected call of PurchaseCapacityBlock. +func (mr *MockEC2APIMockRecorder) PurchaseCapacityBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurchaseCapacityBlock", reflect.TypeOf((*MockEC2API)(nil).PurchaseCapacityBlock), arg0) +} + +// PurchaseCapacityBlockRequest mocks base method. +func (m *MockEC2API) PurchaseCapacityBlockRequest(arg0 *ec2.PurchaseCapacityBlockInput) (*request.Request, *ec2.PurchaseCapacityBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PurchaseCapacityBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.PurchaseCapacityBlockOutput) + return ret0, ret1 +} + +// PurchaseCapacityBlockRequest indicates an expected call of PurchaseCapacityBlockRequest. +func (mr *MockEC2APIMockRecorder) PurchaseCapacityBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurchaseCapacityBlockRequest", reflect.TypeOf((*MockEC2API)(nil).PurchaseCapacityBlockRequest), arg0) +} + +// PurchaseCapacityBlockWithContext mocks base method. +func (m *MockEC2API) PurchaseCapacityBlockWithContext(arg0 context.Context, arg1 *ec2.PurchaseCapacityBlockInput, arg2 ...request.Option) (*ec2.PurchaseCapacityBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PurchaseCapacityBlockWithContext", varargs...) + ret0, _ := ret[0].(*ec2.PurchaseCapacityBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PurchaseCapacityBlockWithContext indicates an expected call of PurchaseCapacityBlockWithContext. +func (mr *MockEC2APIMockRecorder) PurchaseCapacityBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurchaseCapacityBlockWithContext", reflect.TypeOf((*MockEC2API)(nil).PurchaseCapacityBlockWithContext), varargs...) +} + // PurchaseHostReservation mocks base method. func (m *MockEC2API) PurchaseHostReservation(arg0 *ec2.PurchaseHostReservationInput) (*ec2.PurchaseHostReservationOutput, error) { m.ctrl.T.Helper() @@ -34040,6 +35322,56 @@ func (mr *MockEC2APIMockRecorder) UnassignPrivateNatGatewayAddressWithContext(ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnassignPrivateNatGatewayAddressWithContext", reflect.TypeOf((*MockEC2API)(nil).UnassignPrivateNatGatewayAddressWithContext), varargs...) } +// UnlockSnapshot mocks base method. +func (m *MockEC2API) UnlockSnapshot(arg0 *ec2.UnlockSnapshotInput) (*ec2.UnlockSnapshotOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnlockSnapshot", arg0) + ret0, _ := ret[0].(*ec2.UnlockSnapshotOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnlockSnapshot indicates an expected call of UnlockSnapshot. +func (mr *MockEC2APIMockRecorder) UnlockSnapshot(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnlockSnapshot", reflect.TypeOf((*MockEC2API)(nil).UnlockSnapshot), arg0) +} + +// UnlockSnapshotRequest mocks base method. +func (m *MockEC2API) UnlockSnapshotRequest(arg0 *ec2.UnlockSnapshotInput) (*request.Request, *ec2.UnlockSnapshotOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnlockSnapshotRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*ec2.UnlockSnapshotOutput) + return ret0, ret1 +} + +// UnlockSnapshotRequest indicates an expected call of UnlockSnapshotRequest. +func (mr *MockEC2APIMockRecorder) UnlockSnapshotRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnlockSnapshotRequest", reflect.TypeOf((*MockEC2API)(nil).UnlockSnapshotRequest), arg0) +} + +// UnlockSnapshotWithContext mocks base method. +func (m *MockEC2API) UnlockSnapshotWithContext(arg0 context.Context, arg1 *ec2.UnlockSnapshotInput, arg2 ...request.Option) (*ec2.UnlockSnapshotOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UnlockSnapshotWithContext", varargs...) + ret0, _ := ret[0].(*ec2.UnlockSnapshotOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnlockSnapshotWithContext indicates an expected call of UnlockSnapshotWithContext. +func (mr *MockEC2APIMockRecorder) UnlockSnapshotWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnlockSnapshotWithContext", reflect.TypeOf((*MockEC2API)(nil).UnlockSnapshotWithContext), varargs...) +} + // UnmonitorInstances mocks base method. func (m *MockEC2API) UnmonitorInstances(arg0 *ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error) { m.ctrl.T.Helper() diff --git a/test/mocks/aws_elbv2_mock.go b/test/mocks/aws_elbv2_mock.go index 7258fe33fb..a784f68b5a 100644 --- a/test/mocks/aws_elbv2_mock.go +++ b/test/mocks/aws_elbv2_mock.go @@ -152,6 +152,56 @@ func (mr *MockELBV2APIMockRecorder) AddTagsWithContext(arg0, arg1 interface{}, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTagsWithContext", reflect.TypeOf((*MockELBV2API)(nil).AddTagsWithContext), varargs...) 
} +// AddTrustStoreRevocations mocks base method. +func (m *MockELBV2API) AddTrustStoreRevocations(arg0 *elbv2.AddTrustStoreRevocationsInput) (*elbv2.AddTrustStoreRevocationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddTrustStoreRevocations", arg0) + ret0, _ := ret[0].(*elbv2.AddTrustStoreRevocationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddTrustStoreRevocations indicates an expected call of AddTrustStoreRevocations. +func (mr *MockELBV2APIMockRecorder) AddTrustStoreRevocations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTrustStoreRevocations", reflect.TypeOf((*MockELBV2API)(nil).AddTrustStoreRevocations), arg0) +} + +// AddTrustStoreRevocationsRequest mocks base method. +func (m *MockELBV2API) AddTrustStoreRevocationsRequest(arg0 *elbv2.AddTrustStoreRevocationsInput) (*request.Request, *elbv2.AddTrustStoreRevocationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddTrustStoreRevocationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.AddTrustStoreRevocationsOutput) + return ret0, ret1 +} + +// AddTrustStoreRevocationsRequest indicates an expected call of AddTrustStoreRevocationsRequest. +func (mr *MockELBV2APIMockRecorder) AddTrustStoreRevocationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTrustStoreRevocationsRequest", reflect.TypeOf((*MockELBV2API)(nil).AddTrustStoreRevocationsRequest), arg0) +} + +// AddTrustStoreRevocationsWithContext mocks base method. +func (m *MockELBV2API) AddTrustStoreRevocationsWithContext(arg0 context.Context, arg1 *elbv2.AddTrustStoreRevocationsInput, arg2 ...request.Option) (*elbv2.AddTrustStoreRevocationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AddTrustStoreRevocationsWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.AddTrustStoreRevocationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddTrustStoreRevocationsWithContext indicates an expected call of AddTrustStoreRevocationsWithContext. +func (mr *MockELBV2APIMockRecorder) AddTrustStoreRevocationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTrustStoreRevocationsWithContext", reflect.TypeOf((*MockELBV2API)(nil).AddTrustStoreRevocationsWithContext), varargs...) +} + // CreateListener mocks base method. func (m *MockELBV2API) CreateListener(arg0 *elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) { m.ctrl.T.Helper() @@ -352,6 +402,56 @@ func (mr *MockELBV2APIMockRecorder) CreateTargetGroupWithContext(arg0, arg1 inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTargetGroupWithContext", reflect.TypeOf((*MockELBV2API)(nil).CreateTargetGroupWithContext), varargs...) } +// CreateTrustStore mocks base method. +func (m *MockELBV2API) CreateTrustStore(arg0 *elbv2.CreateTrustStoreInput) (*elbv2.CreateTrustStoreOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTrustStore", arg0) + ret0, _ := ret[0].(*elbv2.CreateTrustStoreOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTrustStore indicates an expected call of CreateTrustStore. 
+func (mr *MockELBV2APIMockRecorder) CreateTrustStore(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTrustStore", reflect.TypeOf((*MockELBV2API)(nil).CreateTrustStore), arg0) +} + +// CreateTrustStoreRequest mocks base method. +func (m *MockELBV2API) CreateTrustStoreRequest(arg0 *elbv2.CreateTrustStoreInput) (*request.Request, *elbv2.CreateTrustStoreOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTrustStoreRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.CreateTrustStoreOutput) + return ret0, ret1 +} + +// CreateTrustStoreRequest indicates an expected call of CreateTrustStoreRequest. +func (mr *MockELBV2APIMockRecorder) CreateTrustStoreRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTrustStoreRequest", reflect.TypeOf((*MockELBV2API)(nil).CreateTrustStoreRequest), arg0) +} + +// CreateTrustStoreWithContext mocks base method. +func (m *MockELBV2API) CreateTrustStoreWithContext(arg0 context.Context, arg1 *elbv2.CreateTrustStoreInput, arg2 ...request.Option) (*elbv2.CreateTrustStoreOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateTrustStoreWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.CreateTrustStoreOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTrustStoreWithContext indicates an expected call of CreateTrustStoreWithContext. +func (mr *MockELBV2APIMockRecorder) CreateTrustStoreWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTrustStoreWithContext", reflect.TypeOf((*MockELBV2API)(nil).CreateTrustStoreWithContext), varargs...) +} + // DeleteListener mocks base method. func (m *MockELBV2API) DeleteListener(arg0 *elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) { m.ctrl.T.Helper() @@ -552,6 +652,56 @@ func (mr *MockELBV2APIMockRecorder) DeleteTargetGroupWithContext(arg0, arg1 inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTargetGroupWithContext", reflect.TypeOf((*MockELBV2API)(nil).DeleteTargetGroupWithContext), varargs...) } +// DeleteTrustStore mocks base method. +func (m *MockELBV2API) DeleteTrustStore(arg0 *elbv2.DeleteTrustStoreInput) (*elbv2.DeleteTrustStoreOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTrustStore", arg0) + ret0, _ := ret[0].(*elbv2.DeleteTrustStoreOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteTrustStore indicates an expected call of DeleteTrustStore. +func (mr *MockELBV2APIMockRecorder) DeleteTrustStore(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTrustStore", reflect.TypeOf((*MockELBV2API)(nil).DeleteTrustStore), arg0) +} + +// DeleteTrustStoreRequest mocks base method. +func (m *MockELBV2API) DeleteTrustStoreRequest(arg0 *elbv2.DeleteTrustStoreInput) (*request.Request, *elbv2.DeleteTrustStoreOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTrustStoreRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.DeleteTrustStoreOutput) + return ret0, ret1 +} + +// DeleteTrustStoreRequest indicates an expected call of DeleteTrustStoreRequest. 
+func (mr *MockELBV2APIMockRecorder) DeleteTrustStoreRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTrustStoreRequest", reflect.TypeOf((*MockELBV2API)(nil).DeleteTrustStoreRequest), arg0) +} + +// DeleteTrustStoreWithContext mocks base method. +func (m *MockELBV2API) DeleteTrustStoreWithContext(arg0 context.Context, arg1 *elbv2.DeleteTrustStoreInput, arg2 ...request.Option) (*elbv2.DeleteTrustStoreOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteTrustStoreWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.DeleteTrustStoreOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteTrustStoreWithContext indicates an expected call of DeleteTrustStoreWithContext. +func (mr *MockELBV2APIMockRecorder) DeleteTrustStoreWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTrustStoreWithContext", reflect.TypeOf((*MockELBV2API)(nil).DeleteTrustStoreWithContext), varargs...) +} + // DeregisterTargets mocks base method. func (m *MockELBV2API) DeregisterTargets(arg0 *elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) { m.ctrl.T.Helper() @@ -1251,6 +1401,355 @@ func (mr *MockELBV2APIMockRecorder) DescribeTargetHealthWithContext(arg0, arg1 i return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTargetHealthWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTargetHealthWithContext), varargs...) } +// DescribeTrustStoreAssociations mocks base method. +func (m *MockELBV2API) DescribeTrustStoreAssociations(arg0 *elbv2.DescribeTrustStoreAssociationsInput) (*elbv2.DescribeTrustStoreAssociationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoreAssociations", arg0) + ret0, _ := ret[0].(*elbv2.DescribeTrustStoreAssociationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTrustStoreAssociations indicates an expected call of DescribeTrustStoreAssociations. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreAssociations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreAssociations", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreAssociations), arg0) +} + +// DescribeTrustStoreAssociationsPages mocks base method. +func (m *MockELBV2API) DescribeTrustStoreAssociationsPages(arg0 *elbv2.DescribeTrustStoreAssociationsInput, arg1 func(*elbv2.DescribeTrustStoreAssociationsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoreAssociationsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeTrustStoreAssociationsPages indicates an expected call of DescribeTrustStoreAssociationsPages. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreAssociationsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreAssociationsPages", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreAssociationsPages), arg0, arg1) +} + +// DescribeTrustStoreAssociationsPagesWithContext mocks base method. 
+func (m *MockELBV2API) DescribeTrustStoreAssociationsPagesWithContext(arg0 context.Context, arg1 *elbv2.DescribeTrustStoreAssociationsInput, arg2 func(*elbv2.DescribeTrustStoreAssociationsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTrustStoreAssociationsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeTrustStoreAssociationsPagesWithContext indicates an expected call of DescribeTrustStoreAssociationsPagesWithContext. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreAssociationsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreAssociationsPagesWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreAssociationsPagesWithContext), varargs...) +} + +// DescribeTrustStoreAssociationsRequest mocks base method. +func (m *MockELBV2API) DescribeTrustStoreAssociationsRequest(arg0 *elbv2.DescribeTrustStoreAssociationsInput) (*request.Request, *elbv2.DescribeTrustStoreAssociationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoreAssociationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.DescribeTrustStoreAssociationsOutput) + return ret0, ret1 +} + +// DescribeTrustStoreAssociationsRequest indicates an expected call of DescribeTrustStoreAssociationsRequest. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreAssociationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreAssociationsRequest", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreAssociationsRequest), arg0) +} + +// DescribeTrustStoreAssociationsWithContext mocks base method. +func (m *MockELBV2API) DescribeTrustStoreAssociationsWithContext(arg0 context.Context, arg1 *elbv2.DescribeTrustStoreAssociationsInput, arg2 ...request.Option) (*elbv2.DescribeTrustStoreAssociationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTrustStoreAssociationsWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.DescribeTrustStoreAssociationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTrustStoreAssociationsWithContext indicates an expected call of DescribeTrustStoreAssociationsWithContext. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreAssociationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreAssociationsWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreAssociationsWithContext), varargs...) +} + +// DescribeTrustStoreRevocations mocks base method. 
+func (m *MockELBV2API) DescribeTrustStoreRevocations(arg0 *elbv2.DescribeTrustStoreRevocationsInput) (*elbv2.DescribeTrustStoreRevocationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoreRevocations", arg0) + ret0, _ := ret[0].(*elbv2.DescribeTrustStoreRevocationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTrustStoreRevocations indicates an expected call of DescribeTrustStoreRevocations. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreRevocations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreRevocations", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreRevocations), arg0) +} + +// DescribeTrustStoreRevocationsPages mocks base method. +func (m *MockELBV2API) DescribeTrustStoreRevocationsPages(arg0 *elbv2.DescribeTrustStoreRevocationsInput, arg1 func(*elbv2.DescribeTrustStoreRevocationsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoreRevocationsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeTrustStoreRevocationsPages indicates an expected call of DescribeTrustStoreRevocationsPages. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreRevocationsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreRevocationsPages", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreRevocationsPages), arg0, arg1) +} + +// DescribeTrustStoreRevocationsPagesWithContext mocks base method. +func (m *MockELBV2API) DescribeTrustStoreRevocationsPagesWithContext(arg0 context.Context, arg1 *elbv2.DescribeTrustStoreRevocationsInput, arg2 func(*elbv2.DescribeTrustStoreRevocationsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTrustStoreRevocationsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeTrustStoreRevocationsPagesWithContext indicates an expected call of DescribeTrustStoreRevocationsPagesWithContext. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreRevocationsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreRevocationsPagesWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreRevocationsPagesWithContext), varargs...) +} + +// DescribeTrustStoreRevocationsRequest mocks base method. +func (m *MockELBV2API) DescribeTrustStoreRevocationsRequest(arg0 *elbv2.DescribeTrustStoreRevocationsInput) (*request.Request, *elbv2.DescribeTrustStoreRevocationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoreRevocationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.DescribeTrustStoreRevocationsOutput) + return ret0, ret1 +} + +// DescribeTrustStoreRevocationsRequest indicates an expected call of DescribeTrustStoreRevocationsRequest. 
+func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreRevocationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreRevocationsRequest", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreRevocationsRequest), arg0) +} + +// DescribeTrustStoreRevocationsWithContext mocks base method. +func (m *MockELBV2API) DescribeTrustStoreRevocationsWithContext(arg0 context.Context, arg1 *elbv2.DescribeTrustStoreRevocationsInput, arg2 ...request.Option) (*elbv2.DescribeTrustStoreRevocationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTrustStoreRevocationsWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.DescribeTrustStoreRevocationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTrustStoreRevocationsWithContext indicates an expected call of DescribeTrustStoreRevocationsWithContext. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoreRevocationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoreRevocationsWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoreRevocationsWithContext), varargs...) +} + +// DescribeTrustStores mocks base method. +func (m *MockELBV2API) DescribeTrustStores(arg0 *elbv2.DescribeTrustStoresInput) (*elbv2.DescribeTrustStoresOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStores", arg0) + ret0, _ := ret[0].(*elbv2.DescribeTrustStoresOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTrustStores indicates an expected call of DescribeTrustStores. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStores(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStores", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStores), arg0) +} + +// DescribeTrustStoresPages mocks base method. +func (m *MockELBV2API) DescribeTrustStoresPages(arg0 *elbv2.DescribeTrustStoresInput, arg1 func(*elbv2.DescribeTrustStoresOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoresPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeTrustStoresPages indicates an expected call of DescribeTrustStoresPages. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoresPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoresPages", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoresPages), arg0, arg1) +} + +// DescribeTrustStoresPagesWithContext mocks base method. +func (m *MockELBV2API) DescribeTrustStoresPagesWithContext(arg0 context.Context, arg1 *elbv2.DescribeTrustStoresInput, arg2 func(*elbv2.DescribeTrustStoresOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTrustStoresPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DescribeTrustStoresPagesWithContext indicates an expected call of DescribeTrustStoresPagesWithContext. 
+func (mr *MockELBV2APIMockRecorder) DescribeTrustStoresPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoresPagesWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoresPagesWithContext), varargs...) +} + +// DescribeTrustStoresRequest mocks base method. +func (m *MockELBV2API) DescribeTrustStoresRequest(arg0 *elbv2.DescribeTrustStoresInput) (*request.Request, *elbv2.DescribeTrustStoresOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTrustStoresRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.DescribeTrustStoresOutput) + return ret0, ret1 +} + +// DescribeTrustStoresRequest indicates an expected call of DescribeTrustStoresRequest. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoresRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoresRequest", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoresRequest), arg0) +} + +// DescribeTrustStoresWithContext mocks base method. +func (m *MockELBV2API) DescribeTrustStoresWithContext(arg0 context.Context, arg1 *elbv2.DescribeTrustStoresInput, arg2 ...request.Option) (*elbv2.DescribeTrustStoresOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTrustStoresWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.DescribeTrustStoresOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTrustStoresWithContext indicates an expected call of DescribeTrustStoresWithContext. +func (mr *MockELBV2APIMockRecorder) DescribeTrustStoresWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTrustStoresWithContext", reflect.TypeOf((*MockELBV2API)(nil).DescribeTrustStoresWithContext), varargs...) +} + +// GetTrustStoreCaCertificatesBundle mocks base method. +func (m *MockELBV2API) GetTrustStoreCaCertificatesBundle(arg0 *elbv2.GetTrustStoreCaCertificatesBundleInput) (*elbv2.GetTrustStoreCaCertificatesBundleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTrustStoreCaCertificatesBundle", arg0) + ret0, _ := ret[0].(*elbv2.GetTrustStoreCaCertificatesBundleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTrustStoreCaCertificatesBundle indicates an expected call of GetTrustStoreCaCertificatesBundle. +func (mr *MockELBV2APIMockRecorder) GetTrustStoreCaCertificatesBundle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrustStoreCaCertificatesBundle", reflect.TypeOf((*MockELBV2API)(nil).GetTrustStoreCaCertificatesBundle), arg0) +} + +// GetTrustStoreCaCertificatesBundleRequest mocks base method. 
+func (m *MockELBV2API) GetTrustStoreCaCertificatesBundleRequest(arg0 *elbv2.GetTrustStoreCaCertificatesBundleInput) (*request.Request, *elbv2.GetTrustStoreCaCertificatesBundleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTrustStoreCaCertificatesBundleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.GetTrustStoreCaCertificatesBundleOutput) + return ret0, ret1 +} + +// GetTrustStoreCaCertificatesBundleRequest indicates an expected call of GetTrustStoreCaCertificatesBundleRequest. +func (mr *MockELBV2APIMockRecorder) GetTrustStoreCaCertificatesBundleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrustStoreCaCertificatesBundleRequest", reflect.TypeOf((*MockELBV2API)(nil).GetTrustStoreCaCertificatesBundleRequest), arg0) +} + +// GetTrustStoreCaCertificatesBundleWithContext mocks base method. +func (m *MockELBV2API) GetTrustStoreCaCertificatesBundleWithContext(arg0 context.Context, arg1 *elbv2.GetTrustStoreCaCertificatesBundleInput, arg2 ...request.Option) (*elbv2.GetTrustStoreCaCertificatesBundleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetTrustStoreCaCertificatesBundleWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.GetTrustStoreCaCertificatesBundleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTrustStoreCaCertificatesBundleWithContext indicates an expected call of GetTrustStoreCaCertificatesBundleWithContext. +func (mr *MockELBV2APIMockRecorder) GetTrustStoreCaCertificatesBundleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrustStoreCaCertificatesBundleWithContext", reflect.TypeOf((*MockELBV2API)(nil).GetTrustStoreCaCertificatesBundleWithContext), varargs...) +} + +// GetTrustStoreRevocationContent mocks base method. +func (m *MockELBV2API) GetTrustStoreRevocationContent(arg0 *elbv2.GetTrustStoreRevocationContentInput) (*elbv2.GetTrustStoreRevocationContentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTrustStoreRevocationContent", arg0) + ret0, _ := ret[0].(*elbv2.GetTrustStoreRevocationContentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTrustStoreRevocationContent indicates an expected call of GetTrustStoreRevocationContent. +func (mr *MockELBV2APIMockRecorder) GetTrustStoreRevocationContent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrustStoreRevocationContent", reflect.TypeOf((*MockELBV2API)(nil).GetTrustStoreRevocationContent), arg0) +} + +// GetTrustStoreRevocationContentRequest mocks base method. +func (m *MockELBV2API) GetTrustStoreRevocationContentRequest(arg0 *elbv2.GetTrustStoreRevocationContentInput) (*request.Request, *elbv2.GetTrustStoreRevocationContentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTrustStoreRevocationContentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.GetTrustStoreRevocationContentOutput) + return ret0, ret1 +} + +// GetTrustStoreRevocationContentRequest indicates an expected call of GetTrustStoreRevocationContentRequest. 
+func (mr *MockELBV2APIMockRecorder) GetTrustStoreRevocationContentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrustStoreRevocationContentRequest", reflect.TypeOf((*MockELBV2API)(nil).GetTrustStoreRevocationContentRequest), arg0) +} + +// GetTrustStoreRevocationContentWithContext mocks base method. +func (m *MockELBV2API) GetTrustStoreRevocationContentWithContext(arg0 context.Context, arg1 *elbv2.GetTrustStoreRevocationContentInput, arg2 ...request.Option) (*elbv2.GetTrustStoreRevocationContentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetTrustStoreRevocationContentWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.GetTrustStoreRevocationContentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTrustStoreRevocationContentWithContext indicates an expected call of GetTrustStoreRevocationContentWithContext. +func (mr *MockELBV2APIMockRecorder) GetTrustStoreRevocationContentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrustStoreRevocationContentWithContext", reflect.TypeOf((*MockELBV2API)(nil).GetTrustStoreRevocationContentWithContext), varargs...) +} + // ModifyListener mocks base method. func (m *MockELBV2API) ModifyListener(arg0 *elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) { m.ctrl.T.Helper() @@ -1501,6 +2000,56 @@ func (mr *MockELBV2APIMockRecorder) ModifyTargetGroupWithContext(arg0, arg1 inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyTargetGroupWithContext", reflect.TypeOf((*MockELBV2API)(nil).ModifyTargetGroupWithContext), varargs...) } +// ModifyTrustStore mocks base method. +func (m *MockELBV2API) ModifyTrustStore(arg0 *elbv2.ModifyTrustStoreInput) (*elbv2.ModifyTrustStoreOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ModifyTrustStore", arg0) + ret0, _ := ret[0].(*elbv2.ModifyTrustStoreOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ModifyTrustStore indicates an expected call of ModifyTrustStore. +func (mr *MockELBV2APIMockRecorder) ModifyTrustStore(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyTrustStore", reflect.TypeOf((*MockELBV2API)(nil).ModifyTrustStore), arg0) +} + +// ModifyTrustStoreRequest mocks base method. +func (m *MockELBV2API) ModifyTrustStoreRequest(arg0 *elbv2.ModifyTrustStoreInput) (*request.Request, *elbv2.ModifyTrustStoreOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ModifyTrustStoreRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.ModifyTrustStoreOutput) + return ret0, ret1 +} + +// ModifyTrustStoreRequest indicates an expected call of ModifyTrustStoreRequest. +func (mr *MockELBV2APIMockRecorder) ModifyTrustStoreRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyTrustStoreRequest", reflect.TypeOf((*MockELBV2API)(nil).ModifyTrustStoreRequest), arg0) +} + +// ModifyTrustStoreWithContext mocks base method. 
+func (m *MockELBV2API) ModifyTrustStoreWithContext(arg0 context.Context, arg1 *elbv2.ModifyTrustStoreInput, arg2 ...request.Option) (*elbv2.ModifyTrustStoreOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ModifyTrustStoreWithContext", varargs...) + ret0, _ := ret[0].(*elbv2.ModifyTrustStoreOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ModifyTrustStoreWithContext indicates an expected call of ModifyTrustStoreWithContext. +func (mr *MockELBV2APIMockRecorder) ModifyTrustStoreWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ModifyTrustStoreWithContext", reflect.TypeOf((*MockELBV2API)(nil).ModifyTrustStoreWithContext), varargs...) +} + // RegisterTargets mocks base method. func (m *MockELBV2API) RegisterTargets(arg0 *elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) { m.ctrl.T.Helper() @@ -1651,6 +2200,56 @@ func (mr *MockELBV2APIMockRecorder) RemoveTagsWithContext(arg0, arg1 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTagsWithContext", reflect.TypeOf((*MockELBV2API)(nil).RemoveTagsWithContext), varargs...) } +// RemoveTrustStoreRevocations mocks base method. +func (m *MockELBV2API) RemoveTrustStoreRevocations(arg0 *elbv2.RemoveTrustStoreRevocationsInput) (*elbv2.RemoveTrustStoreRevocationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveTrustStoreRevocations", arg0) + ret0, _ := ret[0].(*elbv2.RemoveTrustStoreRevocationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveTrustStoreRevocations indicates an expected call of RemoveTrustStoreRevocations. +func (mr *MockELBV2APIMockRecorder) RemoveTrustStoreRevocations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTrustStoreRevocations", reflect.TypeOf((*MockELBV2API)(nil).RemoveTrustStoreRevocations), arg0) +} + +// RemoveTrustStoreRevocationsRequest mocks base method. +func (m *MockELBV2API) RemoveTrustStoreRevocationsRequest(arg0 *elbv2.RemoveTrustStoreRevocationsInput) (*request.Request, *elbv2.RemoveTrustStoreRevocationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveTrustStoreRevocationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*elbv2.RemoveTrustStoreRevocationsOutput) + return ret0, ret1 +} + +// RemoveTrustStoreRevocationsRequest indicates an expected call of RemoveTrustStoreRevocationsRequest. +func (mr *MockELBV2APIMockRecorder) RemoveTrustStoreRevocationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTrustStoreRevocationsRequest", reflect.TypeOf((*MockELBV2API)(nil).RemoveTrustStoreRevocationsRequest), arg0) +} + +// RemoveTrustStoreRevocationsWithContext mocks base method. +func (m *MockELBV2API) RemoveTrustStoreRevocationsWithContext(arg0 context.Context, arg1 *elbv2.RemoveTrustStoreRevocationsInput, arg2 ...request.Option) (*elbv2.RemoveTrustStoreRevocationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RemoveTrustStoreRevocationsWithContext", varargs...) 
+ ret0, _ := ret[0].(*elbv2.RemoveTrustStoreRevocationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveTrustStoreRevocationsWithContext indicates an expected call of RemoveTrustStoreRevocationsWithContext. +func (mr *MockELBV2APIMockRecorder) RemoveTrustStoreRevocationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTrustStoreRevocationsWithContext", reflect.TypeOf((*MockELBV2API)(nil).RemoveTrustStoreRevocationsWithContext), varargs...) +} + // SetIpAddressType mocks base method. func (m *MockELBV2API) SetIpAddressType(arg0 *elbv2.SetIpAddressTypeInput) (*elbv2.SetIpAddressTypeOutput, error) { m.ctrl.T.Helper() diff --git a/test/mocks/capa_clusterscoper_mock.go b/test/mocks/capa_clusterscoper_mock.go index 0fb8d7b2e3..54b9ee81eb 100644 --- a/test/mocks/capa_clusterscoper_mock.go +++ b/test/mocks/capa_clusterscoper_mock.go @@ -26,6 +26,7 @@ import ( client "github.com/aws/aws-sdk-go/aws/client" logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" + unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" cloud "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" throttle "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" @@ -375,6 +376,21 @@ func (mr *MockClusterScoperMockRecorder) Trace(arg0 interface{}, arg1 ...interfa return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trace", reflect.TypeOf((*MockClusterScoper)(nil).Trace), varargs...) } +// UnstructuredControlPlane mocks base method. +func (m *MockClusterScoper) UnstructuredControlPlane() (*unstructured.Unstructured, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnstructuredControlPlane") + ret0, _ := ret[0].(*unstructured.Unstructured) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnstructuredControlPlane indicates an expected call of UnstructuredControlPlane. +func (mr *MockClusterScoperMockRecorder) UnstructuredControlPlane() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnstructuredControlPlane", reflect.TypeOf((*MockClusterScoper)(nil).UnstructuredControlPlane)) +} + // Warn mocks base method. func (m *MockClusterScoper) Warn(arg0 string, arg1 ...interface{}) { m.ctrl.T.Helper() diff --git a/test/mocks/generate_aws.go b/test/mocks/generate_aws.go index 5c5e5a7f02..f3b08973ec 100644 --- a/test/mocks/generate_aws.go +++ b/test/mocks/generate_aws.go @@ -14,16 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mocks provides a way to generate mock objects for AWS services. 
+ //go:generate ../../hack/tools/bin/mockgen -destination aws_elbv2_mock.go -package mocks github.com/aws/aws-sdk-go/service/elbv2/elbv2iface ELBV2API //go:generate /usr/bin/env bash -c "cat ../../hack/boilerplate/boilerplate.generatego.txt aws_elbv2_mock.go > _aws_elbv2_mock.go && mv _aws_elbv2_mock.go aws_elbv2_mock.go" - //go:generate ../../hack/tools/bin/mockgen -destination aws_elb_mock.go -package mocks github.com/aws/aws-sdk-go/service/elb/elbiface ELBAPI //go:generate /usr/bin/env bash -c "cat ../../hack/boilerplate/boilerplate.generatego.txt aws_elb_mock.go > _aws_elb_mock.go && mv _aws_elb_mock.go aws_elb_mock.go" - //go:generate ../../hack/tools/bin/mockgen -destination aws_rgtagging_mock.go -package mocks github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface ResourceGroupsTaggingAPIAPI //go:generate /usr/bin/env bash -c "cat ../../hack/boilerplate/boilerplate.generatego.txt aws_rgtagging_mock.go > _aws_rgtagging_mock.go && mv _aws_rgtagging_mock.go aws_rgtagging_mock.go" - //go:generate ../../hack/tools/bin/mockgen -destination aws_ec2api_mock.go -package mocks github.com/aws/aws-sdk-go/service/ec2/ec2iface EC2API //go:generate /usr/bin/env bash -c "cat ../../hack/boilerplate/boilerplate.generatego.txt aws_ec2api_mock.go > _aws_ec2api_mock.go && mv _aws_ec2api_mock.go aws_ec2api_mock.go" - package mocks diff --git a/util/conditions/helper.go b/util/conditions/helper.go index c4e4ad7a2a..2acb09093e 100644 --- a/util/conditions/helper.go +++ b/util/conditions/helper.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package conditions provides helper functions for working with conditions. package conditions import ( diff --git a/util/system/util.go b/util/system/util.go index 786150950d..0b6eb9507c 100644 --- a/util/system/util.go +++ b/util/system/util.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package system contains utilities for the system namespace. package system import ( diff --git a/version/version.go b/version/version.go index 4132c9f016..b895ae2daf 100644 --- a/version/version.go +++ b/version/version.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package version provides the version of the manager. package version import ( diff --git a/versions.mk b/versions.mk index 7b51e18117..653fb00f1c 100644 --- a/versions.mk +++ b/versions.mk @@ -14,10 +14,10 @@ MDBOOK_VERSION := v0.4.5 PLANTUML_VERSION := 1.2020.16 -GH_VERSION := 2.7.0 -CERT_MANAGER_VERSION := v1.12.2 -CAPI_VERSION := v1.6.1 +CERT_MANAGER_VERSION := v1.14.4 +CAPI_VERSION := v1.7.1 KPROMO_VERSION := v4.0.4 YQ_VERSION := v4.25.2 GOLANGCI_LINT_VERSION := v1.53.3 RELEASE_NOTES_VERSION := v0.16.5 +GORELEASER_VERSION := v1.24.0
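For context on how the regenerated mocks above are typically consumed: a gomock-based unit test creates a controller, instantiates the generated mock, and records expectations on the newly added methods. The sketch below is illustrative only and is not part of this diff; the test name and the sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks import path are assumptions based on the package shown above.

package mocks_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/golang/mock/gomock"

	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" // assumed import path for the generated mocks
)

// TestGetSecurityGroupsForVpcMock sketches how the generated MockEC2API can
// stub the newly added GetSecurityGroupsForVpc method in a unit test.
func TestGetSecurityGroupsForVpcMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ec2Mock := mocks.NewMockEC2API(ctrl)

	// Expect exactly one call and return an empty output with no error.
	ec2Mock.EXPECT().
		GetSecurityGroupsForVpc(gomock.Any()).
		Return(&ec2.GetSecurityGroupsForVpcOutput{}, nil)

	if _, err := ec2Mock.GetSecurityGroupsForVpc(&ec2.GetSecurityGroupsForVpcInput{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}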