diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..47ec282 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,19 @@ +.idea +.code +local +bin +.git +.gitignore +.github +charts +docs +example +Dockerfile +.gitlab-ci.yml +env.local +docker-compose* +Makefile +README.md +sonar-project.properties +*.iml +.dockerignore diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..894e53d --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,58 @@ +on: + push: + paths-ignore: + - 'charts/**' + - 'example/**' + - 'docs/**' + - '/*.md' + branches: + - '*' + pull_request: + paths-ignore: + - 'example/**' + - 'charts/**' + - 'docs/**' + - '/*.md' + workflow_call: + +name: Build and test +jobs: + build: + strategy: + matrix: + architecture: [amd64, arm64] + os: [linux, darwin] + + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go 1.21.x + uses: actions/setup-go@v4 + with: + go-version: '1.21' + cache: true + id: go + + - name: Download Go modules + run: go mod download + env: + GOPROXY: https://proxy.golang.org + + - name: Build + run: GOOS=${{ matrix.os }} GOARCH=${{matrix.architecture}} go build ${{ env.LDFLAGS }} -o bin/node-undertaker-${{ matrix.os }}-${{ matrix.architecture }} ./cmd/node-undertaker + + - name: Test + run: | + go get github.com/golang/mock/mockgen + go install github.com/golang/mock/mockgen + go generate ./... + go test ./... 
+ + - uses: actions/upload-artifact@v3 + if: github.event_name != 'pull_request' + with: + name: node-undertaker-binaries + path: bin/node-undertaker-* + if-no-files-found: error diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..1ed66d5 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,60 @@ +on: + schedule: + - cron: '5 11 * * 3' + push: + paths-ignore: + - 'charts/**' + - 'example/**' + - 'docs/**' + - '/*.md' + branches: + - 'main' + - 'release-*' + pull_request: + branches: + - 'main' + - 'release-*' + paths-ignore: + - 'charts/**' + - 'example/**' + - 'docs/**' + - '/*.md' + workflow_call: + +name: CodeQL analysis +jobs: + codeql-build: + permissions: + security-events: write + strategy: + matrix: + architecture: [amd64] + os: [linux] + + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.21.x + uses: actions/setup-go@v2 + with: + go-version: 1.21.x + cache: true + id: go + + - name: Checkout code + uses: actions/checkout@v3 + + - name: Download Go modules + run: go mod download + env: + GOPROXY: https://proxy.golang.org + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: go + + - name: Build + run: GOOS=${{ matrix.os }} GOARCH=${{matrix.architecture}} go build -o bin/node-undertaker ./cmd/node-undertaker + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..5bdcb1c --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,81 @@ +name: Build docker image + +on: + workflow_call: + push: + paths-ignore: + - 'charts/**' + - 'docs/**' + - 'example/**' + - '/*.md' + branches: + - '*' + pull_request: + paths-ignore: + - 'charts/**' + - 'example/**' + - 'docs/**' + - '/*.md' +env: + # Use docker.io for Docker Hub if empty + REGISTRY: ghcr.io + # github.repository as / + IMAGE_NAME: ${{ github.repository }} + +jobs: + build: + + 
runs-on: ubuntu-latest + permissions: + contents: read + packages: write + # This is used to complete the identity challenge + # with sigstore/fulcio when running outside of PRs. + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Workaround: https://github.com/docker/build-push-action/issues/461 + - name: Setup Docker buildx + uses: docker/setup-buildx-action@v3 + + # Login against a Docker registry except on PR + # https://github.com/docker/login-action + - name: Log into registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Extract metadata (tags, labels) for Docker + # https://github.com/docker/metadata-action + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} + type=ref,event=tag + type=ref,event=branch + type=ref,event=pr + + # Build and push Docker image with Buildx (don't push on PR) + # https://github.com/docker/build-push-action + - name: Build and push Docker image + id: build-and-push + uses: docker/build-push-action@v5 + with: + context: . 
+ push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 #,darwin/arm64,darwin/amd64 + build-args: ${{ github.event_name == 'push' && github.ref_type == 'tag' && format('RELEASE_VERSION={0}', github.ref_name) || '' }} + cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:buildcache + cache-to: ${{ github.event_name != 'pull_request' && format( 'type=registry,ref={0}/{1}:buildcache,mode=max', env.REGISTRY, env.IMAGE_NAME ) || '' }} + diff --git a/.github/workflows/release-helm-chart.yml b/.github/workflows/release-helm-chart.yml new file mode 100644 index 0000000..0add83c --- /dev/null +++ b/.github/workflows/release-helm-chart.yml @@ -0,0 +1,31 @@ +on: + push: + paths: + - "charts/**" + branches: + - 'main' + +name: Release +jobs: + create-helm-release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Install Helm + uses: azure/setup-helm@v1 + with: + version: v3.8.1 + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.5.0 + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..c077ff3 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,31 @@ +on: + push: + tags: + - "v*.*.*" + +name: Release +jobs: + docker-build: + uses: ./.github/workflows/docker.yml + build-and-test: + uses: ./.github/workflows/build.yml + create-release: + needs: + - docker-build + - build-and-test + runs-on: ubuntu-latest + steps: + - uses: actions/download-artifact@v3 + with: + name: node-undertaker-binaries + path: bin + - name: debug list files + run: find . 
+ - name: Release + uses: softprops/action-gh-release@v1 + with: + files: bin/node-undertaker* + append_body: true + body: | + Build also available as docker image: + `ghcr.io/${{ github.repository }}:${{ github.ref_name }}` diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7e18c07 --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +./bin +bin/ +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test +**/mocks/*_mocks.go + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +.idea +.code + +etcd-peers +local/certs +site/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..eb4cb63 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,43 @@ +variables: + TYPE: app + +image: golang:1.21 + +include: + - project: shared-scripts/gitlab-ci + file: /autodevops-golang.yml + ref: v1 + +# mockgen setup +Test: + before_script: + - export PATH=$GOPATH/bin:$PATH + - go get github.com/golang/mock/mockgen + - go install github.com/golang/mock/mockgen +Vet: + before_script: + - export PATH=$GOPATH/bin:$PATH + - go get github.com/golang/mock/mockgen + - go install github.com/golang/mock/mockgen +Build: + before_script: + - export PATH=$GOPATH/bin:$PATH + - go get github.com/golang/mock/mockgen + - go install github.com/golang/mock/mockgen +Dependency-Track: + before_script: + - export PATH=$GOPATH/bin:$PATH + - go get github.com/golang/mock/mockgen + - go install github.com/golang/mock/mockgen + +Docker Release: + variables: + PROJECT_VERSION: "${CI_COMMIT_REF_NAME}" + +Deploy Dev Manual: + rules: + - when: never + +Deploy Fat: + rules: + - when: never diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..9f92d1e --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,42 @@ +repos: + - repo: 
https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-merge-conflict + - id: detect-private-key + - id: detect-aws-credentials + - repo: https://github.com/gruntwork-io/pre-commit + rev: v0.1.17 + hooks: + - id: helmlint + - repo: https://github.com/dnephin/pre-commit-golang + rev: v0.5.0 + hooks: + - id: go-fmt + - id: validate-toml + - id: no-go-testing + - id: golangci-lint +# - id: go-unit-tests # doesn't work properly + - id: go-build + - id: go-mod-tidy + - repo: https://github.com/Bahjat/pre-commit-golang + rev: v1.0.2 + hooks: + - id: go-fmt-import + - id: go-vet + - repo: https://github.com/tekwizely/pre-commit-golang + rev: v1.0.0-beta.5 + hooks: + - name: govulncheck + id: my-cmd-repo-mod + args: + - govulncheck + - ./... + - repo: https://github.com/antonbabenko/pre-commit-terraform + rev: v1.43.0 + hooks: + - id: terraform_fmt + - id: terraform_docs diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..21e2a0f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,24 @@ +FROM --platform=$BUILDPLATFORM golang:1.21-alpine AS builder + +ARG TARGETOS +ARG TARGETARCH + +# Install our build tools +RUN apk add --update ca-certificates + +WORKDIR /go/src/app + +COPY . 
./ + +RUN GOOS=$TARGETOS GOARCH=$TARGETARCH CGO_ENABLED=0 go build -ldflags="$LDFLAGS" -o bin/node-undertaker github.com/dbschenker/node-undertaker/cmd/node-undertaker + +RUN echo "nonroot:x:1337:1337:nonroot:/nonroot:/usr/sbin/nologin" > /etc_passwd + +FROM --platform=$BUILDPLATFORM scratch +COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ +COPY --from=builder /go/src/app/bin/* / +COPY --from=builder /etc_passwd /etc/passwd + +USER nonroot + +ENTRYPOINT ["/node-undertaker"] diff --git a/LICENSE b/LICENSE index 261eeb9..5e6b9c7 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2023 Schenker AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..98bda36 --- /dev/null +++ b/Makefile @@ -0,0 +1,46 @@ +all: clean test build + +build: build-node-undertaker + +build-node-undertaker: + go build -o bin/node-undertaker github.com/dbschenker/node-undertaker/cmd/node-undertaker + +test: mock + go test ./... + +clean: + rm -r bin/ || true + +docker: + docker buildx build -t node-undertaker:local . + +lint: + golangci-lint run ./... -v + +mock: + go generate ./... + +clean_mocks: + find . -name '*_mocks.go' -delete + +vuln: + govulncheck ./... + +vet: + go vet ./... 
+ +kind: + kind create cluster --config example/kind/config.yaml + +kind_load: + kind load docker-image node-undertaker:local + +kind_helm: + helm upgrade --install -n node-undertaker node-undertaker charts/node-undertaker --create-namespace -f example/kind/values.yaml + +local: + bin/node-undertaker --namespace kube-node-lease --log-level=debug --cloud-provider=kwok --cloud-termination-delay=180 --cloud-prepare-termination-delay=200 --drain-delay=190 --node-initial-threshold 45 + +kwok: + kwokctl create cluster + kubectl config use-context kwok-kwok diff --git a/README.md b/README.md new file mode 100644 index 0000000..6af2184 --- /dev/null +++ b/README.md @@ -0,0 +1,121 @@ +# Node undertaker + +Node-undertaker is a tool that was built to address handling Kubernetes nodes that are unhealthy. + +Kubernetes itself marks such nodes and then using NoExecute taint removes pods out of them. But such a node still +runs in the cloud provider and consumes resources. This tool detects such nodes and terminates them in the cloud provider. + +Currently supported cloud providers: +* AWS +* kind (for testing & development) +* kwok (for testing & development) + +## How it works + +This tool checks every minute all the nodes if they have "fresh" lease in a namespace. +It can check leases in the kube-node-lease namespace (created by kubelet) or any other namespace that contains similar leases (for custom healthchecking solution). + +![Diagram](docs/states.png) + + +## Getting started + +### Cloud provider setup + +Before you can start node-undertaker it needs credentials with access granted to cloud provider. 
+ +#### AWS +For AWS node-undertaker requires to have granted IAM role with following policy: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:TerminateInstances", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeTrafficSources", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets" + ], + "Resource": "*" + } + ] +} +``` + +In case there are more resources than one cluster it is advised to limit access to only one cluster's resources (for example by using Conditions). Example policy for clusters tagged with 'kubernetes.io/cluster/CLUSTER_NAME=owned': + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:TerminateInstances", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets" + ], + "Resource": "*", + "Condition": { + "StringLike": { + "aws:ResourceTag/kubernetes.io/cluster/CLUSTER_NAME": "owned" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeTrafficSources" + ], + "Resource": "*" + } + ] +} +``` + +### Installation +#### With helm + +1. First add helm chart repository: + ``` + helm repo add dbschenker https://dbschenker.github.io/node-undertaker + ``` +2. Install helm chart + ```shell + helm upgrade --install --create-namespace -n node-undertaker node-undertaker node-undertaker + ``` + +## Development + +### Requirements + +1. golang +2. docker (for running tests) +3. (optional) kwok & kind - useful for manual testing +4. (optional) make - for convenient building + +### Testing + +#### With kwok +1. Create cluster & switch to its context: `make kwok` +2. Run node-undertaker locally: `make local` or run command with customized configuration +3. Create node with required configuration - examples in `example/kwok/node*.yaml` +4. 
(Optional) manually update node's lease with `example/kwok/create-node-lease.sh NODE_NAME kube-node-lease 100` + * NODE_NAME - is the node name whose lease has to be updated + * kube-node-lease - is the namespace that holds the leases + * 100 - is the lease duration to set + +Cleanup: `kwokctl delete cluster` + +#### With kind +1. create cluster: `make kind` +2. build local image: `make docker` +3. load image to kind: `make kind_load` +4. Install with helm: `make kind_helm` + +Cleanup: `kind delete cluster` diff --git a/catalog-info.yaml b/catalog-info.yaml new file mode 100644 index 0000000..dcc1318 --- /dev/null +++ b/catalog-info.yaml @@ -0,0 +1,22 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: node-undertaker + description: Node undertaker terminates kubernetes nodes that are unhealthy + annotations: + # project key in sonarqube + sonarqube.org/project-key: 'github.com_dbschenker_node-undertaker' + # Mark for backstage that this Component has techdocs included + backstage.io/techdocs-ref: dir:. 
+ links: [] + tags: + - no-kpi +spec: + type: service + lifecycle: experimental + owner: devops-platforms + system: toolbox + partOf: + - component:default/toolbox-kubernetes-cluster + dependsOn: + - component:default/version-control-system diff --git a/charts/install.sh b/charts/install.sh new file mode 100755 index 0000000..0076405 --- /dev/null +++ b/charts/install.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +helm upgrade --install --create-namespace -n node-undertaker node-undertaker node-undertaker \ + --set controller.image.tag=local \ + --set controller.settings.cloudProvider=kind \ + --set controller.settings.logLevel=debug \ + --set-string controller.podAnnotations.prometheus\\.io\\/scrape=true \ + --set controller.podAnnotations."prometheus\.io\/path"=/metrics \ + --set-string controller.podAnnotations.prometheus\\.io\\/port=8080 \ + --set controller.settings.nodeLeaseNamespace=kube-node-lease \ + --set controller.settings.nodeSelector="" \ + $@ diff --git a/charts/node-undertaker/.helmignore b/charts/node-undertaker/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/charts/node-undertaker/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/node-undertaker/Chart.yaml b/charts/node-undertaker/Chart.yaml new file mode 100644 index 0000000..4581824 --- /dev/null +++ b/charts/node-undertaker/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: node-undertaker +description: A Helm chart for node-undertaker + +# A chart can be either an 'application' or a 'library' chart. 
+# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 1.0.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.0.0" diff --git a/charts/node-undertaker/templates/_helpers.tpl b/charts/node-undertaker/templates/_helpers.tpl new file mode 100644 index 0000000..650d271 --- /dev/null +++ b/charts/node-undertaker/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "node-undertaker.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "node-undertaker.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "node-undertaker.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "node-undertaker.labels" -}} +helm.sh/chart: {{ include "node-undertaker.chart" . }} +{{ include "node-undertaker.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "node-undertaker.selectorLabels" -}} +app.kubernetes.io/name: {{ include "node-undertaker.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "node-undertaker.serviceAccountName" -}} +{{- if .Values.controller.serviceAccount.create }} +{{- default (include "node-undertaker.fullname" .) .Values.controller.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.controller.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/node-undertaker/templates/deployment.yaml b/charts/node-undertaker/templates/deployment.yaml new file mode 100644 index 0000000..74f8057 --- /dev/null +++ b/charts/node-undertaker/templates/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "node-undertaker.fullname" . }}-controller + labels: + {{- include "node-undertaker.labels" . 
| nindent 4 }} +spec: + replicas: {{ .Values.controller.replicaCount }} + selector: + matchLabels: + {{- include "node-undertaker.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.controller.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "node-undertaker.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.controller.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "node-undertaker.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + env: + - name: PORT + value: {{ .Values.controller.port | quote }} + {{- range $key, $value := .Values.controller.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + securityContext: + {{- toYaml .Values.controller.securityContext | nindent 12 }} + image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.controller.port }} + protocol: TCP + livenessProbe: + httpGet: + path: /livez + port: http + initialDelaySeconds: 10 + readinessProbe: + httpGet: + path: /readyz + port: http + initialDelaySeconds: 10 + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/charts/node-undertaker/templates/pdb.yaml b/charts/node-undertaker/templates/pdb.yaml new file mode 100644 index 0000000..b2c3255 --- /dev/null +++ b/charts/node-undertaker/templates/pdb.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.controller.pdb.enabled (gt .Values.controller.replicaCount 1.0) }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + {{- include "node-undertaker.labels" . | nindent 4 }} + name: {{ include "node-undertaker.fullname" . }}-controller +spec: + maxUnavailable: {{ .Values.controller.pdb.maxUnavailable }} + selector: + matchLabels: + {{- include "node-undertaker.selectorLabels" . | nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/charts/node-undertaker/templates/rbac.yaml b/charts/node-undertaker/templates/rbac.yaml new file mode 100644 index 0000000..87b6ba7 --- /dev/null +++ b/charts/node-undertaker/templates/rbac.yaml @@ -0,0 +1,120 @@ +{{- if .Values.controller.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "node-undertaker.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "node-undertaker.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "node-undertaker.fullname" . }} + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "node-undertaker.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - update + - patch + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create + - apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "node-undertaker.fullname" . }}-node-lease + namespace: {{ .Values.controller.env.NODE_LEASE_NAMESPACE | default "kube-node-lease" }} +subjects: + - kind: ServiceAccount + name: {{ include "node-undertaker.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ include "node-undertaker.fullname" . }}-node-lease + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "node-undertaker.fullname" . }}-node-lease + namespace: {{ .Values.controller.env.NODE_LEASE_NAMESPACE | default "kube-node-lease" }} +rules: + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "node-undertaker.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "node-undertaker.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ include "node-undertaker.fullname" . }} + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "node-undertaker.fullname" . 
}} +rules: + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - create + - delete + - update + - patch + - apiGroups: + - "events.k8s.io" + resources: + - events + verbs: + - create +{{- end }} diff --git a/charts/node-undertaker/templates/serviceaccount.yaml b/charts/node-undertaker/templates/serviceaccount.yaml new file mode 100644 index 0000000..4dd0150 --- /dev/null +++ b/charts/node-undertaker/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.controller.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "node-undertaker.serviceAccountName" . }} + labels: + {{- include "node-undertaker.labels" . | nindent 4 }} + {{- with .Values.controller.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/node-undertaker/values.yaml b/charts/node-undertaker/values.yaml new file mode 100644 index 0000000..369505a --- /dev/null +++ b/charts/node-undertaker/values.yaml @@ -0,0 +1,82 @@ +# Default values for node-undertaker. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +nameOverride: "" +fullnameOverride: "" + +controller: + replicaCount: 2 + + port: 8080 + + pdb: + enabled: true + maxUnavailable: 1 + + image: + repository: "ghcr.io/dbschenker/node-undertaker" + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + + imagePullSecrets: [] + + rbac: + # Specified if the roles and their bindings should be created + create: true + + serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + + podAnnotations: {} +# to enable scraping metrics please uncomment those lines +# prometheus.io/scrape: 'true' +# prometheus.io/path: '/metrics' +# prometheus.io/port: '8080' + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + + resources: + limits: + memory: 256Mi + requests: + cpu: 100m + memory: 64Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + env: + CLOUD_PROVIDER: aws + # LOG_LEVEL: info + LOG_FORMAT: json + # LEASE_LOCK_NAME: null + # LEASE_LOCK_NAMESPACE: null + # NAMESPACE: null + NODE_INITIAL_THRESHOLD: "120" + DRAIN_DELAY: "300" + CLOUD_TERMINATION_DELAY: "120" + CLOUD_PREPARE_TERMINATION_DELAY: "300" + # NODE_LEASE_NAMESPACE: "kube-node-lease" + # NODE_SELECTOR: "" + # AWS_REGION: "" + diff --git a/charts/template.sh b/charts/template.sh new file mode 100755 index 0000000..6faf8c4 --- /dev/null +++ b/charts/template.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +helm template --create-namespace -n node-undertaker node-undertaker node-undertaker \ + --set controller.image.tag=local \ + --set controller.settings.cloudProvider=kind \ + --set controller.settings.logLevel=debug \ + --set-string controller.podAnnotations.prometheus\\.io\\/scrape=true \ + --set controller.podAnnotations."prometheus\.io\/path"=/metrics \ + --set-string controller.podAnnotations.prometheus\\.io\\/port=8080 \ + --set controller.settings.nodeLeaseNamespace=kube-node-lease \ + $@ diff --git a/cmd/node-undertaker/flags/flags.go b/cmd/node-undertaker/flags/flags.go new file mode 100644 index 0000000..2672dde --- /dev/null +++ b/cmd/node-undertaker/flags/flags.go @@ -0,0 +1,114 @@ +package flags + +import ( + "fmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + LogLevelFlag = "log-level" + LogFormatFlag = 
"log-format" + CloudProviderFlag = "cloud-provider" + InitialDelayFlag = "initial-delay" + DrainDelayFlag = "drain-delay" + CloudTerminationDelayFlag = "cloud-termination-delay" + CloudPrepareTerminationDelayFlag = "cloud-prepare-termination-delay" + PortFlag = "port" + NodeInitialThresholdFlag = "node-initial-threshold" + NodeLeaseNamespaceFlag = "node-lease-namespace" + NamespaceFlag = "namespace" + LeaseLockNameFlag = "lease-lock-name" + LeaseLockNamespaceFlag = "lease-lock-namespace" + LogFormatJson = "json" + LogFormatText = "text" + NodeSelectorFlag = "node-selector" +) + +func SetupFlags(cmd *cobra.Command) error { + cmd.PersistentFlags().String(LogLevelFlag, "info", "Log level [panic|fatal|error|warn|warning|info|debug|trace]. Default: 'info'. Can be set using LOG_LEVEL env variable") + err := viper.BindPFlag(LogLevelFlag, cmd.PersistentFlags().Lookup(LogLevelFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().String(LogFormatFlag, "text", "Log format [text|json]. Default: 'text'. Can be set using LOG_FORMAT env variable") + err = viper.BindPFlag(LogFormatFlag, cmd.PersistentFlags().Lookup(LogFormatFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().String(CloudProviderFlag, "aws", "Cloud provider name. Default: 'aws'. Possible values: aws,kwok,kind. 
Can be set using CLOUD_PROVIDER env variable") + err = viper.BindPFlag(CloudProviderFlag, cmd.PersistentFlags().Lookup(CloudProviderFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().Int(DrainDelayFlag, 300, "Drain unhealthy node after number of seconds after observed unhealthy (env: DRAIN_DELAY)") + err = viper.BindPFlag(DrainDelayFlag, cmd.PersistentFlags().Lookup(DrainDelayFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().Int(CloudTerminationDelayFlag, 300, "Terminate unhealthy node after number of seconds after starting termination preparation (env: CLOUD_TERMINATION_DELAY)") + err = viper.BindPFlag(CloudTerminationDelayFlag, cmd.PersistentFlags().Lookup(CloudTerminationDelayFlag)) + cmd.PersistentFlags().Int(CloudPrepareTerminationDelayFlag, 300, "Prepare termination of unhealthy node after number of seconds after starting drain (env: CLOUD_PREPARE_TERMINATION_DELAY)") + err = viper.BindPFlag(CloudPrepareTerminationDelayFlag, cmd.PersistentFlags().Lookup(CloudPrepareTerminationDelayFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().Int(NodeInitialThresholdFlag, 120, "Node is skipped until this number of seconds passes since creation (env: NODE_INITIAL_THRESHOLD)") + err = viper.BindPFlag(NodeInitialThresholdFlag, cmd.PersistentFlags().Lookup(NodeInitialThresholdFlag)) + if err != nil { + panic(err) + } + cmd.PersistentFlags().Int(PortFlag, 8080, "Http port (used for observability). Can be set using PORT env variable") + err = viper.BindPFlag(PortFlag, cmd.PersistentFlags().Lookup(PortFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().String(NamespaceFlag, "", "Namespace where events should be created. Default: '' - which is the same namespace node-undertaker runs. 
Can be set using NAMESPACE env variable") + err = viper.BindPFlag(NamespaceFlag, cmd.PersistentFlags().Lookup(NamespaceFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().String(NodeLeaseNamespaceFlag, "kube-node-lease", "Namespace containing leases. Default: 'kube-node-lease'. Can be set using NODE_LEASE_NAMESPACE env variable") + err = viper.BindPFlag(NodeLeaseNamespaceFlag, cmd.PersistentFlags().Lookup(NodeLeaseNamespaceFlag)) + if err != nil { + return err + } + // leader-election lease configuration + cmd.PersistentFlags().String(LeaseLockNamespaceFlag, "", "Namespace containing leader election lease. Default: '' - which is the same namespace node-undertaker runs. Can be set using LEASE_LOCK_NAMESPACE env variable") + err = viper.BindPFlag(LeaseLockNamespaceFlag, cmd.PersistentFlags().Lookup(LeaseLockNamespaceFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().String(LeaseLockNameFlag, "node-undertaker-leader-election", "Name of node-undertaker's leader election lease. Default: 'node-undertaker-leader-election'. Can be set using LEASE_LOCK_NAME env variable") + err = viper.BindPFlag(LeaseLockNameFlag, cmd.PersistentFlags().Lookup(LeaseLockNameFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().Int(InitialDelayFlag, 0, "Initial delay from start of node-undertaker pod until starts handling node state changes. Default: '0'. Can be set using INITIAL_DELAY env variable") + err = viper.BindPFlag(InitialDelayFlag, cmd.PersistentFlags().Lookup(InitialDelayFlag)) + if err != nil { + return err + } + cmd.PersistentFlags().String(NodeSelectorFlag, "", "Label selector for nodes to watch. Default: ''. 
Can be set using NODE_SELECTOR env variable") + err = viper.BindPFlag(NodeSelectorFlag, cmd.PersistentFlags().Lookup(NodeSelectorFlag)) + if err != nil { + return err + } + return nil +} + +// ValidateRootFlags verifies that the configured log level and log format values +// are usable; returns an error describing the first invalid setting. +func ValidateRootFlags() error { + _, err := log.ParseLevel(viper.GetString(LogLevelFlag)) + if err != nil { + return err + } + + format := viper.GetString(LogFormatFlag) + if format != LogFormatJson && format != LogFormatText { + return fmt.Errorf("unknown log format: %s", format) + } + + return nil +} diff --git a/cmd/node-undertaker/flags/flags_test.go b/cmd/node-undertaker/flags/flags_test.go new file mode 100644 index 0000000..44d3b88 --- /dev/null +++ b/cmd/node-undertaker/flags/flags_test.go @@ -0,0 +1,28 @@ +package flags + +import ( + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSetupFlags(t *testing.T) { + cmd := &cobra.Command{} + res := SetupFlags(cmd) + assert.NoError(t, res) +} + +func TestValidateRootFlagsOk(t *testing.T) { + viper.Set(LogLevelFlag, "info") + res := ValidateRootFlags() + + assert.NoError(t, res) +} + +func TestValidateRootFlagsFail(t *testing.T) { + viper.Set(LogLevelFlag, "wrong") + res := ValidateRootFlags() + + assert.Error(t, res) +} diff --git a/cmd/node-undertaker/flags/init_test.go b/cmd/node-undertaker/flags/init_test.go new file mode 100644 index 0000000..88416cd --- /dev/null +++ b/cmd/node-undertaker/flags/init_test.go @@ -0,0 +1,11 @@ +package flags + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +func init() { + viper.Reset() + logrus.Infof("Initialized tests") +} diff --git a/cmd/node-undertaker/node-undertaker.go b/cmd/node-undertaker/node-undertaker.go new file mode 100644 index 0000000..c93bab7 --- /dev/null +++ b/cmd/node-undertaker/node-undertaker.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "github.com/dbschenker/node-undertaker/cmd/node-undertaker/flags" + 
"github.com/dbschenker/node-undertaker/pkg/nodeundertaker" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "strings" +) + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: "node-undertaker", + Short: "Node undertaker terminates kubernetes nodes that are unhealthy", + Long: "Node undertaker terminates kubernetes nodes that are unhealthy.\n" + + "Please use `node-undertaker --help` to get possible options", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Println("Node-undertaker starting") + err := flags.ValidateRootFlags() + if err != nil { + return err + } + + return nodeundertaker.Execute() + }, +} + +func main() { + cobra.CheckErr(rootCmd.Execute()) +} + +func init() { + + cobra.OnInitialize(initConfig) + // register and bind all CLI flags before the command runs + err := flags.SetupFlags(rootCmd) + if err != nil { + panic(err) + } +} + +// initConfig makes every flag settable via env vars (dashes mapped to underscores). +func initConfig() { + viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + viper.AutomaticEnv() +} diff --git a/docs/states.plantuml b/docs/states.plantuml new file mode 100644 index 0000000..8e09c45 --- /dev/null +++ b/docs/states.plantuml @@ -0,0 +1,29 @@ +@startuml +hide empty description + +state "Healthy" as healthy #green;text:white +state "Label node" as label_node +label_node : label node with:\ndbschenker.com/node-undertaker=unhealthy +state "Taint node" as taint_node #yellow +taint_node : taint node with:\ndbschenker.com/node-undertaker:NoExecute +taint_node : label node with:\ndbschenker.com/node-undertaker=tainted +state "Drain node" as drain_node #orange +drain_node : label node with:\ndbschenker.com/node-undertaker=draining +drain_node : drain node +state "Prepare node termination" as prepare_termination #red +prepare_termination : label node with:\ndbschenker.com/node-undertaker=prepare_termination +state "Terminating node" as terminating_node #darkred;text:white +terminating_node : label node with:\ndbschenker.com/node-undertaker=terminating + +[*] --> healthy +healthy --> 
label_node : lease not refreshed +label_node --> taint_node : on update +taint_node --> drain_node : after "drain-delay" seconds +drain_node --> prepare_termination : after "cloud-prepare-termination-delay" seconds +prepare_termination --> terminating_node : after "cloud-termination-delay" +terminating_node --> [*] + +label_node -[#green]-> healthy : lease refreshed +taint_node -[#green]-> healthy : lease refreshed +drain_node -[#green]-> healthy : lease refreshed +@enduml \ No newline at end of file diff --git a/docs/states.png b/docs/states.png new file mode 100644 index 0000000..066ee83 Binary files /dev/null and b/docs/states.png differ diff --git a/example/kind/config.yaml b/example/kind/config.yaml new file mode 100644 index 0000000..671a4b3 --- /dev/null +++ b/example/kind/config.yaml @@ -0,0 +1,24 @@ +# this config file contains all config fields with comments +# NOTE: this is not a particularly useful config file +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +# patch the generated kubeadm config with some extra settings +#kubeadmConfigPatches: +# - | +# apiVersion: kubelet.config.k8s.io/v1beta1 +# kind: KubeletConfiguration +# evictionHard: +# nodefs.available: "0%" +# patch it further using a JSON 6902 patch + +# 1 control plane node and 3 workers +nodes: + # the control plane node config + - role: control-plane + - role: control-plane + # the three workers + - role: worker + labels: + testUnhealthy: true + - role: worker + - role: worker \ No newline at end of file diff --git a/example/kind/values.yaml b/example/kind/values.yaml new file mode 100644 index 0000000..4d937a9 --- /dev/null +++ b/example/kind/values.yaml @@ -0,0 +1,12 @@ +controller: + settings: + cloudProvider: kind + logLevel: debug + logFormat: text + drainDelay: 150 + nodeInitialThreshold: 60 + cloudTerminationDelay: 150 + + image: + repository: node-undertaker + tag: local diff --git a/example/kwok/create-node-lease.sh b/example/kwok/create-node-lease.sh new file mode 100755 index 
0000000..4ffc4ce --- /dev/null +++ b/example/kwok/create-node-lease.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [[ $# -ne 3 ]]; then + echo "Usage: ./create-node-lease.sh NODE_NAME NAMESPACE_NAME LEASE_DURATION" + exit 1 +fi + +NODE_NAME=$1 +NAMESPACE_NAME=$2 +LEASE_DURATION=$3 +NODE_UID="$(kubectl get node -o custom-columns=uid:.metadata.uid --no-headers $NODE_NAME)" + +echo "{\"apiVersion\": \"coordination.k8s.io/v1\",\"kind\": \"Lease\",\"metadata\": {\"name\": \"$NODE_NAME\",\"namespace\": \"$NAMESPACE_NAME\", \"ownerReferences\": [{\"apiVersion\": \"v1\",\"kind\": \"Node\",\"name\": \"$NODE_NAME\",\"uid\": \"$NODE_UID\"}]}, \"spec\": {\"holderIdentity\": \"$NODE_NAME\", \"leaseDurationSeconds\": $LEASE_DURATION, \"renewTime\": \"$(date -u +"%Y-%m-%dT%H:%M:%S.000000Z")\"}}" | kubectl apply -f - diff --git a/example/kwok/node.yaml b/example/kwok/node.yaml new file mode 100644 index 0000000..d948b17 --- /dev/null +++ b/example/kwok/node.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + node.alpha.kubernetes.io/ttl: "0" + kwok.x-k8s.io/node: fake + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: amd64 + kubernetes.io/hostname: kwok-node-0 + kubernetes.io/os: linux + kubernetes.io/role: agent + node-role.kubernetes.io/agent: "" + type: kwok + health-manager: node-undertaker + name: kwok-node-0 +spec: + providerID: kwok://kwok-node-0 +status: + allocatable: + cpu: "32" + memory: 256Gi + pods: "110" + capacity: + cpu: "32" + memory: 256Gi + pods: "110" + nodeInfo: + architecture: amd64 + bootID: "" + containerRuntimeVersion: "" + kernelVersion: "" + kubeProxyVersion: fake + kubeletVersion: fake + machineID: "" + operatingSystem: linux + osImage: "" + systemUUID: "" + phase: Running \ No newline at end of file diff --git a/example/kwok/node2.yaml b/example/kwok/node2.yaml new file mode 100644 index 0000000..90a888f --- /dev/null +++ b/example/kwok/node2.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: 
Node +metadata: + annotations: + node.alpha.kubernetes.io/ttl: "0" + kwok.x-k8s.io/node: fake + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: amd64 + kubernetes.io/hostname: kwok-node-1 + kubernetes.io/os: linux + kubernetes.io/role: agent + node-role.kubernetes.io/agent: "" + type: kwok + health-manager: other + name: kwok-node-1 +spec: + providerID: kwok://kwok-node-1 +status: + allocatable: + cpu: "32" + memory: 256Gi + pods: "110" + capacity: + cpu: "32" + memory: 256Gi + pods: "110" + nodeInfo: + architecture: amd64 + bootID: "" + containerRuntimeVersion: "" + kernelVersion: "" + kubeProxyVersion: fake + kubeletVersion: fake + machineID: "" + operatingSystem: linux + osImage: "" + systemUUID: "" + phase: Running \ No newline at end of file diff --git a/example/node-state-reporter/reporter.yaml b/example/node-state-reporter/reporter.yaml new file mode 100644 index 0000000..ded6d9b --- /dev/null +++ b/example/node-state-reporter/reporter.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: node-undertaker +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-lease-reporter + namespace: node-undertaker +spec: + selector: + matchLabels: + app: node-lease-reporter + template: + metadata: + labels: + app: node-lease-reporter + spec: + serviceAccountName: node-lease-reporter + tolerations: + - operator: Exists + effect: NoSchedule + containers: + - name: reporter + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LEASE_DURATION + value: "130" + - name: SLEEP + value: "20" + image: bitnami/kubectl:1.26 + command: + - /bin/sh + - -c + args: + - | + set -ex + export NODE_UID="$(kubectl get node -o custom-columns=uid:.metadata.uid --no-headers $NODE_NAME)" + echo "NODE_UID=$NODE_UID" + while : + do + export UNHEALTHY=$(kubectl get node $NODE_NAME --no-headers -o 
'custom-columns=arch:.metadata.labels.testUnhealthy') + if [ "$UNHEALTHY" != "true" ]; then + echo "{\"apiVersion\": \"coordination.k8s.io/v1\",\"kind\": \"Lease\",\"metadata\": {\"name\": \"$NODE_NAME\",\"namespace\": \"$NAMESPACE_NAME\", \"ownerReferences\": [{\"apiVersion\": \"v1\",\"kind\": \"Node\",\"name\": \"$NODE_NAME\",\"uid\": \"$NODE_UID\"}]}, \"spec\": {\"holderIdentity\": \"$NODE_NAME\", \"leaseDurationSeconds\": $LEASE_DURATION, \"renewTime\": \"$(date -u +"%Y-%m-%dT%H:%M:%S.%6NZ")\"}}" | kubectl apply -f - + else + echo "Node unhealthy, not updating lease" + fi + sleep $SLEEP + done +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-lease-reporter + namespace: node-undertaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-lease-reporter-node-reader +subjects: + - kind: ServiceAccount + name: node-lease-reporter + namespace: node-undertaker +roleRef: + kind: ClusterRole + name: node-lease-reporter + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-lease-reporter +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: node-lease-reporter-lease-creator + namespace: node-undertaker +subjects: + + - kind: ServiceAccount + name: node-lease-reporter + namespace: node-undertaker +roleRef: + kind: Role + name: node-lease-reporter + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: node-lease-reporter + namespace: node-undertaker +rules: + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - create + - patch + diff --git a/example/workloads/deployment.yaml b/example/workloads/deployment.yaml new file mode 100644 index 0000000..992a8c5 --- /dev/null +++ b/example/workloads/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: 
v1 +kind: Namespace +metadata: + name: test +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: test + name: test + namespace: test +spec: + replicas: 5 + selector: + matchLabels: + app: test + template: + metadata: + labels: + app: test + spec: + nodeSelector: + "node-role.kubernetes.io/worker": "" + containers: + - image: nginx + name: nginx diff --git a/example/workloads/pdb.yaml b/example/workloads/pdb.yaml new file mode 100644 index 0000000..5c57438 --- /dev/null +++ b/example/workloads/pdb.yaml @@ -0,0 +1,12 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + app: test + name: test + namespace: test +spec: + maxUnavailable: 0 + selector: + matchLabels: + app: test \ No newline at end of file diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..985a86f --- /dev/null +++ b/go.mod @@ -0,0 +1,147 @@ +module github.com/dbschenker/node-undertaker + +go 1.21 + +require ( + github.com/aws/aws-sdk-go-v2/config v1.18.39 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.30.6 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.118.0 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.16.5 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.4 + github.com/docker/go-connections v0.4.0 + github.com/golang/mock v1.6.0 + github.com/google/uuid v1.3.1 + github.com/prometheus/client_golang v1.16.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.7.0 + github.com/spf13/viper v1.16.0 + github.com/stretchr/testify v1.8.4 + github.com/testcontainers/testcontainers-go v0.23.0 + golang.org/x/sync v0.3.0 + k8s.io/api v0.28.2 + k8s.io/apimachinery v0.28.2 + k8s.io/client-go v0.28.2 + k8s.io/cloud-provider-aws v1.28.1 + k8s.io/kubectl v0.28.2 +) + +require ( + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + 
github.com/Microsoft/hcsshim v0.11.0 // indirect + github.com/aws/aws-sdk-go v1.45.9 // indirect + github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.37 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 // indirect + github.com/aws/smithy-go v1.14.2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/containerd v1.7.6 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.6+incompatible // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-errors/errors v1.5.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/opencontainers/runc v1.1.9 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + 
github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.starlark.net v0.0.0-20230912135651-745481cf39ed // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.15.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/term v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.13.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb // indirect + google.golang.org/grpc v1.58.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/gcfg.v1 v1.2.3 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/cli-runtime v0.28.2 // indirect + k8s.io/cloud-provider v0.28.2 // indirect + k8s.io/component-base v0.28.2 // indirect + k8s.io/component-helpers v0.28.2 // indirect + k8s.io/csi-translation-lib v0.28.2 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.14.0 
// indirect + sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..fe31b4c --- /dev/null +++ b/go.sum @@ -0,0 +1,791 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery 
v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod 
h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.0 h1:7EFNIY4igHEXUdj1zXgAyU3fLc7QfOKHbkldRVTBdiM= +github.com/Microsoft/hcsshim v0.11.0/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go v1.45.9 h1:ks4nMaagM/0jeOFUxWxx9C009vkrdgm3lgcnciet1YU= +github.com/aws/aws-sdk-go v1.45.9/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= +github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= +github.com/aws/aws-sdk-go-v2/config v1.18.39 h1:oPVyh6fuu/u4OiW4qcuQyEtk7U7uuNBmHmJSLg1AJsQ= +github.com/aws/aws-sdk-go-v2/config v1.18.39/go.mod h1:+NH/ZigdPckFpgB1TRcRuWCB/Kbbvkxc/iNAKTq5RhE= +github.com/aws/aws-sdk-go-v2/credentials v1.13.37 h1:BvEdm09+ZEh2XtN+PVHPcYwKY3wIeB6pw7vPRM4M9/U= +github.com/aws/aws-sdk-go-v2/credentials v1.13.37/go.mod 
h1:ACLrdkd4CLZyXOghZ8IYumQbcooAcp2jo/s2xsFH8IM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.30.6 h1:OuxP8FzE3++AjQ8wabMcwJxtS25inpTIblMPNzV3nB8= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.30.6/go.mod h1:iHCpld+TvQd0odwp6BiwtL9H9LbU41kPW1i9oBy3iOo= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.118.0 h1:ueSJS07XpOwCFhYTHh/Jjw856+U+u0Dv5LIIPOB1/Ns= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.118.0/go.mod h1:0FhI2Rzcv5BNM3dNnbcCx2qa2naFZoAidJi11cQgzL0= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.16.5 h1:DfvVNjrKOQpJyll4gDvHbFRkbSmQvFqcEljgR3/RSz4= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.16.5/go.mod h1:xCxinsYWeneLsHYY9O2lbIzT1ZgjzuRPMjdUFgE798I= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.4 h1:hcJmu7oeocSOHQKaifUoMWaSxengFuvGriP7SvuVvTw= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.4/go.mod h1:CbJHS0jJJNd2dZOakkG5TBbT8OHz+T0UBzR1ClIdezI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= +github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 h1:2PylFCfKCEDv6PeSN09pC/VUiRd10wi1VfHG5FrW0/g= +github.com/aws/aws-sdk-go-v2/service/sso v1.13.6/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6 h1:pSB560BbVj9ZlJZF4WYj5zsytWHWKxg+NgyGV4B2L58= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= +github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 h1:CQBFElb0LS8RojMJlxRSo/HXipvTZW2S44Lt9Mk2aYQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.21.5/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= +github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= +github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8= +github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= +github.com/docker/docker v24.0.6+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= +github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf 
v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache 
v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/go-wordwrap v1.0.1 
h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM= +github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang 
v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 
h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs= +github.com/testcontainers/testcontainers-go v0.23.0/go.mod h1:3gzuZfb7T9qfcH2pHpV4RLlWrPjeWNQah6XlYQ32c4I= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.starlark.net v0.0.0-20230912135651-745481cf39ed h1:kNt8RXSIU6IRBO9MP3m+6q3WpyBHQQXqSktcyVKDPOQ= +go.starlark.net v0.0.0-20230912135651-745481cf39ed/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 
h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod 
h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto 
v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb h1:Isk1sSH7bovx8Rti2wZK0UZF6oraBDK74uoyLEEVFN0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= +google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf 
v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= +k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/cloud-provider v0.28.2 h1:9qsYm86hm4bnPgZbl9LE29Zfgjuq3NZR2dgtPioJ40s= +k8s.io/cloud-provider v0.28.2/go.mod h1:40fqf6MtgYho5Eu4gkyLgh5abxU/QKTMTIwBxt4ILyU= +k8s.io/cloud-provider-aws v1.28.1 h1:eOuPRE/3BDrCkGNVtH9SocHXAifCH9rcaVO1GabsYvo= +k8s.io/cloud-provider-aws v1.28.1/go.mod h1:t/rdeU79YtYD+5zZbHVRmmpcmFxxJtVen8g1znL/AP4= +k8s.io/component-base v0.28.2 
h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= +k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/component-helpers v0.28.2 h1:r/XJ265PMirW9EcGXr/F+2yWrLPo2I69KdvcY/h9HAo= +k8s.io/component-helpers v0.28.2/go.mod h1:pF1R5YWQ+sgf0i6EbVm+MQCzkYuqutDUibdrkvAa6aI= +k8s.io/csi-translation-lib v0.28.2 h1:63MIOXUn5bet2Mw7G+A7zFmLzQ/vzBrjvNYIlXYh/n0= +k8s.io/csi-translation-lib v0.28.2/go.mod h1:14Lusc0J0vnlRNXA/T7GlZcou4XFTRHC071jsz+SHvQ= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f h1:eeEUOoGYWhOz7EyXqhlR2zHKNw2mNJ9vzJmub6YN6kk= +k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.28.2 h1:fOWOtU6S0smdNjG1PB9WFbqEIMlkzU5ahyHkc7ESHgM= +k8s.io/kubectl v0.28.2/go.mod h1:6EQWTPySF1fn7yKoQZHYf9TPwIl2AygHEcJoxFekr64= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.14.0 h1:6+QLmXXA8X4eDM7ejeaNUyruA1DDB3PVIjbpVhDOJRA= +sigs.k8s.io/kustomize/api v0.14.0/go.mod h1:vmOXlC8BcmcUJQjiceUbcyQ75JBP6eg8sgoyzc+eLpQ= +sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= +sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= 
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/pkg/cloudproviders/aws/api.go b/pkg/cloudproviders/aws/api.go
new file mode 100644
index 0000000..58d1ff6
--- /dev/null
+++ b/pkg/cloudproviders/aws/api.go
@@ -0,0 +1,28 @@
+package aws
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing"
+	"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
+)
+
+//go:generate mockgen -destination=./mocks/api_mocks.go github.com/dbschenker/node-undertaker/pkg/cloudproviders/aws EC2CLIENT,ELBCLIENT,ELBV2CLIENT,ASGCLIENT
+
+type EC2CLIENT interface {
+	TerminateInstances(ctx context.Context, params *ec2.TerminateInstancesInput, optFns ...func(*ec2.Options)) (*ec2.TerminateInstancesOutput, error)
+}
+
+type ELBCLIENT interface {
+	DeregisterInstancesFromLoadBalancer(ctx context.Context, params *elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput, optFns ...func(*elasticloadbalancing.Options)) (*elasticloadbalancing.DeregisterInstancesFromLoadBalancerOutput, error)
+}
+
+type ELBV2CLIENT interface {
+	DeregisterTargets(ctx context.Context, params *elasticloadbalancingv2.DeregisterTargetsInput, optFns ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DeregisterTargetsOutput, error)
+}
+
+type ASGCLIENT interface {
+	DescribeTrafficSources(ctx context.Context, params *autoscaling.DescribeTrafficSourcesInput, optFns ...func(*autoscaling.Options)) (*autoscaling.DescribeTrafficSourcesOutput, error)
+	DescribeAutoScalingInstances(ctx context.Context, params *autoscaling.DescribeAutoScalingInstancesInput, optFns ...func(*autoscaling.Options)) (*autoscaling.DescribeAutoScalingInstancesOutput, error)
+}
diff --git a/pkg/cloudproviders/aws/init_test.go b/pkg/cloudproviders/aws/init_test.go
new file mode 100644
index 0000000..9e632e7
--- /dev/null
+++ b/pkg/cloudproviders/aws/init_test.go
@@ -0,0 +1,11 @@
+package aws
+
+import (
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+)
+
+func init() {
+	viper.Reset()
+	logrus.Infof("Initialized tests")
+}
diff --git a/pkg/cloudproviders/aws/provider.go b/pkg/cloudproviders/aws/provider.go
new file mode 100644
index 0000000..f8a97e9
--- /dev/null
+++ b/pkg/cloudproviders/aws/provider.go
@@ -0,0 +1,162 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
+	autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing"
+	elasticloadbalancingtypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types"
+	"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
+	elasticloadbalancingv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"
+	log "github.com/sirupsen/logrus"
+	awscloudproviderv1 "k8s.io/cloud-provider-aws/pkg/providers/v1"
+)
+
+type AwsCloudProvider struct {
+	Ec2Client   EC2CLIENT
+	ElbClient   ELBCLIENT
+	Elbv2Client ELBV2CLIENT
+	AsgClient   ASGCLIENT
+}
+
+const (
+	TerminationEventActionFailed           = "Instance Termination Failed"
+	TerminationEventActionSucceeded        = "Instance Terminated"
+	PrepareTerminationEventActionFailed    = "Instance Preparation For Termination Failed"
+	PrepareTerminationEventActionSucceeded = "Instance Prepared For Termination"
+)
+
+func CreateCloudProvider(ctx context.Context) (AwsCloudProvider, error) {
+	ret := AwsCloudProvider{}
+
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		return ret, err
+	}
+	ret.Ec2Client = ec2.NewFromConfig(cfg)
+
ret.AsgClient = autoscaling.NewFromConfig(cfg) + ret.ElbClient = elasticloadbalancing.NewFromConfig(cfg) + ret.Elbv2Client = elasticloadbalancingv2.NewFromConfig(cfg) + return ret, nil +} + +func (p AwsCloudProvider) TerminateNode(ctx context.Context, cloudProviderNodeId string) (string, error) { + instanceId, err := awscloudproviderv1.KubernetesInstanceID(cloudProviderNodeId).MapToAWSInstanceID() + if err != nil { + return TerminationEventActionFailed, err + } + err = p.terminateInstance(ctx, string(instanceId)) + if err != nil { + return TerminationEventActionFailed, err + } + return TerminationEventActionSucceeded, nil +} + +func (p AwsCloudProvider) PrepareTermination(ctx context.Context, cloudProviderNodeId string) (string, error) { + instanceId, err := awscloudproviderv1.KubernetesInstanceID(cloudProviderNodeId).MapToAWSInstanceID() + if err != nil { + return PrepareTerminationEventActionFailed, err + } + asgName, err := p.getAsgForInstance(ctx, string(instanceId)) + if err != nil { + return PrepareTerminationEventActionFailed, err + } + if asgName != nil { + ts, err := p.getTrafficSourcesForAsg(ctx, asgName) + if err != nil { + return PrepareTerminationEventActionFailed, err + } + if len(ts) > 0 { + err := p.detachInstanceFromTrafficSources(ctx, ts, string(instanceId)) + if err != nil { + return PrepareTerminationEventActionFailed, err + } + + } + } + return PrepareTerminationEventActionSucceeded, nil +} + +func (p AwsCloudProvider) terminateInstance(ctx context.Context, instanceId string) error { + input := ec2.TerminateInstancesInput{ + InstanceIds: []string{ + string(instanceId), + }, + } + log.Debugf("EC2 Instance %s will be terminated in AWS", string(instanceId)) + _, err := p.Ec2Client.TerminateInstances(ctx, &input) + return err +} + +func (p AwsCloudProvider) getAsgForInstance(ctx context.Context, instanceId string) (*string, error) { + input := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + output, 
err := p.AsgClient.DescribeAutoScalingInstances(ctx, &input) + if err != nil { + return nil, err + } + if len(output.AutoScalingInstances) == 0 { + return nil, nil + } else if len(output.AutoScalingInstances) == 1 { + return output.AutoScalingInstances[0].AutoScalingGroupName, nil + } + + return nil, fmt.Errorf("AWS autoscaling API returned more than one ASG instance for instanceId: %s", instanceId) +} + +func (p AwsCloudProvider) getTrafficSourcesForAsg(ctx context.Context, asgName *string) ([]autoscalingtypes.TrafficSourceState, error) { + input := autoscaling.DescribeTrafficSourcesInput{ + AutoScalingGroupName: asgName, + } + output, err := p.AsgClient.DescribeTrafficSources(ctx, &input) + if err != nil { + return []autoscalingtypes.TrafficSourceState{}, err + } + ret := []autoscalingtypes.TrafficSourceState{} + for i := range output.TrafficSources { + if *output.TrafficSources[i].Type == "elb" || *output.TrafficSources[i].Type == "elbv2" { + if *output.TrafficSources[i].State != "Removing" && *output.TrafficSources[i].State != "Removed" { + ret = append(ret, output.TrafficSources[i]) + } + } + } + + return ret, err +} + +func (p AwsCloudProvider) detachInstanceFromTrafficSources(ctx context.Context, sources []autoscalingtypes.TrafficSourceState, instanceId string) error { + for i := range sources { + log.Debugf("Detaching instance %s from %s %s", instanceId, *sources[i].Type, *sources[i].Identifier) + + if *sources[i].Type == "elb" { + input := elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput{ + LoadBalancerName: sources[i].Identifier, + Instances: []elasticloadbalancingtypes.Instance{ + {InstanceId: &instanceId}, + }, + } + _, err := p.ElbClient.DeregisterInstancesFromLoadBalancer(ctx, &input) + if err != nil { + return err + } + } else if *sources[i].Type == "elbv2" { + input := elasticloadbalancingv2.DeregisterTargetsInput{ + TargetGroupArn: sources[i].Identifier, + Targets: []elasticloadbalancingv2types.TargetDescription{ + {Id: &instanceId}, + 
}, + } + _, err := p.Elbv2Client.DeregisterTargets(ctx, &input) + if err != nil { + return err + } + } + } + return nil +} diff --git a/pkg/cloudproviders/aws/provider_test.go b/pkg/cloudproviders/aws/provider_test.go new file mode 100644 index 0000000..8ce3660 --- /dev/null +++ b/pkg/cloudproviders/aws/provider_test.go @@ -0,0 +1,451 @@ +package aws + +import ( + "context" + "errors" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + elasticloadbalancingtypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elasticloadbalancingv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + mockaws "github.com/dbschenker/node-undertaker/pkg/cloudproviders/aws/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCreateAwsCloudProvider(t *testing.T) { + ctx := context.TODO() + + ret, err := CreateCloudProvider(ctx) + assert.NoError(t, err) + //assert.Equal(t, dummyRegion, ret.Region) + assert.NotNil(t, ret) + assert.NotNil(t, ret.AsgClient) + assert.NotNil(t, ret.ElbClient) + assert.NotNil(t, ret.ElbClient) + assert.NotNil(t, ret.Ec2Client) +} + +func TestTerminatNode(t *testing.T) { + mockCtrl := gomock.NewController(t) + ec2Client := mockaws.NewMockEC2CLIENT(mockCtrl) + + instanceId := "i-12312313" + + expectedInput := ec2.TerminateInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + + ec2Client.EXPECT().TerminateInstances(gomock.Any(), &expectedInput).Return(nil, nil).Times(1) + + cloudProvider := AwsCloudProvider{ + Ec2Client: ec2Client, + } + + res, err := cloudProvider.TerminateNode(context.TODO(), "aws://nonexistant/"+instanceId) + assert.NoError(t, err) + assert.Equal(t, TerminationEventActionSucceeded, res) +} + 
+func TestPrepareTerminationNodeNotInLB(t *testing.T) { + mockCtrl := gomock.NewController(t) + elbClient := mockaws.NewMockELBCLIENT(mockCtrl) + elbv2Client := mockaws.NewMockELBV2CLIENT(mockCtrl) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + + instanceId := "i-12312313" + + expectedAsgInput := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + expectedAsgOutput := autoscaling.DescribeAutoScalingInstancesOutput{ + AutoScalingInstances: []autoscalingtypes.AutoScalingInstanceDetails{}, + } + + asgClient.EXPECT().DescribeAutoScalingInstances(gomock.Any(), &expectedAsgInput).Return(&expectedAsgOutput, nil).Times(1) + + cloudProvider := AwsCloudProvider{ + AsgClient: asgClient, + Elbv2Client: elbv2Client, + ElbClient: elbClient, + } + + res, err := cloudProvider.PrepareTermination(context.TODO(), "aws://nonexistant/"+instanceId) + assert.NoError(t, err) + assert.Equal(t, PrepareTerminationEventActionSucceeded, res) +} + +func TestPrepareTerminationNodeInMultipleLB(t *testing.T) { + mockCtrl := gomock.NewController(t) + ec2Client := mockaws.NewMockEC2CLIENT(mockCtrl) + elbClient := mockaws.NewMockELBCLIENT(mockCtrl) + elbv2Client := mockaws.NewMockELBV2CLIENT(mockCtrl) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + + instanceId := "i-12312313" + asgName := "asg-name-1" + + expectedAsgInput := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + expectedAsgOutput := autoscaling.DescribeAutoScalingInstancesOutput{ + AutoScalingInstances: []autoscalingtypes.AutoScalingInstanceDetails{ + { + InstanceId: &instanceId, + AutoScalingGroupName: &asgName, + }, + }, + } + + asgClient.EXPECT().DescribeAutoScalingInstances(gomock.Any(), &expectedAsgInput).Return(&expectedAsgOutput, nil).Times(1) + + trafficSourceType1 := "elb" + trafficSourceState1 := "Added" + trafficSourceIdentifier1 := "lb-name1" + trafficSourceType2 := "elbv2" + trafficSourceState2 := "InService" + 
trafficSourceIdentifier2 := "arn:aws:elbv2" + trafficSourceType3 := "elb" + trafficSourceState3 := "Removing" + trafficSourceIdentifier3 := "lb-name1" + + expectedTrafficSourcesInput := autoscaling.DescribeTrafficSourcesInput{ + AutoScalingGroupName: &asgName, + } + expectedTrafficSourcesOutput := autoscaling.DescribeTrafficSourcesOutput{ + TrafficSources: []autoscalingtypes.TrafficSourceState{ + { + Type: &trafficSourceType1, + State: &trafficSourceState1, + Identifier: &trafficSourceIdentifier1, + }, + { + Type: &trafficSourceType2, + State: &trafficSourceState2, + Identifier: &trafficSourceIdentifier2, + }, + { + Type: &trafficSourceType3, + State: &trafficSourceState3, + Identifier: &trafficSourceIdentifier3, + }, + }, + } + asgClient.EXPECT().DescribeTrafficSources(gomock.Any(), &expectedTrafficSourcesInput).Return(&expectedTrafficSourcesOutput, nil).Times(1) + + expectedInput1 := elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput{ + LoadBalancerName: &trafficSourceIdentifier1, + Instances: []elasticloadbalancingtypes.Instance{ + {InstanceId: &instanceId}, + }, + } + elbClient.EXPECT().DeregisterInstancesFromLoadBalancer(gomock.Any(), &expectedInput1).Return(nil, nil).Times(1) + + expectedInput2 := elasticloadbalancingv2.DeregisterTargetsInput{ + TargetGroupArn: &trafficSourceIdentifier2, + Targets: []elasticloadbalancingv2types.TargetDescription{ + {Id: &instanceId}, + }, + } + elbv2Client.EXPECT().DeregisterTargets(gomock.Any(), &expectedInput2).Return(nil, nil).Times(1) + + cloudProvider := AwsCloudProvider{ + Ec2Client: ec2Client, + AsgClient: asgClient, + Elbv2Client: elbv2Client, + ElbClient: elbClient, + } + + res, err := cloudProvider.PrepareTermination(context.TODO(), "aws://nonexistant/"+instanceId) + assert.NoError(t, err) + assert.Equal(t, PrepareTerminationEventActionSucceeded, res) +} + +func TestTerminateNodeWrongProviderId(t *testing.T) { + mockCtrl := gomock.NewController(t) + ec2Client := mockaws.NewMockEC2CLIENT(mockCtrl) + + 
cloudProvider := AwsCloudProvider{ + Ec2Client: ec2Client, + } + res, err := cloudProvider.TerminateNode(context.TODO(), "test123") + assert.Error(t, err) + assert.Equal(t, TerminationEventActionFailed, res) +} + +func TestPrepareTerminationNodeWrongProviderId(t *testing.T) { + mockCtrl := gomock.NewController(t) + ec2Client := mockaws.NewMockEC2CLIENT(mockCtrl) + + cloudProvider := AwsCloudProvider{ + Ec2Client: ec2Client, + } + res, err := cloudProvider.PrepareTermination(context.TODO(), "test123") + assert.Error(t, err) + assert.Equal(t, PrepareTerminationEventActionFailed, res) +} + +func TestGetAsgForInstanceNone(t *testing.T) { + mockCtrl := gomock.NewController(t) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + instanceId := "i-123" + expectedInput := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + expectedOutput := autoscaling.DescribeAutoScalingInstancesOutput{ + AutoScalingInstances: []autoscalingtypes.AutoScalingInstanceDetails{}, + } + asgClient.EXPECT().DescribeAutoScalingInstances(gomock.Any(), &expectedInput).Return(&expectedOutput, nil).Times(1) + + p := AwsCloudProvider{ + AsgClient: asgClient, + } + + ret, err := p.getAsgForInstance(context.TODO(), instanceId) + assert.Nil(t, ret) + assert.NoError(t, err) +} + +func TestGetAsgForInstanceOne(t *testing.T) { + mockCtrl := gomock.NewController(t) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + instanceId := "i-123" + asgName := "test-asg-1" + expectedInput := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + expectedOutput := autoscaling.DescribeAutoScalingInstancesOutput{ + AutoScalingInstances: []autoscalingtypes.AutoScalingInstanceDetails{ + { + InstanceId: &instanceId, + AutoScalingGroupName: &asgName, + }, + }, + } + asgClient.EXPECT().DescribeAutoScalingInstances(gomock.Any(), &expectedInput).Return(&expectedOutput, nil).Times(1) + + p := AwsCloudProvider{ + AsgClient: asgClient, + } + + ret, 
err := p.getAsgForInstance(context.TODO(), instanceId) + assert.NotNil(t, ret) + assert.Equal(t, asgName, *ret) + assert.NoError(t, err) +} + +func TestGetAsgForInstanceErr(t *testing.T) { + mockCtrl := gomock.NewController(t) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + instanceId := "i-123" + asgName := "test-asg-1" + errorReturned := errors.New("test-error") + expectedInput := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + expectedOutput := autoscaling.DescribeAutoScalingInstancesOutput{ + AutoScalingInstances: []autoscalingtypes.AutoScalingInstanceDetails{ + { + InstanceId: &instanceId, + AutoScalingGroupName: &asgName, + }, + }, + } + asgClient.EXPECT().DescribeAutoScalingInstances(gomock.Any(), &expectedInput).Return(&expectedOutput, errorReturned).Times(1) + + p := AwsCloudProvider{ + AsgClient: asgClient, + } + + ret, err := p.getAsgForInstance(context.TODO(), instanceId) + assert.Nil(t, ret) + assert.Error(t, err) +} + +func TestGetAsgForInstanceErrMore(t *testing.T) { + mockCtrl := gomock.NewController(t) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + instanceId := "i-123" + asgName := "test-asg-1" + expectedInput := autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []string{ + instanceId, + }, + } + expectedOutput := autoscaling.DescribeAutoScalingInstancesOutput{ + AutoScalingInstances: []autoscalingtypes.AutoScalingInstanceDetails{ + { + InstanceId: &instanceId, + AutoScalingGroupName: &asgName, + }, + { + InstanceId: &instanceId, + AutoScalingGroupName: &asgName, + }, + }, + } + asgClient.EXPECT().DescribeAutoScalingInstances(gomock.Any(), &expectedInput).Return(&expectedOutput, nil).Times(1) + + p := AwsCloudProvider{ + AsgClient: asgClient, + } + + ret, err := p.getAsgForInstance(context.TODO(), instanceId) + assert.Nil(t, ret) + assert.Error(t, err) +} + +func TestGetTrafficSourcesForAsgNone(t *testing.T) { + mockCtrl := gomock.NewController(t) + asgClient := 
mockaws.NewMockASGCLIENT(mockCtrl) + asgName := "test-asg-1" + expectedInput := autoscaling.DescribeTrafficSourcesInput{ + AutoScalingGroupName: &asgName, + } + expectedOutput := autoscaling.DescribeTrafficSourcesOutput{ + TrafficSources: []autoscalingtypes.TrafficSourceState{}, + } + asgClient.EXPECT().DescribeTrafficSources(gomock.Any(), &expectedInput).Return(&expectedOutput, nil).Times(1) + + p := AwsCloudProvider{ + AsgClient: asgClient, + } + + ret, err := p.getTrafficSourcesForAsg(context.TODO(), &asgName) + assert.Empty(t, ret) + assert.NoError(t, err) +} + +func TestGetTrafficSourcesForAsgOk(t *testing.T) { + mockCtrl := gomock.NewController(t) + asgClient := mockaws.NewMockASGCLIENT(mockCtrl) + asgName := "test-asg-1" + expectedInput := autoscaling.DescribeTrafficSourcesInput{ + AutoScalingGroupName: &asgName, + } + + trafficSourceType1 := "elb" + trafficSourceState1 := "Added" + trafficSourceIdentifier1 := "lb-name1" + trafficSourceType2 := "elbv2" + trafficSourceState2 := "InService" + trafficSourceIdentifier2 := "arn:aws:elbv2" + trafficSourceType3 := "elb" + trafficSourceState3 := "Removing" + trafficSourceIdentifier3 := "lb-name1" + expectedOutput := autoscaling.DescribeTrafficSourcesOutput{ + TrafficSources: []autoscalingtypes.TrafficSourceState{ + { + Type: &trafficSourceType1, + State: &trafficSourceState1, + Identifier: &trafficSourceIdentifier1, + }, + { + Type: &trafficSourceType2, + State: &trafficSourceState2, + Identifier: &trafficSourceIdentifier2, + }, + { + Type: &trafficSourceType3, + State: &trafficSourceState3, + Identifier: &trafficSourceIdentifier3, + }, + }, + } + asgClient.EXPECT().DescribeTrafficSources(gomock.Any(), &expectedInput).Return(&expectedOutput, nil).Times(1) + + p := AwsCloudProvider{ + AsgClient: asgClient, + } + + ret, err := p.getTrafficSourcesForAsg(context.TODO(), &asgName) + assert.NoError(t, err) + assert.Len(t, ret, 2) + assert.Equal(t, trafficSourceType1, *ret[0].Type) + assert.Equal(t, 
trafficSourceIdentifier1, *ret[0].Identifier) + assert.Equal(t, trafficSourceState1, *ret[0].State) + assert.Equal(t, trafficSourceType2, *ret[1].Type) + assert.Equal(t, trafficSourceIdentifier2, *ret[1].Identifier) + assert.Equal(t, trafficSourceState2, *ret[1].State) +} + +func TestDetachInstanceFromTrafficSources(t *testing.T) { + mockCtrl := gomock.NewController(t) + elbClient := mockaws.NewMockELBCLIENT(mockCtrl) + elbv2Client := mockaws.NewMockELBV2CLIENT(mockCtrl) + p := AwsCloudProvider{ + ElbClient: elbClient, + Elbv2Client: elbv2Client, + } + + instanceId := "i-123124" + + trafficSourceType1 := "elb" + trafficSourceState1 := "Added" + trafficSourceIdentifier1 := "lb-name1" + trafficSourceType2 := "elbv2" + trafficSourceState2 := "InService" + trafficSourceIdentifier2 := "arn:aws:elbv:target-group:123322" + trafficSourceType3 := "elb" + trafficSourceState3 := "Adding" + trafficSourceIdentifier3 := "lb-name1" + sources := []autoscalingtypes.TrafficSourceState{ + { + Type: &trafficSourceType1, + State: &trafficSourceState1, + Identifier: &trafficSourceIdentifier1, + }, + { + Type: &trafficSourceType2, + State: &trafficSourceState2, + Identifier: &trafficSourceIdentifier2, + }, + { + Type: &trafficSourceType3, + State: &trafficSourceState3, + Identifier: &trafficSourceIdentifier3, + }, + } + + expectedInput1 := elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput{ + LoadBalancerName: &trafficSourceIdentifier1, + Instances: []elasticloadbalancingtypes.Instance{ + {InstanceId: &instanceId}, + }, + } + elbClient.EXPECT().DeregisterInstancesFromLoadBalancer(gomock.Any(), &expectedInput1).Return(nil, nil).Times(1) + + expectedInput2 := elasticloadbalancingv2.DeregisterTargetsInput{ + TargetGroupArn: &trafficSourceIdentifier2, + Targets: []elasticloadbalancingv2types.TargetDescription{ + {Id: &instanceId}, + }, + } + elbv2Client.EXPECT().DeregisterTargets(gomock.Any(), &expectedInput2).Return(nil, nil).Times(1) + + expectedInput3 := 
elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput{
+		LoadBalancerName: &trafficSourceIdentifier3,
+		Instances: []elasticloadbalancingtypes.Instance{
+			{InstanceId: &instanceId},
+		},
+	}
+	elbClient.EXPECT().DeregisterInstancesFromLoadBalancer(gomock.Any(), &expectedInput3).Return(nil, nil).Times(1)
+
+	err := p.detachInstanceFromTrafficSources(context.TODO(), sources, instanceId)
+	assert.NoError(t, err)
+}
diff --git a/pkg/cloudproviders/aws/validate_config.go b/pkg/cloudproviders/aws/validate_config.go
new file mode 100644
index 0000000..955528a
--- /dev/null
+++ b/pkg/cloudproviders/aws/validate_config.go
@@ -0,0 +1,6 @@
+package aws
+
+func (t AwsCloudProvider) ValidateConfig() error {
+
+	return nil
+}
diff --git a/pkg/cloudproviders/aws/validate_config_test.go b/pkg/cloudproviders/aws/validate_config_test.go
new file mode 100644
index 0000000..5df2175
--- /dev/null
+++ b/pkg/cloudproviders/aws/validate_config_test.go
@@ -0,0 +1,12 @@
+package aws
+
+import (
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestValidateConfigOk(t *testing.T) {
+	cloudProvider := AwsCloudProvider{}
+	result := cloudProvider.ValidateConfig()
+	assert.NoError(t, result)
+}
diff --git a/pkg/cloudproviders/cloudprovider.go b/pkg/cloudproviders/cloudprovider.go
new file mode 100644
index 0000000..adc41eb
--- /dev/null
+++ b/pkg/cloudproviders/cloudprovider.go
@@ -0,0 +1,14 @@
+package cloudproviders
+
+import "context"
+
+//go:generate mockgen -destination=./mocks/api_mocks.go github.com/dbschenker/node-undertaker/pkg/cloudproviders CLOUDPROVIDER
+
+type CLOUDPROVIDER interface {
+	ValidateConfig() error
+
+	// TerminateNode terminates node with provided providerId. Returns message (for creation of events) and error
+	TerminateNode(context.Context, string) (string, error)
+	// PrepareTermination prepares node to be terminated (i.e.
removes it from load balancers) + PrepareTermination(context.Context, string) (string, error) +} diff --git a/pkg/cloudproviders/kind/kind.go b/pkg/cloudproviders/kind/kind.go new file mode 100644 index 0000000..89fba44 --- /dev/null +++ b/pkg/cloudproviders/kind/kind.go @@ -0,0 +1,50 @@ +package kind + +import ( + "context" + "fmt" + log "github.com/sirupsen/logrus" + "os/exec" + "regexp" +) + +type KindCloudProvider struct { +} + +func CreateCloudProvider(ctx context.Context) (KindCloudProvider, error) { + log.Warnf("Kind cloud provider should be used only for development and testing. This provider is not intended for production use.") + ret := KindCloudProvider{} + + return ret, nil +} + +func (p KindCloudProvider) ValidateConfig() error { + return nil +} + +func (p KindCloudProvider) TerminateNode(ctx context.Context, cloudProviderNodeId string) (string, error) { + re, err := regexp.Compile("^kind://[^/]+/kind/(.+)$") + if err != nil { + return "InstanceTerminationFailed", err + } + matches := re.FindStringSubmatch(cloudProviderNodeId) + if len(matches) != 2 { + return "InstanceTerminationFailed", fmt.Errorf("couldn't parse providerId: %s", cloudProviderNodeId) + } + + cmd := exec.Command("docker", "stop", matches[1]) + err = cmd.Run() + if err != nil { + return "Instance Termination Failed", err + } + cmd = exec.Command("docker", "rm", matches[1]) + err = cmd.Run() + if err != nil { + return "Instance Termination Failed", err + } + return "Instance Terminated", nil +} + +func (p KindCloudProvider) PrepareTermination(ctx context.Context, cloudProviderNodeId string) (string, error) { + return "No preparation required", nil +} diff --git a/pkg/cloudproviders/kwok/cluster.go b/pkg/cloudproviders/kwok/cluster.go new file mode 100644 index 0000000..3388ee1 --- /dev/null +++ b/pkg/cloudproviders/kwok/cluster.go @@ -0,0 +1,57 @@ +package kwok + +import ( + "context" + "fmt" + "github.com/docker/go-connections/nat" + testcontainers 
"github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "testing" +) + +const ( + KwokImage = "registry.k8s.io/kwok/cluster:v0.2.0-k8s.v1.27.1" + KwokPort = "8080" +) + +func StartCluster(t *testing.T, ctx context.Context) (*kubernetes.Clientset, error) { + t.Helper() + + port := fmt.Sprintf("%s/tcp", KwokPort) + + req := testcontainers.ContainerRequest{ + Image: KwokImage, + ExposedPorts: []string{port}, + WaitingFor: wait.ForAll( + wait.ForListeningPort(nat.Port(port)), + wait.ForHTTP("/readyz").WithPort(nat.Port(port)), + ), + } + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + if err != nil { + return nil, err + } + + ip, err := container.Host(ctx) + if err != nil { + return nil, err + } + + mappedPort, err := container.MappedPort(ctx, KwokPort) + if err != nil { + return nil, err + } + + k8sCfg := rest.Config{ + Host: fmt.Sprintf("%s:%s", ip, mappedPort.Port()), + } + + clientset, err := kubernetes.NewForConfig(&k8sCfg) + + return clientset, err +} diff --git a/pkg/cloudproviders/kwok/cluster_test.go b/pkg/cloudproviders/kwok/cluster_test.go new file mode 100644 index 0000000..3f06438 --- /dev/null +++ b/pkg/cloudproviders/kwok/cluster_test.go @@ -0,0 +1,18 @@ +package kwok + +import ( + "context" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "testing" +) + +func TestStartCluster(t *testing.T) { + ctx := context.TODO() + clientset, err := StartCluster(t, ctx) + require.NoError(t, err) + list, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, list.Items, 0) +} diff --git a/pkg/cloudproviders/kwok/dummy_node.go b/pkg/cloudproviders/kwok/dummy_node.go new file mode 100644 index 0000000..c7543f2 --- /dev/null +++ 
b/pkg/cloudproviders/kwok/dummy_node.go @@ -0,0 +1,63 @@ +package kwok + +import ( + "context" + "fmt" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (k KwokCloudProvider) CreateNode(ctx context.Context, name string) error { + + node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + "node.alpha.kubernetes.io/ttl": "0", + "kwok.x-k8s.io/node": "fake", + }, + Labels: map[string]string{ + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": name, + "kubernetes.io/os": "linux", + "kubernetes.io/role": "agent", + "node-role.kubernetes.io/agent": "", + "type": "kwok", + }, + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("kwok://%s", name), + }, + Status: v1.NodeStatus{ + Allocatable: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("32"), + v1.ResourceMemory: resource.MustParse("64Gi"), + v1.ResourcePods: resource.MustParse("100"), + }, + Capacity: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("32"), + v1.ResourceMemory: resource.MustParse("64Gi"), + v1.ResourcePods: resource.MustParse("100"), + }, + NodeInfo: v1.NodeSystemInfo{ + Architecture: "amd64", + BootID: "", + ContainerRuntimeVersion: "", + KernelVersion: "", + KubeProxyVersion: "fake", + KubeletVersion: "fake", + MachineID: "", + OperatingSystem: "linux", + SystemUUID: "", + OSImage: "", + }, + }, + } + + _, err := k.K8sClient.CoreV1().Nodes().Create(ctx, &node, metav1.CreateOptions{}) + + return err +} diff --git a/pkg/cloudproviders/kwok/dummy_node_test.go b/pkg/cloudproviders/kwok/dummy_node_test.go new file mode 100644 index 0000000..db53c79 --- /dev/null +++ b/pkg/cloudproviders/kwok/dummy_node_test.go @@ -0,0 +1,35 @@ +package kwok + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + "testing" +) + +func TestCreateNode(t *testing.T) { + ctx := context.TODO() + + clientset, err := StartCluster(t, ctx) + require.NoError(t, err) + + cfg := config.Config{ + K8sClient: clientset, + } + kwokProvider, err := CreateCloudProvider(ctx, &cfg) + require.NoError(t, err) + + nodeName := fmt.Sprintf("kwok-test-create-node-%s", rand.String(20)) + + err = kwokProvider.CreateNode(ctx, nodeName) + assert.NoError(t, err) + ret, err := kwokProvider.K8sClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, ret) + assert.Equal(t, nodeName, ret.ObjectMeta.Name) + assert.Equal(t, fmt.Sprintf("kwok://%s", nodeName), ret.Spec.ProviderID) +} diff --git a/pkg/cloudproviders/kwok/kwok.go b/pkg/cloudproviders/kwok/kwok.go new file mode 100644 index 0000000..f11e24f --- /dev/null +++ b/pkg/cloudproviders/kwok/kwok.go @@ -0,0 +1,55 @@ +package kwok + +import ( + "context" + "errors" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "regexp" +) + +type KwokCloudProvider struct { + K8sClient kubernetes.Interface +} + +func CreateCloudProvider(ctx context.Context, cfg *config.Config) (KwokCloudProvider, error) { + log.Warnf("Kwok cloud provider should be used only for development and testing. 
This provider is not intended for production use.") + ret := KwokCloudProvider{} + ret.K8sClient = cfg.K8sClient + var err error = nil + + return ret, err +} + +func (p KwokCloudProvider) ValidateConfig() error { + return nil +} + +func (p KwokCloudProvider) TerminateNode(ctx context.Context, cloudProviderNodeId string) (string, error) { + re, err := regexp.Compile("^kwok://(.+)$") + if err != nil { + return "InstanceTerminationFailed", err + } + matches := re.FindStringSubmatch(cloudProviderNodeId) + if len(matches) != 2 { + return "InstanceTerminationFailed", fmt.Errorf("couldn't parse providerId: %s", cloudProviderNodeId) + } + + if p.K8sClient == nil { + return "Instance Termination Failed", errors.New("K8sclient is nil") + } + + err = p.K8sClient.CoreV1().Nodes().Delete(ctx, matches[1], metav1.DeleteOptions{}) + + if err != nil { + return "Instance Termination Failed", err + } + return "Instance Terminated", nil +} + +func (p KwokCloudProvider) PrepareTermination(ctx context.Context, cloudProviderNodeId string) (string, error) { + return "No preparation required", nil +} diff --git a/pkg/cloudproviders/kwok/kwok_test.go b/pkg/cloudproviders/kwok/kwok_test.go new file mode 100644 index 0000000..dfa002e --- /dev/null +++ b/pkg/cloudproviders/kwok/kwok_test.go @@ -0,0 +1,66 @@ +package kwok + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/rand" + "testing" +) + +func TestCreateCloudProvider(t *testing.T) { + cfg := config.Config{} + _, err := CreateCloudProvider(context.TODO(), &cfg) + assert.NoError(t, err) +} + +func TestValidateConfig(t *testing.T) { + ctx := context.TODO() + cfg := config.Config{} + cp, _ := CreateCloudProvider(ctx, &cfg) + err := cp.ValidateConfig() + assert.NoError(t, err) +} + +func TestPrepareTermination(t *testing.T) { + ctx := context.TODO() + clientset, err := StartCluster(t, 
ctx) + require.NoError(t, err) + + nodeName := fmt.Sprintf("kwok-test-terminate-node-%s", rand.String(20)) + cfg := config.Config{ + K8sClient: clientset, + } + + cp, _ := CreateCloudProvider(ctx, &cfg) + err = cp.CreateNode(ctx, nodeName) + assert.NoError(t, err) + + ret, err := cp.PrepareTermination(ctx, fmt.Sprintf("kwok://%s", nodeName)) + assert.NoError(t, err) + assert.Equal(t, "No preparation required", ret) +} + +func TestTerminateNode(t *testing.T) { + ctx := context.TODO() + clientset, err := StartCluster(t, ctx) + require.NoError(t, err) + + cfg := config.Config{ + K8sClient: clientset, + CloudTerminationDelay: 30, + } + cp, _ := CreateCloudProvider(ctx, &cfg) + + nodeName := fmt.Sprintf("kwok-test-terminate-node-%s", rand.String(20)) + + err = cp.CreateNode(ctx, nodeName) + assert.NoError(t, err) + + ret, err := cp.TerminateNode(ctx, fmt.Sprintf("kwok://%s", nodeName)) + assert.NoError(t, err) + assert.Equal(t, "Instance Terminated", ret) + +} diff --git a/pkg/kubeclient/client.go b/pkg/kubeclient/client.go new file mode 100644 index 0000000..4b0654e --- /dev/null +++ b/pkg/kubeclient/client.go @@ -0,0 +1,29 @@ +package kubeclient + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/clientcmd" +) + +// GetClient - gets kubernetes client with namespace it runs in +func GetClient() (kubernetes.Interface, string, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil) + config, err := kubeConfig.ClientConfig() + if err != nil { + return nil, "", err + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, "", err + } + namespace, _, err := kubeConfig.Namespace() + + return clientset, namespace, err +} + +func GetFakeClient() (kubernetes.Interface, string, error) { + return fake.NewSimpleClientset(), 
metav1.NamespaceDefault, nil +} diff --git a/pkg/kubeclient/leader_election.go b/pkg/kubeclient/leader_election.go new file mode 100644 index 0000000..104ad1e --- /dev/null +++ b/pkg/kubeclient/leader_election.go @@ -0,0 +1,62 @@ +package kubeclient + +import ( + "context" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "time" +) + +func LeaderElection(ctx context.Context, cfg *config.Config, workload func(ctx2 context.Context), cancel func()) { + id := uuid.New().String() + log.Infof("Starting leader election with id: %s", id) + + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: cfg.LeaseLockName, + Namespace: cfg.LeaseLockNamespace, + }, + Client: cfg.K8sClient.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. 
+ ReleaseOnCancel: true, + LeaseDuration: 60 * time.Second, + RenewDeadline: 20 * time.Second, + RetryPeriod: 5 * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + // we're notified when we start - this is where you would + // usually put your code + workload(ctx) + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + //os.Exit(0) + cancel() + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) +} diff --git a/pkg/nodeundertaker/config/config.go b/pkg/nodeundertaker/config/config.go new file mode 100644 index 0000000..ff7ae72 --- /dev/null +++ b/pkg/nodeundertaker/config/config.go @@ -0,0 +1,103 @@ +package config + +import ( + "fmt" + "github.com/dbschenker/node-undertaker/cmd/node-undertaker/flags" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" + "os" + "time" +) + +type Config struct { + CloudProvider cloudproviders.CLOUDPROVIDER + DrainDelay int + CloudTerminationDelay int + CloudPrepareTerminationDelay int + NodeInitialThreshold int + Port int + K8sClient kubernetes.Interface + InformerResync time.Duration + Namespace string + Hostname string + LeaseLockName string + LeaseLockNamespace string + NodeLeaseNamespace string + InitialDelay int + StartupTime time.Time + NodeSelector labels.Selector +} + +func GetConfig() (*Config, error) { + ret := Config{} + ret.InformerResync = 60 * time.Second + ret.DrainDelay = viper.GetInt(flags.DrainDelayFlag) + ret.CloudTerminationDelay = viper.GetInt(flags.CloudTerminationDelayFlag) + ret.CloudPrepareTerminationDelay = viper.GetInt(flags.CloudPrepareTerminationDelayFlag) + ret.Port = viper.GetInt(flags.PortFlag) + ret.Namespace = 
viper.GetString(flags.NamespaceFlag) + ret.LeaseLockNamespace = viper.GetString(flags.LeaseLockNamespaceFlag) + ret.LeaseLockName = viper.GetString(flags.LeaseLockNameFlag) + ret.NodeInitialThreshold = viper.GetInt(flags.NodeInitialThresholdFlag) + ret.NodeLeaseNamespace = viper.GetString(flags.NodeLeaseNamespaceFlag) + ret.InitialDelay = viper.GetInt(flags.InitialDelayFlag) + ret.StartupTime = time.Now() + + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + ret.Hostname = hostname + + selectors, err := labels.Parse(viper.GetString(flags.NodeSelectorFlag)) + if err != nil { + return nil, err + } + ret.NodeSelector = selectors + + return &ret, validateConfig(&ret) +} + +func (cfg *Config) SetK8sClient(k8sClient kubernetes.Interface, namespace string) { + cfg.K8sClient = k8sClient + if cfg.Namespace == "" { + log.Infof("Using autodetected namespace: %s", namespace) + cfg.Namespace = namespace + } + if cfg.LeaseLockNamespace == "" { + log.Infof("Using autodetected namespace for lease lock: %s", namespace) + cfg.LeaseLockNamespace = namespace + } + if cfg.NodeLeaseNamespace == "" { + log.Infof("Using autodetected namespace for node leases: %s", namespace) + cfg.NodeLeaseNamespace = namespace + } +} + +func validateConfig(cfg *Config) error { + if cfg.DrainDelay < 0 { + return fmt.Errorf("%s can't be lower than zero", flags.DrainDelayFlag) + } + if cfg.CloudTerminationDelay < 0 { + return fmt.Errorf("%s can't be lower than zero", flags.CloudTerminationDelayFlag) + } + if cfg.NodeInitialThreshold < 0 { + return fmt.Errorf("%s can't be lower than zero", flags.NodeInitialThresholdFlag) + } + + if cfg.LeaseLockName == "" { + return fmt.Errorf("%s can't be empty", flags.LeaseLockNameFlag) + } + + if cfg.Port < 0 { + return fmt.Errorf("%s can't be lower than zero", flags.PortFlag) + } + if cfg.InitialDelay < 0 { + return fmt.Errorf("%s can't be lower than zero", flags.InitialDelayFlag) + } + + return nil +} diff --git 
a/pkg/nodeundertaker/config/config_test.go b/pkg/nodeundertaker/config/config_test.go new file mode 100644 index 0000000..81053f5 --- /dev/null +++ b/pkg/nodeundertaker/config/config_test.go @@ -0,0 +1,211 @@ +package config + +import ( + "github.com/dbschenker/node-undertaker/cmd/node-undertaker/flags" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/kubernetes/fake" + "os" + "testing" +) + +func TestGetConfigNegativeValidation(t *testing.T) { + viper.Set(flags.DrainDelayFlag, -1) + _, err := GetConfig() + assert.Error(t, err) +} + +func TestGetConfigOk(t *testing.T) { + portValue := 1 + drainDelay := 29 + cloudTerminationDelay := 234 + cloudPrepareTerminationDelay := 544 + namespace := "ns1" + leaseLockNamespace := "ns2" + leaseLockName := "lease-lock1" + hostname, _ := os.Hostname() + + viper.Set(flags.PortFlag, portValue) + viper.Set(flags.DrainDelayFlag, drainDelay) + viper.Set(flags.CloudTerminationDelayFlag, cloudTerminationDelay) + viper.Set(flags.CloudPrepareTerminationDelayFlag, cloudPrepareTerminationDelay) + + viper.Set(flags.LeaseLockNamespaceFlag, leaseLockNamespace) + viper.Set(flags.NamespaceFlag, namespace) + viper.Set(flags.LeaseLockNameFlag, leaseLockName) + + ret, err := GetConfig() + + assert.NoError(t, err) + assert.NotNil(t, ret) + assert.Positive(t, ret.InformerResync) + assert.Equal(t, portValue, ret.Port) + assert.Equal(t, hostname, ret.Hostname) + assert.Equal(t, leaseLockName, ret.LeaseLockName) + assert.Equal(t, leaseLockNamespace, ret.LeaseLockNamespace) + assert.Equal(t, drainDelay, ret.DrainDelay) + assert.Equal(t, cloudPrepareTerminationDelay, ret.CloudPrepareTerminationDelay) + assert.Equal(t, cloudTerminationDelay, ret.CloudTerminationDelay) + assert.Equal(t, namespace, ret.Namespace) + assert.Nil(t, ret.NodeSelector) +} + +func TestGetConfigNodeSelectorNok(t *testing.T) { + namespaceSelector := "__=9999" + 
viper.Set(flags.NodeSelectorFlag, namespaceSelector) + + cfg, err := GetConfig() + assert.Nil(t, cfg) + assert.Error(t, err) +} + +func TestGetConfigNodeSelectorOk1(t *testing.T) { + namespaceSelector := "node.undertaker/powered=true" + viper.Set(flags.NodeSelectorFlag, namespaceSelector) + viper.Set(flags.LeaseLockNameFlag, "some-value") + + cfg, err := GetConfig() + assert.NotNil(t, cfg) + assert.NoError(t, err) + assert.Len(t, cfg.NodeSelector, 1) + requirements, _ := cfg.NodeSelector.Requirements() + assert.Equal(t, selection.Operator("="), requirements[0].Operator()) + assert.Equal(t, "node.undertaker/powered", requirements[0].Key()) + assert.Equal(t, []string{"true"}, requirements[0].Values().List()) + + testLabelSets := []labels.Set{ + labels.Set{"node.undertaker/powered": "true"}, + labels.Set{"node.undertaker/powered": "false"}, + labels.Set{"anyother": "false"}, + labels.Set{}, + } + testLabelSetResults := []bool{true, false, false, false} + + for k := range testLabelSets { + assert.Equal(t, testLabelSetResults[k], cfg.NodeSelector.Matches(testLabelSets[k])) + } +} + +func TestGetConfigNodeSelectorOk2(t *testing.T) { + namespaceSelector := "karpenter!=true" + + viper.Set(flags.NodeSelectorFlag, namespaceSelector) + viper.Set(flags.LeaseLockNameFlag, "some-value") + + cfg, err := GetConfig() + assert.NotNil(t, cfg) + assert.NoError(t, err) + assert.Len(t, cfg.NodeSelector, 1) + + testLabelSets := []labels.Set{ + labels.Set{"karpenter": "false"}, + labels.Set{"karpenter": "true"}, + labels.Set{"anyother": "false"}, + labels.Set{}, + } + testLabelSetResults := []bool{true, false, true, true} + + for k := range testLabelSets { + assert.Equal(t, testLabelSetResults[k], cfg.NodeSelector.Matches(testLabelSets[k])) + } +} + +func TestValidateConfigOk(t *testing.T) { + cfg := &Config{ + DrainDelay: 1, + CloudTerminationDelay: 1, + Port: 8080, + LeaseLockName: "test", + } + err := validateConfig(cfg) + assert.NoError(t, err) +} + +func 
TestValidateConfigErrDrainDelay(t *testing.T) { + cfg := &Config{ + DrainDelay: -1, + CloudTerminationDelay: 1, + Port: 8080, + LeaseLockName: "test", + } + err := validateConfig(cfg) + assert.Error(t, err) +} + +func TestValidateConfigErrCloudTerminationDelay(t *testing.T) { + cfg := &Config{ + DrainDelay: 1, + CloudTerminationDelay: -1, + Port: 8080, + LeaseLockName: "test", + } + err := validateConfig(cfg) + assert.Error(t, err) +} + +func TestValidateConfigErrNodeInitialThreshold(t *testing.T) { + cfg := &Config{ + DrainDelay: 1, + CloudTerminationDelay: 1, + NodeInitialThreshold: -1, + Port: 8080, + LeaseLockName: "test", + } + err := validateConfig(cfg) + assert.Error(t, err) +} + +func TestValidateConfigErrLeaseName(t *testing.T) { + cfg := &Config{ + DrainDelay: 1, + CloudTerminationDelay: 1, + Port: 8080, + LeaseLockName: "", + } + err := validateConfig(cfg) + assert.Error(t, err) +} + +func TestValidateConfigErrInitialDelay(t *testing.T) { + cfg := &Config{ + DrainDelay: 1, + CloudTerminationDelay: 1, + Port: 8080, + LeaseLockName: "test", + InitialDelay: -1, + } + err := validateConfig(cfg) + assert.Error(t, err) +} + +func TestSetK8sClient(t *testing.T) { + client := fake.NewSimpleClientset() + currentNamespace := "test" + cfg := Config{} + cfg.SetK8sClient(client, currentNamespace) + assert.Equal(t, currentNamespace, cfg.Namespace) + assert.Equal(t, currentNamespace, cfg.LeaseLockNamespace) + assert.Equal(t, currentNamespace, cfg.NodeLeaseNamespace) + assert.Equal(t, client, cfg.K8sClient) +} + +func TestSetK8sClient1(t *testing.T) { + client := fake.NewSimpleClientset() + currentNamespace := "test" + leaseLockNs := "lease-lock-ns" + nodeLeaseNs := "node-leases" + appNamespace := "app-ns" + cfg := Config{ + LeaseLockNamespace: leaseLockNs, + Namespace: appNamespace, + NodeLeaseNamespace: nodeLeaseNs, + } + + cfg.SetK8sClient(client, currentNamespace) + assert.Equal(t, appNamespace, cfg.Namespace) + assert.Equal(t, leaseLockNs, cfg.LeaseLockNamespace) + 
assert.Equal(t, nodeLeaseNs, cfg.NodeLeaseNamespace) + assert.Equal(t, client, cfg.K8sClient) +} diff --git a/pkg/nodeundertaker/config/init_test.go b/pkg/nodeundertaker/config/init_test.go new file mode 100644 index 0000000..d778273 --- /dev/null +++ b/pkg/nodeundertaker/config/init_test.go @@ -0,0 +1,11 @@ +package config + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +func init() { + viper.Reset() + logrus.Infof("Initialized tests") +} diff --git a/pkg/nodeundertaker/init_test.go b/pkg/nodeundertaker/init_test.go new file mode 100644 index 0000000..dc90b1f --- /dev/null +++ b/pkg/nodeundertaker/init_test.go @@ -0,0 +1,13 @@ +package nodeundertaker + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +func init() { + viper.Reset() + prometheus.NewRegistry() + logrus.Infof("Initialized tests") +} diff --git a/pkg/nodeundertaker/node/events.go b/pkg/nodeundertaker/node/events.go new file mode 100644 index 0000000..2b83b66 --- /dev/null +++ b/pkg/nodeundertaker/node/events.go @@ -0,0 +1,69 @@ +package node + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + "strings" + "time" +) + +const ( + ReportingController = "dbschenker.com/node-undertaker" +) + +func ReportEvent(ctx context.Context, cfg *config.Config, lvl log.Level, n NODE, action, reason, reasonDesc, msgOverride string) { + microTime := metav1.NewMicroTime(time.Now()) + msg := msgOverride + if msg == "" { + if reasonDesc != "" { + msg = fmt.Sprintf("%s due to %s", strings.ToLower(reason), reasonDesc) + } else { + msg = strings.ToLower(reason) + } + } + var eventType string = "" + switch lvl { + case log.ErrorLevel: + eventType = "Warning" + case log.WarnLevel: + eventType = 
"Warning" + case log.InfoLevel: + eventType = "Normal" + default: + log.Errorf("Unsupported event level: %s", log.ErrorLevel.String()) + return + } + evt := eventsv1.Event{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-undertaker.%s", rand.String(16)), + Namespace: cfg.Namespace, + }, + EventTime: microTime, + //Related: - second object related to event + ReportingController: ReportingController, + ReportingInstance: cfg.Hostname, + Action: action, + Reason: reason, + Regarding: v1.ObjectReference{ + Namespace: cfg.Namespace, + Name: n.GetName(), + Kind: n.GetKind(), + }, + + Note: msg, + Type: eventType, + } + + log.StandardLogger().Log(lvl, fmt.Sprintf("%s/%s: %s", n.GetKind(), n.GetName(), msg)) + _, err := cfg.K8sClient.EventsV1().Events(cfg.Namespace).Create(ctx, &evt, metav1.CreateOptions{}) + if err != nil { + log.Errorf("Couldn't create event: %s\n due to %v", msg, err) + } +} diff --git a/pkg/nodeundertaker/node/events_test.go b/pkg/nodeundertaker/node/events_test.go new file mode 100644 index 0000000..970a079 --- /dev/null +++ b/pkg/nodeundertaker/node/events_test.go @@ -0,0 +1,185 @@ +package node + +import ( + "context" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders/kwok" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "strings" + "testing" +) + +func TestReportEvent(t *testing.T) { + namespace := "test" + nodeName := "test-node" + action := "DummyAction" + reason := "DummyReason" + hostname := "dummy-host" + reasonDesc := "" + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + Hostname: hostname, + } + lvl := logrus.ErrorLevel + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + node := 
CreateNode(&nodev1) + ReportEvent(context.TODO(), &cfg, lvl, node, action, reason, reasonDesc, "") + + events, err := cfg.K8sClient.EventsV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.NotNil(t, events) + assert.Len(t, events.Items, 1) + ev := events.Items[0] + assert.True(t, strings.HasPrefix(ev.ObjectMeta.Name, "node-undertaker.")) + assert.Equal(t, namespace, ev.ObjectMeta.Namespace) + assert.Equal(t, action, ev.Action) + assert.Equal(t, reason, ev.Reason) + assert.Equal(t, ReportingController, ev.ReportingController) + assert.Equal(t, hostname, ev.ReportingInstance) + assert.Equal(t, "Warning", ev.Type) + assert.NotEmpty(t, ev.Note) +} + +func TestReportEventReasonDesc(t *testing.T) { + namespace := "test" + nodeName := "test-node" + action := "DummyAction" + reason := "DummyReason" + hostname := "dummy-host" + reasonDesc := "test-reason-desc" + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + Hostname: hostname, + } + lvl := logrus.InfoLevel + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + node := CreateNode(&nodev1) + ReportEvent(context.TODO(), &cfg, lvl, node, action, reason, reasonDesc, "") + + events, err := cfg.K8sClient.EventsV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.NotNil(t, events) + assert.Len(t, events.Items, 1) + ev := events.Items[0] + assert.True(t, strings.HasPrefix(ev.ObjectMeta.Name, "node-undertaker.")) + assert.Equal(t, namespace, ev.ObjectMeta.Namespace) + assert.Equal(t, action, ev.Action) + assert.Equal(t, reason, ev.Reason) + assert.Equal(t, ReportingController, ev.ReportingController) + assert.Equal(t, hostname, ev.ReportingInstance) + assert.Equal(t, "Normal", ev.Type) + assert.Contains(t, ev.Note, reasonDesc) +} + +func TestReportEventReasonOverride(t *testing.T) { + namespace := "test" + nodeName := "test-node" + action := "DummyAction" + reason := 
"DummyReason" + hostname := "dummy-host" + reasonDesc := "test-reason-desc" + reasonOverride := "override-message" + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + Hostname: hostname, + } + lvl := logrus.WarnLevel + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + node := CreateNode(&nodev1) + ReportEvent(context.TODO(), &cfg, lvl, node, action, reason, reasonDesc, reasonOverride) + + events, err := cfg.K8sClient.EventsV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.NotNil(t, events) + assert.Len(t, events.Items, 1) + ev := events.Items[0] + assert.True(t, strings.HasPrefix(ev.ObjectMeta.Name, "node-undertaker.")) + assert.Equal(t, namespace, ev.ObjectMeta.Namespace) + assert.Equal(t, action, ev.Action) + assert.Equal(t, reason, ev.Reason) + assert.Equal(t, ReportingController, ev.ReportingController) + assert.Equal(t, hostname, ev.ReportingInstance) + assert.Equal(t, "Warning", ev.Type) + assert.Equal(t, reasonOverride, ev.Note) +} + +func TestReportEventUnsupportedLevel(t *testing.T) { + namespace := "test" + nodeName := "test-node" + action := "DummyAction" + reason := "DummyReason" + hostname := "dummy-host" + reasonDesc := "test-reason-desc" + reasonOverride := "override-message" + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + Hostname: hostname, + } + lvl := logrus.DebugLevel + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + node := CreateNode(&nodev1) + ReportEvent(context.TODO(), &cfg, lvl, node, action, reason, reasonDesc, reasonOverride) + + events, err := cfg.K8sClient.EventsV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.NotNil(t, events) + assert.Len(t, events.Items, 0) +} + +func TestReportEventWithKwok(t *testing.T) { + namespace := "test" + nodeName := "test-node" + action := "DummyAction" + reason := "DummyReason" 
+ hostname := "dummy-host" + reasonDesc := "" + + ctx := context.TODO() + + clientset, err := kwok.StartCluster(t, ctx) + require.NoError(t, err) + + cfg := config.Config{ + K8sClient: clientset, + Namespace: namespace, + Hostname: hostname, + } + + lvl := logrus.ErrorLevel + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + node := CreateNode(&nodev1) + ReportEvent(ctx, &cfg, lvl, node, action, reason, reasonDesc, "") + + events, err := cfg.K8sClient.EventsV1().Events(namespace).List(ctx, metav1.ListOptions{}) + assert.NoError(t, err) + assert.NotNil(t, events) + assert.Len(t, events.Items, 1) + ev := events.Items[0] + assert.True(t, strings.HasPrefix(ev.ObjectMeta.Name, "node-undertaker.")) + assert.Equal(t, namespace, ev.ObjectMeta.Namespace) + assert.Equal(t, action, ev.Action) + assert.Equal(t, reason, ev.Reason) + assert.Equal(t, ReportingController, ev.ReportingController) + assert.Equal(t, hostname, ev.ReportingInstance) + assert.Equal(t, "Warning", ev.Type) + assert.NotEmpty(t, ev.Note) +} diff --git a/pkg/nodeundertaker/node/node.go b/pkg/nodeundertaker/node/node.go new file mode 100644 index 0000000..c72a3a2 --- /dev/null +++ b/pkg/nodeundertaker/node/node.go @@ -0,0 +1,232 @@ +package node + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + log "github.com/sirupsen/logrus" + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/drain" + "time" +) + +//go:generate mockgen -destination=./mocks/api_mocks.go github.com/dbschenker/node-undertaker/pkg/nodeundertaker/node NODE + +const ( + TaintKey = "dbschenker.com/node-undertaker" + TaintValue = "" + Label = "dbschenker.com/node-undertaker" + TimestampAnnotation = "dbschenker.com/node-undertaker-timestamp" +) + +const ( + NodeUnhealthy string = "unhealthy" + NodeTerminating = "terminating" + NodeTainted = 
"tainted" + NodeDraining = "draining" + NodeHealthy = "" + NodePreparingTermination = "preparing_termination" + NodeTerminationPrepared = "termination_prepared" +) + +type Node struct { + *v1.Node + changed bool +} + +type NODE interface { + IsGrownUp(cfg *config.Config) bool + HasFreshLease(ctx context.Context, cfg *config.Config) (bool, error) + GetLabel() string + RemoveLabel() + RemoveActionTimestamp() + SetLabel(label string) + SetActionTimestamp(t time.Time) + GetActionTimestamp() (time.Time, error) + Taint() + Untaint() + StartDrain(ctx context.Context, cfg *config.Config) + Terminate(ctx context.Context, cfg *config.Config) (string, error) + PrepareTermination(ctx context.Context, cfg *config.Config) (string, error) + Save(ctx context.Context, cfg *config.Config) error + GetName() string + GetKind() string +} + +func CreateNode(n *v1.Node) *Node { + node := Node{ + Node: n.DeepCopy(), + changed: false, + } + if node.Labels == nil { + node.Labels = make(map[string]string) + } + if node.Annotations == nil { + node.Annotations = make(map[string]string) + } + return &node +} + +func (n *Node) IsGrownUp(cfg *config.Config) bool { + creationTime := n.ObjectMeta.CreationTimestamp + before := metav1.NewTime(time.Now().Add(-time.Second * time.Duration(cfg.NodeInitialThreshold))) + return creationTime.Before(&before) +} + +func (n *Node) HasFreshLease(ctx context.Context, cfg *config.Config) (bool, error) { + lease, err := n.findLease(ctx, cfg) + if errors.IsNotFound(err) { + log.Warnf("lease not found for node %s: %v", n.Node.ObjectMeta.Name, err) + return false, nil + } else if err != nil { + return false, err + } + + leaseDuration := time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second + isFresh := lease.Spec.RenewTime.Add(leaseDuration).After(time.Now()) + return isFresh, nil +} + +func (n *Node) GetLabel() string { + if val, exists := n.Labels[Label]; exists { + return val + } + return "" +} + +func (n *Node) RemoveLabel() { + if _, found := 
n.ObjectMeta.Labels[Label]; found { + delete(n.ObjectMeta.Labels, Label) + n.changed = true + } +} + +func (n *Node) RemoveActionTimestamp() { + if _, found := n.ObjectMeta.Annotations[TimestampAnnotation]; found { + delete(n.ObjectMeta.Annotations, TimestampAnnotation) + n.changed = true + } +} + +func (n *Node) SetLabel(label string) { + n.ObjectMeta.Labels[Label] = label + n.changed = true +} + +func (n *Node) SetActionTimestamp(t time.Time) { + n.changed = true + n.ObjectMeta.Annotations[TimestampAnnotation] = t.Format(time.RFC3339) + return +} + +func (n *Node) GetActionTimestamp() (time.Time, error) { + if val, ok := n.ObjectMeta.Annotations[TimestampAnnotation]; ok { + ret, err := time.Parse(time.RFC3339, val) + return ret, err + } + return time.Now(), fmt.Errorf("node %s doesn't have annotation: %s", n.ObjectMeta.Name, TimestampAnnotation) +} + +func (n *Node) Taint() { + taint := v1.Taint{ + Key: TaintKey, + Value: TaintValue, + Effect: v1.TaintEffectNoSchedule, + } + + for i := range n.Spec.Taints { + if n.Spec.Taints[i] == taint { + return + } + } + n.Spec.Taints = append(n.Spec.Taints, taint) + n.changed = true +} + +func (n *Node) Untaint() { + taint := v1.Taint{ + Key: TaintKey, + Value: TaintValue, + Effect: v1.TaintEffectNoSchedule, + } + + // assume that there is only taint with same set of parameters (api sever should guard this) + newTaints := make([]v1.Taint, 0) + for i := range n.Spec.Taints { + if n.Spec.Taints[i] != taint { + newTaints = append(newTaints, n.Spec.Taints[i]) + } else { + n.changed = true + } + } + if n.changed { + n.Spec.Taints = newTaints + } +} + +func (n *Node) StartDrain(ctx context.Context, cfg *config.Config) { + //https://github.com/aws/aws-node-termination-handler/blob/main/pkg/node/node.go#L106 + drainHelper := drain.Helper{ + Client: cfg.K8sClient, + Ctx: ctx, + Force: true, + GracePeriodSeconds: -1, //use pods terminationGracePeriodSeconds + IgnoreAllDaemonSets: true, + DeleteEmptyDirData: true, + Timeout: 
time.Duration(cfg.CloudTerminationDelay) * time.Second, + DisableEviction: false, // true - use delete rather than evict + OnPodDeletedOrEvicted: func(pod *v1.Pod, usingEviction bool) { + operation := "deleted" + if usingEviction { + operation = "evicted" + } + log.Debugf("Pod %s in namespace: %s %s", pod.ObjectMeta.Name, pod.ObjectMeta.Namespace, operation) + }, + Out: log.StandardLogger().Out, + ErrOut: log.StandardLogger().Out, + } + + go func() { + err := drain.RunNodeDrain(&drainHelper, n.GetName()) + if err != nil { + ReportEvent(ctx, cfg, log.ErrorLevel, n, "Drain", "Drain Failed", err.Error(), "") + return + } + ReportEvent(ctx, cfg, log.InfoLevel, n, "Drain", "Drain Completed", "", "") + }() + +} + +// Terminate deletes node from cloud provider +func (n *Node) Terminate(ctx context.Context, cfg *config.Config) (string, error) { + return cfg.CloudProvider.TerminateNode(ctx, n.Spec.ProviderID) +} + +func (n *Node) PrepareTermination(ctx context.Context, cfg *config.Config) (string, error) { + return cfg.CloudProvider.PrepareTermination(ctx, n.Spec.ProviderID) +} + +// TODO: check if saving whole object works fine. 
Maybe it should be done using patches: https://stackoverflow.com/questions/57310483/whats-the-shortest-way-to-add-a-label-to-a-pod-using-the-kubernetes-go-client +func (n *Node) Save(ctx context.Context, cfg *config.Config) error { + if n.changed { + _, err := cfg.K8sClient.CoreV1().Nodes().Update(ctx, n.Node, metav1.UpdateOptions{}) + //TODO maybe Patch instead of Update will work better + return err + } + return nil +} + +func (n *Node) findLease(ctx context.Context, cfg *config.Config) (*coordinationv1.Lease, error) { + return cfg.K8sClient.CoordinationV1().Leases(cfg.NodeLeaseNamespace).Get(ctx, n.ObjectMeta.Name, metav1.GetOptions{ResourceVersion: "0"}) +} + +func (n *Node) GetName() string { + return n.ObjectMeta.Name +} + +func (n *Node) GetKind() string { + return "Node" +} diff --git a/pkg/nodeundertaker/node/node_test.go b/pkg/nodeundertaker/node/node_test.go new file mode 100644 index 0000000..85413f5 --- /dev/null +++ b/pkg/nodeundertaker/node/node_test.go @@ -0,0 +1,847 @@ +package node + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders/kwok" + mockcloudproviders "github.com/dbschenker/node-undertaker/pkg/cloudproviders/mocks" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "testing" + "time" +) + +func TestNodeIsGrownUp(t *testing.T) { + cfg := config.Config{NodeInitialThreshold: 5} + creationTime := metav1.Now().Add(-20 * time.Second).UTC() + + v1node := v1.Node{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "dummy", + CreationTimestamp: metav1.NewTime(creationTime), + }, + } + + node := CreateNode(&v1node) + + res := node.IsGrownUp(&cfg) + assert.True(t, res) +} + +func TestNodeIsGrownUpNot(t *testing.T) { + cfg := config.Config{NodeInitialThreshold: 90} + creationTime := metav1.Now() + + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + CreationTimestamp: creationTime, + }, + } + + node := CreateNode(&v1node) + + res := node.IsGrownUp(&cfg) + assert.False(t, res) +} + +func TestGetLabelOk(t *testing.T) { + labelValue := "test" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{ + Label: labelValue, + }, + }, + } + n := Node{ + Node: &v1node, + changed: false, + } + ret := n.GetLabel() + assert.Equal(t, labelValue, ret) +} + +func TestGetLabelEmpty(t *testing.T) { + labelValue := "" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{ + Label: labelValue, + }, + }, + } + n := CreateNode(&v1node) + ret := n.GetLabel() + assert.Equal(t, labelValue, ret) +} + +func TestGetLabelNone(t *testing.T) { + expectedLabelValue := "" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{}, + }, + } + n := CreateNode(&v1node) + ret := n.GetLabel() + assert.Equal(t, expectedLabelValue, ret) +} + +func TestSetLabelOk(t *testing.T) { + labelValue := "test" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + } + n := CreateNode(&v1node) + n.SetLabel(labelValue) + + ret, exists := n.ObjectMeta.Labels[Label] + assert.Equal(t, labelValue, ret) + assert.True(t, exists) + assert.True(t, n.changed) +} + +func TestSetLabelEmpty(t *testing.T) { + labelValue := "test" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{}, + }, + } + n := CreateNode(&v1node) + n.SetLabel(labelValue) + + ret, exists := n.ObjectMeta.Labels[Label] + 
assert.Equal(t, labelValue, ret) + assert.True(t, exists) + assert.True(t, n.changed) +} + +func TestSetLabelOverwrite(t *testing.T) { + expectedLabelValue := "new-value" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{ + Label: "old-value", + }, + }, + } + n := CreateNode(&v1node) + n.SetLabel(expectedLabelValue) + + ret, exists := n.ObjectMeta.Labels[Label] + assert.Equal(t, expectedLabelValue, ret) + assert.True(t, exists) + assert.True(t, n.changed) +} + +func TestSaveNoChange(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + } + _, err := cfg.K8sClient.CoreV1().Nodes().Create(context.TODO(), &nodev1, metav1.CreateOptions{}) + require.NoError(t, err) + + nodes, err := cfg.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, nodes.Items, 1) + assert.Equal(t, nodeName, nodes.Items[0].Name) + assert.Empty(t, nodes.Items[0].Spec.ProviderID) + + node := CreateNode(&nodev1) + node.Spec.ProviderID = "test" + + err = node.Save(context.TODO(), &cfg) + assert.NoError(t, err) + + nodes, err = cfg.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, nodes.Items, 1) + assert.Equal(t, nodeName, nodes.Items[0].Name) + assert.Empty(t, nodes.Items[0].Spec.ProviderID) +} + +func TestSaveChange(t *testing.T) { + nodeName := "node1" + newProviderId := "test" + + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + } + _, err := cfg.K8sClient.CoreV1().Nodes().Create(context.TODO(), &nodev1, metav1.CreateOptions{}) + require.NoError(t, err) + + nodes, err := cfg.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, nodes.Items, 1) + assert.Equal(t, 
nodeName, nodes.Items[0].Name) + assert.Empty(t, nodes.Items[0].Spec.ProviderID) + + node := CreateNode(&nodev1) + node.Spec.ProviderID = newProviderId + node.changed = true + + err = node.Save(context.TODO(), &cfg) + assert.NoError(t, err) + + nodes, err = cfg.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, nodes.Items, 1) + assert.Equal(t, nodeName, nodes.Items[0].Name) + assert.Equal(t, newProviderId, nodes.Items[0].Spec.ProviderID) +} + +func TestTaintNoTaints(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + + node := CreateNode(&nodev1) + node.Taint() + + assert.Len(t, node.Spec.Taints, 1) + assert.Contains(t, node.Spec.Taints, v1.Taint{ + Key: TaintKey, Value: "", Effect: v1.TaintEffectNoSchedule, + }) + assert.True(t, node.changed) +} + +func TestTaintDifferentTaints(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + v1.Taint{Key: "sample", Value: "different", Effect: v1.TaintEffectPreferNoSchedule}, + }, + }, + } + + node := CreateNode(&nodev1) + node.Taint() + + assert.Len(t, node.Spec.Taints, 2) + assert.Contains(t, node.Spec.Taints, v1.Taint{ + Key: TaintKey, Value: TaintValue, Effect: v1.TaintEffectNoSchedule, + }) + assert.Contains(t, node.Spec.Taints, v1.Taint{ + Key: "sample", Value: "different", Effect: v1.TaintEffectPreferNoSchedule, + }) + assert.True(t, node.changed) +} + +func TestTaintExistingTaint(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + { + Key: TaintKey, + Value: TaintValue, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + } + + node := CreateNode(&nodev1) + node.Taint() + + assert.Len(t, node.Spec.Taints, 1) + assert.Contains(t, node.Spec.Taints, v1.Taint{ + Key: TaintKey, + Value: TaintValue, + 
Effect: v1.TaintEffectNoSchedule, + }) + assert.False(t, node.changed) +} + +func TestUntaintNoTaint(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + Spec: v1.NodeSpec{}, + } + + node := CreateNode(&nodev1) + node.Untaint() + + assert.Len(t, node.Spec.Taints, 0) + assert.False(t, node.changed) +} + +func TestUntaintExistingTaints(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + {Key: "sample", Value: "different", Effect: v1.TaintEffectPreferNoSchedule}, + {Key: TaintKey, Value: TaintValue, Effect: v1.TaintEffectNoSchedule}, + {Key: "sample2", Value: "different2", Effect: v1.TaintEffectPreferNoSchedule}, + }, + }, + } + + node := CreateNode(&nodev1) + node.Untaint() + + assert.Len(t, node.Spec.Taints, 2) + assert.True(t, node.changed) + assert.NotContains(t, node.Spec.Taints, v1.Taint{Key: TaintKey, Value: TaintValue, Effect: v1.TaintEffectNoSchedule}) +} + +func TestSetActionTimestampNone(t *testing.T) { + nodeName := "node1" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + node := CreateNode(&nodev1) + tret, err := node.GetActionTimestamp() + assert.Error(t, err) + assert.True(t, tret.After(time.Now().Add(-time.Hour))) + assert.True(t, tret.Before(time.Now())) +} + +func TestSetActionTimestampExists(t *testing.T) { + nodeName := "node1" + tnow := time.Now().Truncate(time.Second).UTC() + ti := tnow.Format(time.RFC3339) + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{ + TimestampAnnotation: ti, + }, + }, + } + node := CreateNode(&nodev1) + tret, err := node.GetActionTimestamp() + assert.NoError(t, err) + assert.Equal(t, tnow, tret) +} + +func TestSetActionTimestampWrongFormat(t *testing.T) { + nodeName := "node1" + ti := "test string" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: 
map[string]string{ + TimestampAnnotation: ti, + }, + }, + } + node := CreateNode(&nodev1) + _, err := node.GetActionTimestamp() // don't care about the date + assert.Error(t, err) +} + +func TestFindLeaseOk(t *testing.T) { + nodeName := "node1" + namespace := "example-lease-ns" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + lease := coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: namespace, + }, + } + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + NodeLeaseNamespace: namespace, + } + _, err := cfg.K8sClient.CoordinationV1().Leases(namespace).Create(context.TODO(), &lease, metav1.CreateOptions{}) + require.NoError(t, err) + + node := CreateNode(&nodev1) + leaseret, err := node.findLease(context.TODO(), &cfg) + assert.NoError(t, err) + assert.Equal(t, lease, *leaseret) +} + +func TestFindLeaseMissing(t *testing.T) { + nodeName := "node1" + namespace := "example-lease-ns" + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + } + + node := CreateNode(&nodev1) + leaseret, err := node.findLease(context.TODO(), &cfg) + assert.Error(t, err) + assert.Equal(t, metav1.StatusReasonNotFound, errors.ReasonForError(err)) + assert.Nil(t, leaseret) +} + +func TestHasFreshLeaseOk(t *testing.T) { + nodeName := "node1" + namespace := "example-lease-ns" + leaseDuration := int32(90) + renewTime := metav1.NewMicroTime(time.Now().Add(-10 * time.Second)) + + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + lease := coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: namespace, + }, + Spec: coordinationv1.LeaseSpec{ + LeaseDurationSeconds: &leaseDuration, + RenewTime: &renewTime, + }, + } + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + NodeLeaseNamespace: namespace, + } + _, err := 
cfg.K8sClient.CoordinationV1().Leases(namespace).Create(context.TODO(), &lease, metav1.CreateOptions{}) + require.NoError(t, err) + + node := CreateNode(&nodev1) + ret, err := node.HasFreshLease(context.TODO(), &cfg) + + assert.NoError(t, err) + assert.True(t, ret) +} + +func TestHasFreshLeaseNok(t *testing.T) { + nodeName := "node1" + namespace := "example-lease-ns" + leaseDuration := int32(90) + renewTime := metav1.NewMicroTime(time.Now().Add(-1000 * time.Second)) + + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + lease := coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: namespace, + }, + Spec: coordinationv1.LeaseSpec{ + LeaseDurationSeconds: &leaseDuration, + RenewTime: &renewTime, + }, + } + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + } + _, err := cfg.K8sClient.CoordinationV1().Leases(namespace).Create(context.TODO(), &lease, metav1.CreateOptions{}) + require.NoError(t, err) + + node := CreateNode(&nodev1) + ret, err := node.HasFreshLease(context.TODO(), &cfg) + + assert.NoError(t, err) + assert.False(t, ret) +} + +func TestHasFreshLeaseNolease(t *testing.T) { + nodeName := "node1" + namespace := "example-lease-ns" + + nodev1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: nodeName}, + } + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespace, + } + + node := CreateNode(&nodev1) + ret, err := node.HasFreshLease(context.TODO(), &cfg) + + assert.NoError(t, err) + assert.False(t, ret) +} + +func TestRemoveLabelOk(t *testing.T) { + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{ + Label: "old-value", + }, + }, + } + n := CreateNode(&v1node) + n.RemoveLabel() + + ret, exists := n.ObjectMeta.Labels[Label] + assert.Equal(t, "", ret) + assert.False(t, exists) + assert.True(t, n.changed) +} + +func TestRemoveLabelNotExisting(t *testing.T) { + v1node := v1.Node{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "dummy", + Labels: map[string]string{ + "test": "old-value", + }, + }, + } + n := CreateNode(&v1node) + n.RemoveLabel() + + ret, exists := n.ObjectMeta.Labels[Label] + assert.Equal(t, "", ret) + assert.False(t, exists) + assert.False(t, n.changed) +} + +func TestRemoveActionTimestampOk(t *testing.T) { + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Annotations: map[string]string{ + TimestampAnnotation: "old-value", + }, + }, + } + n := CreateNode(&v1node) + n.RemoveActionTimestamp() + + ret, exists := n.ObjectMeta.Annotations[TimestampAnnotation] + assert.Equal(t, "", ret) + assert.False(t, exists) + assert.True(t, n.changed) +} + +func TestRemoveActionTimestampNotExisting(t *testing.T) { + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Annotations: map[string]string{ + "test": "old-value", + }, + }, + } + n := CreateNode(&v1node) + n.RemoveActionTimestamp() + + ret, exists := n.ObjectMeta.Annotations[TimestampAnnotation] + assert.Equal(t, "", ret) + assert.False(t, exists) + assert.False(t, n.changed) +} + +func TestPrepareTermination(t *testing.T) { + termianteAction := "TestAction" + mockCtrl := gomock.NewController(t) + cloudProvider := mockcloudproviders.NewMockCLOUDPROVIDER(mockCtrl) + cloudProvider.EXPECT().PrepareTermination(gomock.Any(), gomock.Any()).Return(termianteAction, nil).Times(1) + + cfg := config.Config{ + CloudProvider: cloudProvider, + } + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + } + n := CreateNode(&v1node) + res, err := n.PrepareTermination(context.TODO(), &cfg) + assert.NoError(t, err) + assert.Equal(t, termianteAction, res) +} + +func TestTerminate(t *testing.T) { + termianteAction := "TestAction" + mockCtrl := gomock.NewController(t) + cloudProvider := mockcloudproviders.NewMockCLOUDPROVIDER(mockCtrl) + cloudProvider.EXPECT().TerminateNode(gomock.Any(), gomock.Any()).Return(termianteAction, nil).Times(1) + + cfg := config.Config{ 
+ CloudProvider: cloudProvider, + } + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + } + n := CreateNode(&v1node) + res, err := n.Terminate(context.TODO(), &cfg) + assert.NoError(t, err) + assert.Equal(t, termianteAction, res) +} + +func TestGetName(t *testing.T) { + expectedName := "dummy123" + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: expectedName, + }, + } + n := CreateNode(&v1node) + ret := n.GetName() + assert.Equal(t, expectedName, ret) +} + +func TestGetKind(t *testing.T) { + expectedName := "dummy123" + expectedKind := "Node" + + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: expectedName, + }, + } + n := CreateNode(&v1node) + ret := n.GetKind() + assert.Equal(t, expectedKind, ret) +} + +func TestDrain(t *testing.T) { + // setup + ctx := context.TODO() + clientset, err := kwok.StartCluster(t, ctx) + require.NoError(t, err) + + cfg := config.Config{ + K8sClient: clientset, + CloudTerminationDelay: 30, + Namespace: v1.NamespaceDefault, + } + + kwokProvider, err := kwok.CreateCloudProvider(ctx, &cfg) + require.NoError(t, err) + + nodeName := fmt.Sprintf("kwok-test-drain-node-%s", rand.String(20)) + replicaCount := 3 + deploymentName := "test-deployment1" + + err = kwokProvider.CreateNode(ctx, nodeName) + assert.NoError(t, err) + + err = createDeployment(t, ctx, clientset, deploymentName, v1.NamespaceDefault, "pause", int32(replicaCount)) + require.NoError(t, err) + err = waitForDeploymentPodsReady(ctx, clientset, 60*time.Second, deploymentName, v1.NamespaceDefault, replicaCount) + require.NoError(t, err) + + nodePodsBefore, err := clientset.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{ + FieldSelector: "spec.nodeName=" + nodeName, + }) + assert.NoError(t, err) + assert.Len(t, nodePodsBefore.Items, replicaCount) + + // block node from rescheduling pods + nodev1, err := kwokProvider.K8sClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + require.NoError(t, err) + node := 
CreateNode(nodev1) + node.Taint() + err = node.Save(ctx, &cfg) + require.NoError(t, err) + + // drain + node.StartDrain(ctx, &cfg) + assert.NoError(t, err) + + time.Sleep(time.Duration(cfg.CloudTerminationDelay+20) * time.Second) //sleep longer than drain takes + + err = waitForDeploymentPodsReady(ctx, clientset, 60*time.Second, deploymentName, v1.NamespaceDefault, 0) + require.NoError(t, err) + + ret, err := kwokProvider.K8sClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, ret) + assert.Equal(t, nodeName, nodev1.ObjectMeta.Name) + assert.Equal(t, fmt.Sprintf("kwok://%s", nodeName), ret.Spec.ProviderID) + + nodePodsAfter, err := clientset.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{ + FieldSelector: "spec.nodeName=" + nodeName, + }) + assert.NoError(t, err) + assert.Len(t, nodePodsAfter.Items, 0) +} + +func TestDrainWithBlockingPDB(t *testing.T) { + // setup + ctx := context.TODO() + + clientset, err := kwok.StartCluster(t, ctx) + require.NoError(t, err) + + cfg := config.Config{ + K8sClient: clientset, + CloudTerminationDelay: 30, + Namespace: v1.NamespaceDefault, + } + + kwokProvider, err := kwok.CreateCloudProvider(ctx, &cfg) + require.NoError(t, err) + + nodeName := fmt.Sprintf("kwok-test-drain-node-%s", rand.String(20)) + replicaCount := 3 + deploymentName := "test-deployment1" + + err = kwokProvider.CreateNode(ctx, nodeName) + assert.NoError(t, err) + + err = createDeployment(t, ctx, clientset, deploymentName, v1.NamespaceDefault, "pause", int32(replicaCount)) + require.NoError(t, err) + err = waitForDeploymentPodsReady(ctx, clientset, 60*time.Second, deploymentName, v1.NamespaceDefault, replicaCount) + require.NoError(t, err) + + pdpbMaxUnavail := intstr.FromInt(0) + pdb := policyv1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pdb", + Namespace: v1.NamespaceDefault, + }, + Spec: policyv1.PodDisruptionBudgetSpec{ + MaxUnavailable: &pdpbMaxUnavail, + Selector: 
&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": deploymentName, + }, + }, + }, + } + _, err = clientset.PolicyV1().PodDisruptionBudgets(v1.NamespaceDefault).Create(ctx, &pdb, metav1.CreateOptions{}) + require.NoError(t, err) + + nodePodsBefore, err := clientset.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{ + FieldSelector: "spec.nodeName=" + nodeName, + }) + assert.NoError(t, err) + assert.Len(t, nodePodsBefore.Items, replicaCount) + + // block node from rescheduling pods + nodev1, err := kwokProvider.K8sClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + require.NoError(t, err) + node := CreateNode(nodev1) + node.Taint() + err = node.Save(ctx, &cfg) + require.NoError(t, err) + + // drain + node.StartDrain(ctx, &cfg) + + time.Sleep(time.Duration(cfg.CloudTerminationDelay+20) * time.Second) //sleep longer than drain takes + + ret, err := kwokProvider.K8sClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, ret) + assert.Equal(t, nodeName, nodev1.ObjectMeta.Name) + assert.Equal(t, fmt.Sprintf("kwok://%s", nodeName), ret.Spec.ProviderID) + + nodePodsAfter, err := clientset.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{ + FieldSelector: "spec.nodeName=" + nodeName, + }) + assert.NoError(t, err) + assert.Len(t, nodePodsAfter.Items, replicaCount) +} + +// createDeployment creates deployment +func createDeployment(t *testing.T, ctx context.Context, clientset *kubernetes.Clientset, name, namespace, image string, replicas int32) error { + t.Helper() + deploy := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": name, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: 
name, + Image: image, + }, + }, + }, + }, + }, + } + + _, err := clientset.AppsV1().Deployments(namespace).Create(ctx, &deploy, metav1.CreateOptions{}) + return err +} + +func waitForDeploymentPodsReady(ctx context.Context, clientset *kubernetes.Clientset, duration time.Duration, name, namespace string, requiredNumber int) error { + return wait.PollUntilContextTimeout(ctx, time.Second, duration, true, + func(context.Context) (bool, error) { + dep, err := clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if dep.Status.ReadyReplicas == int32(requiredNumber) { + return true, nil + } + return false, nil + }, + ) + +} + +func TestSetActionTimestamp(t *testing.T) { + v1node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + } + timeNow := time.Now() + n := CreateNode(&v1node) + n.SetActionTimestamp(timeNow) + + ret, exists := n.ObjectMeta.Annotations[TimestampAnnotation] + assert.Equal(t, timeNow.Format(time.RFC3339), ret) + assert.True(t, exists) + assert.True(t, n.changed) +} diff --git a/pkg/nodeundertaker/nodeundertaker.go b/pkg/nodeundertaker/nodeundertaker.go new file mode 100644 index 0000000..108b99e --- /dev/null +++ b/pkg/nodeundertaker/nodeundertaker.go @@ -0,0 +1,171 @@ +package nodeundertaker + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/cmd/node-undertaker/flags" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders/aws" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders/kind" + "github.com/dbschenker/node-undertaker/pkg/cloudproviders/kwok" + "github.com/dbschenker/node-undertaker/pkg/kubeclient" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/nodeupdatehandler" + "github.com/dbschenker/node-undertaker/pkg/observability" + "github.com/dbschenker/node-undertaker/pkg/observability/metrics" 
+ log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + "golang.org/x/sync/errgroup" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "os" + "os/signal" + "syscall" +) + +// Execute executes node-undertaker logic +func Execute() error { + err := setupLogging() + if err != nil { + return err + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cancelOnSigterm(cancel) + + return executeWithContext(ctx, kubeclient.GetClient, cancel) + +} + +func executeWithContext(ctx context.Context, getk8sClient func() (kubernetes.Interface, string, error), cancel func()) error { + // initialize config + cfg, err := config.GetConfig() + + // k8s ClientSet + k8sClient, currentNamespace, err := getk8sClient() + if err != nil { + return err + } + cfg.SetK8sClient(k8sClient, currentNamespace) + + if err != nil { + return err + } + cloudProvider, err := getCloudProvider(ctx, cfg) + if err != nil { + return err + } + err = cloudProvider.ValidateConfig() + if err != nil { + return err + } + cfg.CloudProvider = cloudProvider + + //observability (logging & monitoring http server setup) + observabilityServer := observability.GetDefaultObservabilityServer(cfg) + observabilityServer.SetupRoutes() + + // workload + workload := func(ctx2 context.Context) error { + kubeclient.LeaderElection( + ctx2, + cfg, + func(ctx3 context.Context) { + startLogic(ctx2, cfg, nodeupdatehandler.GetDefaultUpdateHandlerFuncs(ctx, cfg), cancel) + }, + cancel) + return nil + } + + // start logic + err = startServer(ctx, cfg, &observabilityServer, workload, cancel) + return err +} + +func setupLogging() error { + lvl, err := log.ParseLevel(viper.GetString(flags.LogLevelFlag)) + if err != nil { + return err + } + log.SetLevel(lvl) + format := viper.GetString(flags.LogFormatFlag) + switch format { + case flags.LogFormatText: + log.SetFormatter(&log.TextFormatter{ + FullTimestamp: true, + }) + 
case flags.LogFormatJson: + log.SetFormatter(&log.JSONFormatter{}) + default: + return fmt.Errorf("unknown log format: %s", format) + } + + return nil +} + +func startServer(ctx context.Context, cfg *config.Config, observabilityServer observability.OBSERVABILITYSERVER, workload func(ctx context.Context) error, cancel func()) error { + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { return observabilityServer.StartServer(ctx) }) + g.Go(func() error { return workload(ctx) }) + + return g.Wait() +} + +func startLogic(ctx context.Context, cfg *config.Config, handlerFuncs cache.ResourceEventHandlerFuncs, cancel func()) { + tweakListOptionsFunc := func(opts *v1.ListOptions) { + opts.LabelSelector = cfg.NodeSelector.String() + } + options := informers.WithTweakListOptions(tweakListOptionsFunc) + + factory := informers.NewSharedInformerFactoryWithOptions(cfg.K8sClient, cfg.InformerResync, options) + nodeInformer := factory.Core().V1().Nodes() + informer := nodeInformer.Informer() + nodeLister := nodeInformer.Lister() + factory.Start(ctx.Done()) + if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { + log.Errorf("Timed out waiting for caches to sync") + cancel() + } + _, err := informer.AddEventHandler(handlerFuncs) + if err != nil { + log.Errorf("Error occurred while adding event handler funcs: %v", err) + cancel() + } + + unregisterMetrics := metrics.Initialize(nodeLister) + // unregister metrics so there is always only one metric - needed for testing + select { + case <-ctx.Done(): + unregisterMetrics() + } +} + +func getCloudProvider(ctx context.Context, cfg *config.Config) (cloudproviders.CLOUDPROVIDER, error) { + switch cloudProviderName := viper.GetString(flags.CloudProviderFlag); cloudProviderName { + case "aws": + cloudProvider, err := aws.CreateCloudProvider(ctx) + return cloudProvider, err + case "kind": + cloudProvider, err := kind.CreateCloudProvider(ctx) + return cloudProvider, err + + case "kwok": + cloudProvider, err := 
kwok.CreateCloudProvider(ctx, cfg) + return cloudProvider, err + default: + return nil, fmt.Errorf("Unknown cloud provider: %s", cloudProviderName) + } + +} + +func cancelOnSigterm(cancel func()) { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + go func() { + <-ch + log.Info("Received termination, signaling shutdown") + cancel() + }() +} diff --git a/pkg/nodeundertaker/nodeundertaker_test.go b/pkg/nodeundertaker/nodeundertaker_test.go new file mode 100644 index 0000000..4a72384 --- /dev/null +++ b/pkg/nodeundertaker/nodeundertaker_test.go @@ -0,0 +1,217 @@ +package nodeundertaker + +import ( + "context" + "errors" + "fmt" + "github.com/dbschenker/node-undertaker/cmd/node-undertaker/flags" + "github.com/dbschenker/node-undertaker/pkg/kubeclient" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + mock_observability "github.com/dbschenker/node-undertaker/pkg/observability/mocks" + "github.com/golang/mock/gomock" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "testing" + "time" +) + +func TestGetCloudProviderNoProvider(t *testing.T) { + ctx := context.TODO() + cfg := config.Config{} + cloudProvider, err := getCloudProvider(ctx, &cfg) + + assert.Nil(t, cloudProvider) + assert.Error(t, err) +} + +func TestGetCloudProviderUnknownProvider(t *testing.T) { + ctx := context.TODO() + cfg := config.Config{} + viper.Set("cloud-provider", "unknown") + cloudProvider, err := getCloudProvider(ctx, &cfg) + + assert.Nil(t, cloudProvider) + assert.Error(t, err) +} + +func TestGetCloudProviderOk(t *testing.T) { + ctx := context.TODO() + cfg := config.Config{} + viper.Set("cloud-provider", "aws") + cloudProvider, err := getCloudProvider(ctx, &cfg) + + assert.NotNil(t, cloudProvider) + assert.NoError(t, err) +} + +func TestGetCloudProviderKindOk(t *testing.T) { + ctx := context.TODO() + cfg := config.Config{} + 
viper.Set("cloud-provider", "kind") + cloudProvider, err := getCloudProvider(ctx, &cfg) + + assert.NotNil(t, cloudProvider) + assert.NoError(t, err) +} + +func TestGetCloudProviderKwokOk(t *testing.T) { + ctx := context.TODO() + cfg := config.Config{} + viper.Set("cloud-provider", "kwok") + cloudProvider, err := getCloudProvider(ctx, &cfg) + + assert.NotNil(t, cloudProvider) + assert.NoError(t, err) +} + +func TestStartServerOk(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + errorMsg := "Error happened" + + observability := mock_observability.NewMockOBSERVABILITYSERVER(mockCtrl) + observability.EXPECT().StartServer(gomock.Any()).Times(1).DoAndReturn( + func(ctx3 context.Context) error { + select { + case <-ctx3.Done(): + return fmt.Errorf(errorMsg) + case <-time.After(1 * time.Second): + return nil + } + }) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + cfg := config.Config{} + cfg.K8sClient = fake.NewSimpleClientset() + workload := func(ctx2 context.Context) error { + select { + case <-ctx2.Done(): + return fmt.Errorf(errorMsg) + case <-time.After(5 * time.Second): + return nil + } + } + + res := startServer(ctx, &cfg, observability, workload, cancel) + assert.NoError(t, res) +} + +func TestStartServerNok(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + errorMsg := "Error happened" + + observability := mock_observability.NewMockOBSERVABILITYSERVER(mockCtrl) + observability.EXPECT().StartServer(gomock.Any()).Times(1).DoAndReturn( + func(ctx3 context.Context) error { + return fmt.Errorf(errorMsg) + }) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + cfg := config.Config{} + cfg.K8sClient = fake.NewSimpleClientset() + + workload := func(ctx2 context.Context) error { + select { + case <-ctx2.Done(): + return fmt.Errorf(errorMsg) + case <-time.After(5 * time.Second): + return nil + } + } + + var res error + assert.NotPanics(t, + func() { + res = 
startServer(ctx, &cfg, observability, workload, cancel) + }, + ) + assert.EqualError(t, res, errorMsg) +} + +func TestCancelOnSigterm(t *testing.T) { + counter := 0 + c := func() { + counter += 1 + } + cancelOnSigterm(c) + assert.Equal(t, 0, counter) +} + +func TestSetupLogLevelNok(t *testing.T) { + err := setupLogging() + assert.Error(t, err) +} + +func TestSetupLogFormatJsonOk(t *testing.T) { + originalLvl := log.GetLevel() + viper.Set(flags.LogLevelFlag, "error") + viper.Set(flags.LogFormatFlag, flags.LogFormatText) + err := setupLogging() + + assert.NoError(t, err) + assert.Equal(t, log.ErrorLevel, log.GetLevel()) + //cleanup + log.SetLevel(originalLvl) + log.SetFormatter(&log.TextFormatter{ + FullTimestamp: true, + }) +} + +func TestSetupLogFormatNok(t *testing.T) { + originalLvl := log.GetLevel() + viper.Set(flags.LogLevelFlag, "error") + viper.Set(flags.LogFormatFlag, "unknonw") + err := setupLogging() + + assert.Error(t, err) + //cleanup + log.SetLevel(originalLvl) + log.SetFormatter(&log.TextFormatter{ + FullTimestamp: true, + }) +} + +func TestSetupLogLevelOk(t *testing.T) { + originalLvl := log.GetLevel() + viper.Set(flags.LogLevelFlag, "error") + viper.Set(flags.LogFormatFlag, flags.LogFormatJson) + err := setupLogging() + + assert.NoError(t, err) + assert.Equal(t, log.ErrorLevel, log.GetLevel()) + //cleanup + log.SetLevel(originalLvl) +} + +func TestExecuteWithContext(t *testing.T) { + viper.Set(flags.LeaseLockNameFlag, "test-lease") + viper.Set(flags.PortFlag, 0) //use random port + viper.Set(flags.CloudProviderFlag, "kwok") + + ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) + defer cancel() + + err := executeWithContext(ctx, kubeclient.GetFakeClient, cancel) + assert.NoError(t, err) +} + +func TestExecuteWithContextK8sErr(t *testing.T) { + viper.Set(flags.LeaseLockNameFlag, "test-lease") + viper.Set(flags.PortFlag, 0) //use random port + viper.Set(flags.CloudProviderFlag, "kwok") + + ctx, cancel := 
context.WithTimeout(context.TODO(), 10*time.Second) + defer cancel() + + err := executeWithContext(ctx, func() (kubernetes.Interface, string, error) { + return nil, "", errors.New("test error") + }, cancel) + assert.Error(t, err) +} diff --git a/pkg/nodeundertaker/nodeupdatehandler/nodeupdatehandler.go b/pkg/nodeundertaker/nodeupdatehandler/nodeupdatehandler.go new file mode 100644 index 0000000..2b9ea8c --- /dev/null +++ b/pkg/nodeundertaker/nodeupdatehandler/nodeupdatehandler.go @@ -0,0 +1,216 @@ +package nodeupdatehandler + +import ( + "context" + "fmt" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + nodepkg "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/node" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + "time" +) + +func OnNodeUpdate(ctx context.Context, cfg *config.Config, nv1 *v1.Node) { + n := nodepkg.CreateNode(nv1) + nodeUpdateInternal(ctx, cfg, n) +} + +func nodeUpdateInternal(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + if !isAfterInitialDelay(cfg) { + log.Debugf("Node undertaker is not running at least %d seconds", cfg.InitialDelay) + return + } + if !n.IsGrownUp(cfg) { + log.Debugf("%s/%s: is not old enough (%d seconds) - might be not fully initialized.", n.GetKind(), n.GetName(), cfg.NodeInitialThreshold) + return + } + + // check if lease is fresh + fresh, err := n.HasFreshLease(ctx, cfg) + if err != nil { + log.Errorf("Node %s update failed: %v", n.GetName(), err) + return + } + + nodeLabel := n.GetLabel() + + if nodeLabel == nodepkg.NodeTerminating { + reason, err := n.Terminate(ctx, cfg) + if err != nil { + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Termination", reason, err.Error(), "") + } + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "Termination", reason, "", "") + return + } else if nodeLabel == nodepkg.NodePreparingTermination { + nodePreparingTermination(ctx, cfg, n) + return + } else if nodeLabel == nodepkg.NodeTerminationPrepared 
{ + nodeTerminationPrepared(ctx, cfg, n) + return + } + + if fresh { + if nodeLabel != nodepkg.NodeHealthy { + makeNodeHealthy(ctx, cfg, n) + } else { + log.Debugf("%s/%s: has fresh lease", n.GetKind(), n.GetName()) + } + } else { // node has old lease + switch label := nodeLabel; label { + case nodepkg.NodeHealthy: + makeNodeUnhealthy(ctx, cfg, n) + case nodepkg.NodeUnhealthy: + taintNode(ctx, cfg, n) + case nodepkg.NodeTainted: + drainNode(ctx, cfg, n) + case nodepkg.NodeDraining: + makePrepareNodeTermination(ctx, cfg, n) + default: + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "NodeUpdate", "Node Update Failed", fmt.Sprintf("unknown label value found: %s", label), "") + } + } +} + +func GetDefaultUpdateHandlerFuncs(ctx context.Context, cfg *config.Config) cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + OnNodeUpdate(ctx, cfg, newObj.(*v1.Node)) + }, + AddFunc: func(obj interface{}) { + OnNodeUpdate(ctx, cfg, obj.(*v1.Node)) + }, + DeleteFunc: nil, + } +} + +func isAfterInitialDelay(cfg *config.Config) bool { + return cfg.StartupTime.Add(time.Duration(cfg.InitialDelay) * time.Second).Before(time.Now()) +} + +func nodePreparingTermination(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + reason, err := n.PrepareTermination(ctx, cfg) + if err != nil { + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Prepare Termination", reason, err.Error(), "") + return + } + + n.SetActionTimestamp(time.Now()) + n.SetLabel(nodepkg.NodeTerminationPrepared) + err = n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Prepare Termination", "Prepare Termination failed", err.Error(), "") + return + } + + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "Termination prepared", reason, "", "") +} + +func nodeTerminationPrepared(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + 
nodeModificationTimestamp, err := n.GetActionTimestamp() + if err != nil { + log.Errorf("Node %s: timestamp is not parsed properly: %v", n.GetName(), err) + return + } + timestampShouldBeBefore := time.Now().Add(-time.Duration(cfg.CloudTerminationDelay) * time.Second) + if nodeModificationTimestamp.After(timestampShouldBeBefore) { + log.Infof("%s/%s: prepared for termination less than %d seconds ago", n.GetKind(), n.GetName(), cfg.CloudTerminationDelay) + return + } + + n.SetLabel(nodepkg.NodeTerminating) + err = n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Label", "Label terminating failed", err.Error(), "") + return + } + + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "LabelTerminating", "Labeled terminating", "", "") +} + +func makeNodeHealthy(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + n.Untaint() + n.RemoveActionTimestamp() + n.RemoveLabel() + err := n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Untaint", "Untaint failed", err.Error(), "") + return + } + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "Untaint", "Untainted", "", "") +} + +func makeNodeUnhealthy(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + n.SetLabel(nodepkg.NodeUnhealthy) + err := n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Label", "Label unhealthy failed", err.Error(), "") + return + } + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "LabeledUnhealthy", "Labeled unhealthy", "", "") +} + +func taintNode(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + n.Taint() + n.SetActionTimestamp(time.Now()) + n.SetLabel(nodepkg.NodeTainted) + err := n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error 
while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Tainted", "Failed", err.Error(), "") + return + } + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "Taint", "Tainted", "", "") +} + +func drainNode(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + nodeModificationTimestamp, err := n.GetActionTimestamp() + if err != nil { + log.Errorf("Node %s: timestamp is not parsed properly: %v", n.GetName(), err) + return + } + timestampShouldBeBefore := time.Now().Add(-time.Duration(cfg.DrainDelay) * time.Second) + if nodeModificationTimestamp.After(timestampShouldBeBefore) { + log.Infof("%s/%s: tainted less than %d seconds ago", n.GetKind(), n.GetName(), cfg.DrainDelay) + return + } + + n.StartDrain(ctx, cfg) + n.SetActionTimestamp(time.Now()) + n.SetLabel(nodepkg.NodeDraining) + err = n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Drain", "Drain Start Failed", err.Error(), "") + return + } + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "Drain", "Drain started", "", "") +} + +func makePrepareNodeTermination(ctx context.Context, cfg *config.Config, n nodepkg.NODE) { + nodeModificationTimestamp, err := n.GetActionTimestamp() + if err != nil { + log.Errorf("Node %s: timestamp is not parsed properly: %v", n.GetName(), err) + return + } + timestampShouldBeBefore := time.Now().Add(-time.Duration(cfg.CloudPrepareTerminationDelay) * time.Second) + if nodeModificationTimestamp.After(timestampShouldBeBefore) { + log.Infof("%s/%s: drained less than %d seconds ago", n.GetKind(), n.GetName(), cfg.CloudPrepareTerminationDelay) + return + } + + n.SetActionTimestamp(time.Now()) + n.SetLabel(nodepkg.NodePreparingTermination) + err = n.Save(ctx, cfg) + if err != nil { + log.Errorf("Received error while saving node %s: %v", n.GetName(), err) + nodepkg.ReportEvent(ctx, cfg, log.ErrorLevel, n, "Label", "Label Prepare 
Termination Failed", err.Error(), "") + return + } + + nodepkg.ReportEvent(ctx, cfg, log.InfoLevel, n, "Prepare Termination", "Instance preparing for termination", "", "") +} diff --git a/pkg/nodeundertaker/nodeupdatehandler/nodeupdatehandler_test.go b/pkg/nodeundertaker/nodeupdatehandler/nodeupdatehandler_test.go new file mode 100644 index 0000000..4fa7bf3 --- /dev/null +++ b/pkg/nodeundertaker/nodeupdatehandler/nodeupdatehandler_test.go @@ -0,0 +1,537 @@ +package nodeupdatehandler + +import ( + "context" + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + nodepkg "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/node" + mocknode "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/node/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "testing" + "time" +) + +func TestOnNodeUpdate(t *testing.T) { + nodeName := "test-node1" + namespaceName := "test-dummy-ns" + creationTime := metav1.Now().Add(-20 * time.Second).UTC() + + nv1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: namespaceName, + CreationTimestamp: metav1.NewTime(creationTime), + }, + } + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + NodeInitialThreshold: 1000, + } + + OnNodeUpdate(context.TODO(), &cfg, &nv1) + + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 0) +} + +// unknown node label, node with old lease - should do nothin +func TestUnknownLabel(t *testing.T) { + nodeName := "test-node1" + namespaceName := "test-dummy-ns" + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + 
node.EXPECT().GetLabel().Return("unknown-label").Times(1) + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(false, nil).Times(1) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +func TestNodeUpdateInternalNotAfterInitialDelay(t *testing.T) { + cfg := config.Config{ + StartupTime: time.Now().Add(-50 * time.Second), + InitialDelay: 100, + } + n := nodepkg.Node{} + nodeUpdateInternal(context.TODO(), &cfg, &n) +} + +// node not grown up - should do nothing +func TestNodeUpdateInternalNotGrownUp(t *testing.T) { + nodeName := "test-node1" + namespaceName := "test-dummy-ns" + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + node.EXPECT().GetName().Return(nodeName) + node.EXPECT().GetKind().Return("Node").AnyTimes() + node.EXPECT().IsGrownUp(gomock.Any()).Return(false).Times(1) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 0) +} + +// node grown up & with recent lease & no label - should do nothing +func TestNodeUpdateInternalHealthyNoLabel(t *testing.T) { + nodeName := "test-node1" + hasFreshLease := true + nodeLabel := nodepkg.NodeHealthy + var hasFreshLeaseErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + 
node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + cfg := config.Config{} + nodeUpdateInternal(context.TODO(), &cfg, node) +} + +// node grown up & with recent lease & has label - should remove label, taint and annotation +func TestNodeUpdateInternalHealthyUnhealthyLabel(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := true + nodeLabel := nodepkg.NodeDraining + var hasFreshLeaseErr error = nil + var saveErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + node.EXPECT().Untaint().Times(1) + node.EXPECT().RemoveActionTimestamp().Times(1) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(saveErr).Times(1) + node.EXPECT().RemoveLabel().Times(1) + + cfg := config.Config{K8sClient: fake.NewSimpleClientset(), Namespace: namespaceName} + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up & with old lease & has no label - should add label & produce event +func TestNodeUpdateInternalUnhealthyNoLabel(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + nodeLabel := nodepkg.NodeHealthy + var hasFreshLeaseErr error = nil + var saveErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + 
node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + setLabelCall := node.EXPECT().SetLabel(nodepkg.NodeUnhealthy).Times(1) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(saveErr).Times(1).After(setLabelCall) + + cfg := config.Config{K8sClient: fake.NewSimpleClientset(), Namespace: namespaceName} + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up & with old lease & has unhealthy label - should add timestamp, taint & change label + save + produce event +func TestNodeUpdateInternalUnhealthyUnhealthyLabel(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + nodeLabel := nodepkg.NodeUnhealthy + var hasFreshLeaseErr error = nil + var saveErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + setTimestampCall := node.EXPECT().SetActionTimestamp(gomock.Any()).Times(1) + setLabelCall := node.EXPECT().SetLabel(nodepkg.NodeTainted).Times(1) + taintCall := node.EXPECT().Taint().Times(1) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(saveErr).Times(1).After(setLabelCall).After(setTimestampCall).After(taintCall) + + cfg := config.Config{K8sClient: fake.NewSimpleClientset(), Namespace: namespaceName} + + 
nodeUpdateInternal(context.TODO(), &cfg, node)
+	events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{})
+	assert.NoError(t, evErr)
+	assert.Len(t, events.Items, 1)
+}
+
+// node grown up & with old lease & label=tainted + timestamp less than threshold - should do nothing
+func TestNodeUpdateInternalUnhealthyTaintedLabelRecent(t *testing.T) {
+	nodeName := "test-node1"
+	namespaceName := "dummy-ns"
+	hasFreshLease := false
+	nodeLabel := nodepkg.NodeTainted
+	var hasFreshLeaseErr error = nil
+	var getTimestampErr error = nil
+	mockCtrl := gomock.NewController(t)
+	node := mocknode.NewMockNODE(mockCtrl)
+
+	node.EXPECT().GetName().Return(nodeName).AnyTimes()
+	node.EXPECT().GetKind().Return("Node").AnyTimes()
+
+	node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1)
+	node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1)
+	node.EXPECT().GetLabel().Return(nodeLabel).Times(1)
+
+	node.EXPECT().GetActionTimestamp().Return(time.Now().Add(-5*time.Second), getTimestampErr).Times(1)
+
+	cfg := config.Config{
+		K8sClient:  fake.NewSimpleClientset(),
+		Namespace:  namespaceName,
+		DrainDelay: 90,
+	}
+
+	nodeUpdateInternal(context.TODO(), &cfg, node)
+	events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{})
+	assert.NoError(t, evErr)
+	assert.Len(t, events.Items, 0)
+}
+
+// node grown up & with old lease & label=tainted + timestamp more than threshold - should drain node + label + update timestamp
+func TestNodeUpdateInternalUnhealthyTaintedLabelOld(t *testing.T) {
+	nodeName := "test-node1"
+	namespaceName := "dummy-ns"
+	hasFreshLease := false
+	var saveErr error = nil
+	nodeLabel := nodepkg.NodeTainted
+	var hasFreshLeaseErr error = nil
+	var getTimestampErr error = nil
+	mockCtrl := gomock.NewController(t)
+	node := mocknode.NewMockNODE(mockCtrl)
+
+	node.EXPECT().GetName().Return(nodeName).AnyTimes()
+	
node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + node.EXPECT().GetActionTimestamp().Return(time.Now().Add(-100*time.Second), getTimestampErr).Times(1) + + drainCall := node.EXPECT().StartDrain(gomock.Any(), gomock.Any()).Times(1) + drainingCall := node.EXPECT().SetLabel(nodepkg.NodeDraining).Times(1) + timestampCall := node.EXPECT().SetActionTimestamp(gomock.Any()).Times(1) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(saveErr).Times(1).After(drainingCall).After(timestampCall).After(drainCall) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + DrainDelay: 90, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up & with old lease & label=draining + timetamp less than threshold - should do nothing +func TestNodeUpdateInternalUnhealthyDrainingLabelRecent(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + nodeLabel := nodepkg.NodeDraining + var hasFreshLeaseErr error = nil + var getTimestampErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + node.EXPECT().GetActionTimestamp().Return(time.Now().Add(-5*time.Second), getTimestampErr).Times(1) + + cfg := config.Config{ + K8sClient: 
fake.NewSimpleClientset(), + Namespace: namespaceName, + CloudPrepareTerminationDelay: 90, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 0) +} + +// node grown up & with old lease & label=draining + timestamp less than threshold - should terminate the node in cloud + label + annotate + produce event +func TestNodeUpdateInternalUnhealthyDrainingLabelOld(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + var saveErr error = nil + nodeLabel := nodepkg.NodeDraining + var hasFreshLeaseErr error = nil + var getTimestampErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + getTimestampCall := node.EXPECT().GetActionTimestamp().Return(time.Now().Add(-100*time.Second), getTimestampErr).Times(1) + setLabelCall := node.EXPECT().SetLabel(nodepkg.NodePreparingTermination) + setTimestampCall := node.EXPECT().SetActionTimestamp(gomock.Any()).Times(1).After(getTimestampCall) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(saveErr).Times(1).After(setLabelCall).After(setTimestampCall) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + CloudPrepareTerminationDelay: 90, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up &with old lease & 
label=preparing_termination - should prepare termination and label: termination_prepared +func TestNodeUpdateInternalPrepareTermination(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + terminationAction := "CloudInstanceTerminated" + var terminationErr error = nil + nodeLabel := nodepkg.NodePreparingTermination + var hasFreshLeaseErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + setLabelCall := node.EXPECT().SetLabel(nodepkg.NodeTerminationPrepared).Return().Times(1) + setTimestampCall := node.EXPECT().SetActionTimestamp(gomock.Any()).Return().Times(1) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(nil).Times(1).After(setLabelCall).After(setTimestampCall) + + node.EXPECT().PrepareTermination(gomock.Any(), gomock.Any()).Return(terminationAction, terminationErr).Times(1) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up &with old lease & label=prepared_termination + timestamp is older than CloudPrepareTerminationDelay - should prepare termination and label: terminating +func TestNodeUpdateInternalPreparedTerminationOld(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + var getTimestampErr error = nil + nodeLabel := nodepkg.NodeTerminationPrepared + var hasFreshLeaseErr error = nil + mockCtrl := 
gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + getTimestampCall := node.EXPECT().GetActionTimestamp().Return(time.Now().Add(-100*time.Second), getTimestampErr).Times(1) + setLabelCall := node.EXPECT().SetLabel(nodepkg.NodeTerminating).Return().Times(1) + node.EXPECT().Save(gomock.Any(), gomock.Any()).Return(nil).Times(1).After(setLabelCall).After(getTimestampCall) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + CloudTerminationDelay: 90, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up &with old lease & label=prepared_termination + timestamp is not older than CloudPrepareTerminationDelay - should prepare termination and label: terminating +func TestNodeUpdateInternalPreparedTerminationRecent(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + nodeLabel := nodepkg.NodeTerminationPrepared + var hasFreshLeaseErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + node.EXPECT().GetActionTimestamp().Return(time.Now().Add(-10*time.Second), nil).Times(1) + + cfg := 
config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + CloudTerminationDelay: 90, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 0) //no node was saved +} + +// node grown up & with old lease & label=terminating - should terminate the node in cloud + label + annotate + produce event +func TestNodeUpdateInternalUnhealthyDeletingOldLease(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + hasFreshLease := false + terminationAction := "CloudInstanceTerminated" + var terminationErr error = nil + nodeLabel := nodepkg.NodeTerminating + var hasFreshLeaseErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + node.EXPECT().Terminate(gomock.Any(), gomock.Any()).Return(terminationAction, terminationErr).Times(1) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +// node grown up & with fresh lease & label=deleting + timestamp less than threshold - should terminate the node in cloud + label + annotate + produce event +func TestNodeUpdateInternalUnhealthyDeletingFreshLease(t *testing.T) { + nodeName := "test-node1" + namespaceName := "dummy-ns" + terminationAction := "CloudInstanceTerminated" + + hasFreshLease := true 
+ var terminationErr error = nil + nodeLabel := nodepkg.NodeTerminating + var hasFreshLeaseErr error = nil + mockCtrl := gomock.NewController(t) + node := mocknode.NewMockNODE(mockCtrl) + + node.EXPECT().GetName().Return(nodeName).AnyTimes() + node.EXPECT().GetKind().Return("Node").AnyTimes() + + node.EXPECT().IsGrownUp(gomock.Any()).Return(true).Times(1) + node.EXPECT().HasFreshLease(gomock.Any(), gomock.Any()).Return(hasFreshLease, hasFreshLeaseErr).Times(1) + node.EXPECT().GetLabel().Return(nodeLabel).Times(1) + + node.EXPECT().Terminate(gomock.Any(), gomock.Any()).Return(terminationAction, terminationErr).Times(1) + + cfg := config.Config{ + K8sClient: fake.NewSimpleClientset(), + Namespace: namespaceName, + } + + nodeUpdateInternal(context.TODO(), &cfg, node) + events, evErr := cfg.K8sClient.EventsV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, evErr) + assert.Len(t, events.Items, 1) +} + +func TestGetDefaultUpdateHandlerFuncs(t *testing.T) { + ctx := context.TODO() + cfg := &config.Config{} + + result := GetDefaultUpdateHandlerFuncs(ctx, cfg) + assert.Nil(t, result.DeleteFunc) + assert.NotNil(t, result.AddFunc) + assert.NotNil(t, result.UpdateFunc) +} + +func TestIsAfterInitialDelayOk(t *testing.T) { + cfg := config.Config{ + StartupTime: time.Now().Add(-50 * time.Second), + InitialDelay: 20, + } + ret := isAfterInitialDelay(&cfg) + assert.True(t, ret) +} + +func TestIsAfterInitialDelayNok(t *testing.T) { + cfg := config.Config{ + StartupTime: time.Now().Add(-50 * time.Second), + InitialDelay: 100, + } + ret := isAfterInitialDelay(&cfg) + assert.False(t, ret) +} diff --git a/pkg/observability/api.go b/pkg/observability/api.go new file mode 100644 index 0000000..65eddf0 --- /dev/null +++ b/pkg/observability/api.go @@ -0,0 +1,12 @@ +package observability + +import ( + "context" +) + +//go:generate mockgen -destination=./mocks/api_mocks.go github.com/dbschenker/node-undertaker/pkg/observability OBSERVABILITYSERVER + 
+type OBSERVABILITYSERVER interface {
+	StartServer(context.Context) error
+	SetupRoutes()
+}
diff --git a/pkg/observability/health/health.go b/pkg/observability/health/health.go
new file mode 100644
index 0000000..5218b4c
--- /dev/null
+++ b/pkg/observability/health/health.go
@@ -0,0 +1,38 @@
+package health
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// liveness is the JSON payload returned by the liveness probe.
+type liveness struct {
+	Healthy bool
+}
+
+// readiness is the JSON payload returned by the readiness probe.
+type readiness struct {
+	Ready bool
+}
+
+// LivenessProbe handles the /livez endpoint. It always reports the process as
+// healthy with a JSON body.
+func LivenessProbe(w http.ResponseWriter, r *http.Request) {
+	ret := liveness{Healthy: true}
+
+	resp, err := json.Marshal(ret)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(resp)
+}
+
+// ReadinessProbe handles the /readyz endpoint. It always reports the process
+// as ready with a JSON body.
+func ReadinessProbe(w http.ResponseWriter, r *http.Request) {
+	ret := readiness{Ready: true}
+
+	resp, err := json.Marshal(ret)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(resp)
+}
diff --git a/pkg/observability/health/health_test.go b/pkg/observability/health/health_test.go
new file mode 100644
index 0000000..f3acccc
--- /dev/null
+++ b/pkg/observability/health/health_test.go
@@ -0,0 +1,41 @@
+package health
+
+import (
+	"encoding/json"
+	"github.com/stretchr/testify/require"
+	"io"
+	"net/http/httptest"
+	"testing"
+)
+
+func TestLivenessProbe(t *testing.T) {
+	expectedResponse := liveness{Healthy: true}
+
+	req := httptest.NewRequest("GET", "http://localhost:8081/livez", nil)
+	w := httptest.NewRecorder()
+	LivenessProbe(w, req)
+
+	resp := w.Result()
+	body, err := io.ReadAll(resp.Body)
+	require.NoError(t, err)
+	var response liveness
+	err = json.Unmarshal(body, &response)
+	require.NoError(t, err)
+	require.Equal(t, expectedResponse, response)
+}
+
+func TestReadinessProbe(t *testing.T) {
+	expectedResponse := readiness{Ready: true}
+
+	req := httptest.NewRequest("GET", "http://localhost:8081/readyz", nil)
+	w := httptest.NewRecorder()
+	
ReadinessProbe(w, req) + + resp := w.Result() + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var response readiness + err = json.Unmarshal(body, &response) + require.NoError(t, err) + require.Equal(t, expectedResponse, response) +} diff --git a/pkg/observability/metrics/metrics.go b/pkg/observability/metrics/metrics.go new file mode 100644 index 0000000..db7b4d0 --- /dev/null +++ b/pkg/observability/metrics/metrics.go @@ -0,0 +1,15 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + v1 "k8s.io/client-go/listers/core/v1" +) + +func Initialize(lister v1.NodeLister) func() { + nsc := CreateNodeStatusCollector(lister) + prometheus.MustRegister(nsc) + + return func() { + prometheus.Unregister(nsc) + } +} diff --git a/pkg/observability/metrics/node_status_collector.go b/pkg/observability/metrics/node_status_collector.go new file mode 100644 index 0000000..c415306 --- /dev/null +++ b/pkg/observability/metrics/node_status_collector.go @@ -0,0 +1,58 @@ +package metrics + +import ( + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/node" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/labels" + v1 "k8s.io/client-go/listers/core/v1" +) + +const ( + NodeHealthyLabelOverride = "healthy" + MetricsNamespace = "node_undertaker" + NodeMetricsSubsystem = "node" + HealthMetricName = "health" + MetricLabelNode = "node" + MetricLabelStatus = "status" +) + +type NodeStatusCollector struct { + lister v1.NodeLister + desc *prometheus.Desc +} + +func CreateNodeStatusCollector(lister v1.NodeLister) NodeStatusCollector { + + ret := NodeStatusCollector{ + lister: lister, + desc: prometheus.NewDesc( + prometheus.BuildFQName(MetricsNamespace, NodeMetricsSubsystem, HealthMetricName), + "Node health status", + []string{MetricLabelNode, MetricLabelStatus}, nil, + ), + } + return ret +} + +func (nsc NodeStatusCollector) Describe(descs chan<- *prometheus.Desc) { + descs <- 
nsc.desc + //prometheus.DescribeByCollect(nsc, descs) +} + +func (nsc NodeStatusCollector) Collect(metrics chan<- prometheus.Metric) { + nodes, err := nsc.lister.List(labels.Everything()) + if err != nil { + log.Errorf("Error while collecting metrics: %v", err) + return + } + for i := range nodes { + n := node.CreateNode(nodes[i]) + statusLabel := n.GetLabel() + if statusLabel == node.NodeHealthy { + statusLabel = NodeHealthyLabelOverride + } + metrics <- prometheus.MustNewConstMetric(nsc.desc, prometheus.GaugeValue, 1.0, n.GetName(), statusLabel) + + } +} diff --git a/pkg/observability/metrics/node_status_collector_test.go b/pkg/observability/metrics/node_status_collector_test.go new file mode 100644 index 0000000..eb2c34c --- /dev/null +++ b/pkg/observability/metrics/node_status_collector_test.go @@ -0,0 +1,68 @@ +package metrics + +//go:generate mockgen -destination=./mocks/informer_mocks.go k8s.io/client-go/listers/core/v1 NodeLister + +import ( + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/node" + mock_v1 "github.com/dbschenker/node-undertaker/pkg/observability/metrics/mocks" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" + "testing" +) + +func TestCreateNodeStatusCollectorDescribe(t *testing.T) { + nsc := CreateNodeStatusCollector(nil) + + c := make(chan *prometheus.Desc) + go nsc.Describe(c) + desc := <-c + assert.NotNil(t, desc) +} + +func TestCreateNodeStatusCollectorCollect(t *testing.T) { + nodes := []*v1.Node{ + &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + }, + &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-2", + }, + }, + &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-3", + Labels: map[string]string{ + node.Label: node.NodeDraining, + }, + }, + }, + } + mockCtrl := 
gomock.NewController(t)
+	fakeLister := mock_v1.NewMockNodeLister(mockCtrl)
+	fakeLister.EXPECT().List(gomock.Any()).Return(nodes, nil).Times(2)
+
+	nsc := CreateNodeStatusCollector(fakeLister)
+
+	const expectedMetadata = `
+	# HELP node_undertaker_node_health Node health status
+	# TYPE node_undertaker_node_health gauge
+	`
+	expectedMetricText := `
+	node_undertaker_node_health{node="node-1",status="healthy"} 1
+	node_undertaker_node_health{node="node-2",status="healthy"} 1
+	node_undertaker_node_health{node="node-3",status="draining"} 1
+	`
+
+	count := testutil.CollectAndCount(nsc)
+	assert.Equal(t, 3, count)
+	err := testutil.CollectAndCompare(nsc, strings.NewReader(expectedMetadata+expectedMetricText))
+	assert.NoError(t, err)
+}
diff --git a/pkg/observability/server.go b/pkg/observability/server.go
new file mode 100644
index 0000000..f9913d6
--- /dev/null
+++ b/pkg/observability/server.go
@@ -0,0 +1,49 @@
+package observability
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config"
+	"github.com/dbschenker/node-undertaker/pkg/observability/health"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	log "github.com/sirupsen/logrus"
+	"net/http"
+)
+
+// DefaultObservabilityServer serves the metrics and health probe endpoints.
+type DefaultObservabilityServer struct {
+	server *http.Server
+}
+
+// GetDefaultObservabilityServer creates an observability server listening on
+// the port taken from the configuration.
+func GetDefaultObservabilityServer(config *config.Config) DefaultObservabilityServer {
+	o := DefaultObservabilityServer{}
+	hostAddress := fmt.Sprintf(":%v", config.Port)
+	o.server = &http.Server{
+		Addr: hostAddress,
+	}
+	return o
+}
+
+// SetupRoutes registers the metrics and health probe handlers on the default mux.
+func (o *DefaultObservabilityServer) SetupRoutes() {
+	http.Handle("/metrics", promhttp.Handler())
+	http.HandleFunc("/livez", health.LivenessProbe)
+	http.HandleFunc("/readyz", health.ReadinessProbe)
+}
+
+// StartServer runs the HTTP server until ctx is cancelled. It returns any
+// error other than http.ErrServerClosed.
+func (o *DefaultObservabilityServer) StartServer(ctx context.Context) error {
+
+	go func() {
+		<-ctx.Done()
+		log.Debugf("shutting down prometheus server")
+		// ctx is already cancelled at this point - passing it to Shutdown
+		// would make the shutdown abort immediately instead of draining
+		// in-flight requests, so use a fresh context here.
+		if err := o.server.Shutdown(context.Background()); err != nil {
+			log.Errorf("error during observability server shutdown: %v", err)
+		}
+	}()
+	err := 
o.server.ListenAndServe() + + if !errors.Is(err, http.ErrServerClosed) { + return err + } + + return nil +} diff --git a/pkg/observability/server_test.go b/pkg/observability/server_test.go new file mode 100644 index 0000000..adb370c --- /dev/null +++ b/pkg/observability/server_test.go @@ -0,0 +1,76 @@ +package observability + +import ( + "github.com/dbschenker/node-undertaker/pkg/nodeundertaker/config" + "github.com/dbschenker/node-undertaker/pkg/observability/health" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestLivenessServer(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(health.LivenessProbe)) + defer ts.Close() + + res, err := http.Get(ts.URL) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + response, err := io.ReadAll(res.Body) + res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, "{\"Healthy\":true}", string(response)) +} + +func TestReadinessServer(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(health.ReadinessProbe)) + defer ts.Close() + + res, err := http.Get(ts.URL) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + response, err := io.ReadAll(res.Body) + res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, "{\"Ready\":true}", string(response)) +} + +func TestMetricsServer(t *testing.T) { + //dummy metric initialization + var AppStartCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "mytopic", + Subsystem: "mysystem", + Name: "myapp", + Help: "Number of starts for this app", + }, + ) + prometheus.MustRegister(AppStartCounter) + + ts := httptest.NewServer(promhttp.Handler()) + defer ts.Close() + + res, err := http.Get(ts.URL) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + response, err := 
io.ReadAll(res.Body) + res.Body.Close() + assert.NoError(t, err) + arrStrings := strings.Split(string(response), "\n") + assert.Contains(t, arrStrings, "mytopic_mysystem_myapp 0") + +} + +func TestGetDefaultObservabilityServerAndSetupRoutes(t *testing.T) { + cfg := &config.Config{ + Port: 8080, + } + res := GetDefaultObservabilityServer(cfg) + assert.Equal(t, res.server.Addr, ":8080") + res.SetupRoutes() +} diff --git a/sonar-project.properties b/sonar-project.properties new file mode 100644 index 0000000..83f27fb --- /dev/null +++ b/sonar-project.properties @@ -0,0 +1,10 @@ +sonar.projectKey=github.com_dbschenker_node-undertaker +sonar.projectName=Node undertaker + +sonar.sources=. +sonar.exclusions=**/*_test.go,**/mocks/*_mocks.go + +sonar.tests=. +sonar.test.inclusions=**/*_test.go + +sonar.go.coverage.reportPaths=coverage.out