diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 645c140285..fcf324930f 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -15,8 +15,12 @@ updates:
       - "ok-to-test"
       - "go"
       - "dependencies"
+    commit-message:
+      prefix: "NO-ISSUE"
   - package-ecosystem: "docker"
     directory: "/"
     schedule:
       interval: "weekly"
+    commit-message:
+      prefix: "NO-ISSUE"
diff --git a/Dockerfile.assisted-installer b/Dockerfile.assisted-installer
index 281f8eba57..0f05aea403 100644
--- a/Dockerfile.assisted-installer
+++ b/Dockerfile.assisted-installer
@@ -1,4 +1,9 @@
-FROM registry.ci.openshift.org/openshift/release:golang-1.15 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.16 AS builder
+
+ARG TARGETPLATFORM
+
+WORKDIR /go/src/github.com/openshift/assisted-installer-agent
+
 ENV GOFLAGS=-mod=mod
 WORKDIR /go/src/github.com/openshift/assisted-installer
 
@@ -9,9 +14,10 @@ COPY go.sum go.sum
 RUN go mod download
 
 COPY . .
-RUN make installer
 
-FROM quay.io/centos/centos:centos8
+RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GO_BUILD_ENV_PARAMS="GOOS=linux GOARCH=arm64" make installer;else make installer; fi
+
+FROM --platform=$TARGETPLATFORM quay.io/centos/centos:centos8
 
 COPY --from=builder /go/src/github.com/openshift/assisted-installer/build/installer /usr/bin/installer
 COPY --from=builder /go/src/github.com/openshift/assisted-installer/deploy/assisted-installer-controller /assisted-installer-controller/deploy
diff --git a/Dockerfile.assisted-installer-build b/Dockerfile.assisted-installer-build
index 173efc9e22..b82b49f413 100644
--- a/Dockerfile.assisted-installer-build
+++ b/Dockerfile.assisted-installer-build
@@ -1,4 +1,4 @@
-FROM registry.ci.openshift.org/openshift/release:golang-1.15
+FROM registry.ci.openshift.org/openshift/release:golang-1.16
 
 ENV GO111MODULE=on
 ENV GOFLAGS=""
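The conditional RUN in Dockerfile.assisted-installer above forwards GO_BUILD_ENV_PARAMS into make so that an arm64 binary can be cross-compiled on an amd64 build host; because CGO is disabled, no arm64 C toolchain is needed. A minimal sketch of the equivalent direct invocation, assuming the repository root as the working directory (paths follow the Makefile further down in this patch):

    # Cross-compile the installer binary for linux/arm64 on any build host.
    GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -o build/installer src/main/main.go
    # Inspect the result; "file" should report an aarch64 executable.
    file build/installer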
diff --git a/Dockerfile.assisted-installer-controller b/Dockerfile.assisted-installer-controller
index 3a97db4305..367db08c3a 100644
--- a/Dockerfile.assisted-installer-controller
+++ b/Dockerfile.assisted-installer-controller
@@ -1,7 +1,10 @@
-FROM quay.io/openshift/origin-cli:4.9.0 AS cli
+FROM --platform=$BUILDPLATFORM quay.io/openshift/origin-cli:4.9.0 AS cli
 
-FROM registry.ci.openshift.org/openshift/release:golang-1.15 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.16 AS builder
+
+ARG TARGETPLATFORM
 
 ENV GOFLAGS=-mod=mod
+
 WORKDIR /go/src/github.com/openshift/assisted-installer
 
 # Bring in the go dependencies before anything else so we can take
@@ -11,9 +14,10 @@ COPY go.sum go.sum
 RUN go mod download
 
 COPY . .
-RUN make controller
 
-FROM quay.io/centos/centos:centos8
+RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GO_BUILD_ENV_PARAMS="GOOS=linux GOARCH=arm64" make controller;else make controller; fi
+
+FROM --platform=$TARGETPLATFORM quay.io/centos/centos:centos8
 
 RUN yum -y install make gcc unzip wget curl rsync && yum clean all
 
 COPY --from=builder /go/src/github.com/openshift/assisted-installer/build/assisted-installer-controller /usr/bin/assisted-installer-controller
diff --git a/Makefile b/Makefile
index b4940cd21f..641ba3042a 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ NAMESPACE := $(or ${NAMESPACE},assisted-installer)
 GIT_REVISION := $(shell git rev-parse HEAD)
 PUBLISH_TAG := $(or ${GIT_REVISION})
 
-CONTAINER_BUILD_PARAMS = --network=host --label git_revision=${GIT_REVISION}
+CONTAINER_BUILD_PARAMS = --label git_revision=${GIT_REVISION}
 
 REPORTS ?= $(ROOT_DIR)/reports
 CI ?= false
@@ -53,18 +53,18 @@ endif
 build: installer controller
 
 installer:
-	CGO_ENABLED=0 go build -o build/installer src/main/main.go
+	$(GO_BUILD_ENV_PARAMS) CGO_ENABLED=0 go build -o build/installer src/main/main.go
 
 controller:
-	CGO_ENABLED=0 go build -o build/assisted-installer-controller src/main/assisted-installer-controller/assisted_installer_main.go
+	$(GO_BUILD_ENV_PARAMS) CGO_ENABLED=0 go build -o build/assisted-installer-controller src/main/assisted-installer-controller/assisted_installer_main.go
 
 build-images: installer-image controller-image
 
 installer-image:
-	$(CONTAINER_COMMAND) build $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer . -t $(INSTALLER)
+	docker buildx build --platform=linux/arm64,linux/amd64 $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer . -t $(INSTALLER) --push
 
 controller-image:
-	$(CONTAINER_COMMAND) build $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer-controller . -t $(CONTROLLER)
+	docker buildx build --platform=linux/arm64,linux/amd64 $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer-controller . -t $(CONTROLLER) --push
 
 push-installer: installer-image
 	$(CONTAINER_COMMAND) push $(INSTALLER)
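The rewritten installer-image and controller-image targets build a multi-arch manifest list and push it in a single step, which requires a docker buildx builder capable of both platforms. A minimal sketch of exercising the same flow by hand, assuming QEMU emulation is available; the builder name "multiarch" and the example tag are illustrative, not taken from this change:

    # One-time setup: create and select a buildx builder.
    docker buildx create --name multiarch --use
    # Mirror the installer-image target: build both platforms and push the manifest list.
    docker buildx build --platform=linux/arm64,linux/amd64 \
        -f Dockerfile.assisted-installer -t quay.io/example/assisted-installer:latest --push .
    # Verify that the pushed manifest list covers both architectures.
    docker buildx imagetools inspect quay.io/example/assisted-installer:latest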
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index af14e79dca..4851193a77 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -9,13 +9,9 @@ aliases:
     - gamli75
     - ori-amizur
     - oshercc
-    - razregev
    - romfreiman
-    - ronniel1
     - tsorya
     - yevgeny-shnaidman
-    - yuvigold
-    - masayag
     - nmagnezi
     - carbonin
     - rollandf
@@ -23,12 +19,14 @@ aliases:
     - ybettan
     - slaviered
     - osherdp
-    - asalkeld
     - flaper87
     - mkowalski
+    - lranjbar
+    - omertuc
   code-reviewers:
     - jakub-dzon
     - pkliczewski
     - masayag
     - jordigilh
     - machacekondra
+    - sagidayan
diff --git a/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template b/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template
index e5e58e057c..f888865b08 100644
--- a/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template
+++ b/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template
@@ -12,4 +12,4 @@ data:
   ca-cert-path: '{{.CACertPath}}'
   check-cluster-version: '{{.CheckCVO}}'
   high-availability-mode: {{.HaMode}}
-  must-gather-image: {{.MustGatherImage}}
\ No newline at end of file
+  must-gather-image: '{{.MustGatherImage}}'
\ No newline at end of file
diff --git a/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml b/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml
index f2a39392b6..3be4e80095 100644
--- a/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml
+++ b/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml
@@ -2,5 +2,3 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: assisted-installer
-  labels:
-    openshift.io/run-level: "0"
\ No newline at end of file
diff --git a/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml b/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml
index aa5e4348f3..44abc74eb0 100644
--- a/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml
+++ b/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml
@@ -154,3 +154,17 @@ rules:
       - pods
     verbs:
       - deletecollection
+  - apiGroups:
+      - "security.openshift.io"
+    resourceNames:
+      - "anyuid"
+      - "nonroot"
+      - "hostmount-anyuid"
+      - "machine-api-termination-handler"
+      - "hostnetwork"
+      - "hostaccess"
+      - "node-exporter"
+    resources:
+      - securitycontextconstraints
+    verbs:
+      - use
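The new rule grants the "use" verb on a fixed set of SecurityContextConstraints, which is how OpenShift authorizes a service account to run pods under those SCCs. A hedged sketch of checking the grant on a live cluster; the service account subject below is an assumption based on the deploy manifests, not spelled out in this patch:

    # Ask RBAC whether the controller's service account may use the "anyuid" SCC.
    kubectl auth can-i use securitycontextconstraints/anyuid \
        --as=system:serviceaccount:assisted-installer:assisted-installer-controller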
diff --git a/go.mod b/go.mod
index 3e4255297b..ef808ef256 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/openshift/assisted-installer
 
-go 1.14
+go 1.16
 
 require (
 	github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 // indirect
@@ -8,6 +8,7 @@ require (
 	github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 // indirect
 	github.com/benbjohnson/clock v1.0.3 // indirect
 	github.com/coreos/ignition/v2 v2.10.1
+	github.com/go-logr/logr v0.4.0 // indirect
 	github.com/go-openapi/runtime v0.19.28
 	github.com/go-openapi/strfmt v0.20.1
 	github.com/go-openapi/swag v0.19.9
@@ -16,11 +17,11 @@ require (
 	github.com/hashicorp/go-version v1.3.0
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/metal3-io/baremetal-operator v0.0.0
-	github.com/onsi/ginkgo v1.16.2
-	github.com/onsi/gomega v1.12.0
+	github.com/onsi/ginkgo v1.16.4
+	github.com/onsi/gomega v1.13.0
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a
-	github.com/openshift/assisted-service v1.0.10-0.20210526082015-cf99d1fca3fe
+	github.com/openshift/assisted-service v1.0.10-0.20210921191140-7dcb60579fdf
 	github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c
 	github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597
 	github.com/operator-framework/api v0.8.0
@@ -30,13 +31,12 @@ require (
 	github.com/thoas/go-funk v0.8.0
 	github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50
 	golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
-	golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	gopkg.in/yaml.v2 v2.4.0
-	honnef.co/go/tools v0.0.1-2020.1.6 // indirect
-	k8s.io/api v0.21.0
-	k8s.io/apimachinery v0.21.0
+	k8s.io/api v0.21.1
+	k8s.io/apimachinery v0.21.1
 	k8s.io/client-go v12.0.0+incompatible
-	sigs.k8s.io/controller-runtime v0.8.3
+	sigs.k8s.io/controller-runtime v0.9.0
 )
 
 replace (
@@ -44,6 +44,7 @@ replace (
 	github.com/openshift/api => github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971
 	github.com/openshift/hive/pkg/apis => github.com/carbonin/hive/pkg/apis v0.0.0-20210209195732-57e8c3ae12d1
 	github.com/openshift/machine-api-operator => github.com/openshift/machine-api-operator v0.2.1-0.20201026110925-50ea569da51b
+	github.com/irifrance/gini => github.com/go-air/gini v1.0.1
 	k8s.io/api => k8s.io/api v0.19.2
 	k8s.io/apimachinery => k8s.io/apimachinery v0.19.2
 	k8s.io/client-go => k8s.io/client-go v0.19.2
diff --git a/go.sum b/go.sum
index 01b6abb0da..56cbbf5838 100644
--- a/go.sum
+++ b/go.sum
@@ -70,7 +70,6 @@ github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go
 github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -118,7 +117,6 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -153,7 +151,6 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
 github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.32.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.34.21/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.34.28 h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 h1:0NmehRCgyk5rljDQLKUO+cRJCnduDyn11+zGZIc9Z48=
@@ -181,6 +178,7 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ
 github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
@@ -188,7 +186,6 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k
 github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
 github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
@@ -312,7 +309,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
-github.com/diskfs/go-diskfs v1.1.2-0.20210216073915-ba492710e2d8 h1:1QzBnogt3Wut5Qw/6qKFLey1fjBgRHPp6deHCLmsKe4=
 github.com/diskfs/go-diskfs v1.1.2-0.20210216073915-ba492710e2d8/go.mod h1:ZTeTbzixuyfnZW5y5qKMtjV2o+GLLHo1KfMhotJI4Rk=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
@@ -324,7 +320,6 @@ github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=
 github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
@@ -342,7 +337,6 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
@@ -382,7 +376,6 @@ github.com/filanov/stateswitch v0.0.0-20200714113403-51a42a34c604/go.mod h1:GYnX
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
@@ -395,11 +388,13 @@ github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05
 github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
 github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-air/gini v1.0.1/go.mod h1:swH5OTtiG/X/YrU06r288qZwq6I1agpbuXQOB55xqGU=
 github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
 github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -409,6 +404,7 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -591,7 +587,6 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
 github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
 github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -674,7 +669,6 @@ github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo
 github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU=
 github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
@@ -792,9 +786,7 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
 github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
@@ -807,13 +799,13 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jsonnet-bundler/jsonnet-bundler v0.2.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
@@ -837,7 +829,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -914,6 +905,7 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
 github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
 github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -945,7 +937,6 @@ github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8
 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/moby v1.13.1 h1:mC5WwQwCXt/dYxZ1cIrRsnJAWw7VdtcTZUIGr4tXzOM=
 github.com/moby/moby v1.13.1/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
 github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
 github.com/moby/sys/mountinfo v0.3.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
@@ -1005,8 +996,9 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134=
 github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1016,8 +1008,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8=
-github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
+github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1039,19 +1031,21 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.m
 github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
 github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/openshift-online/ocm-sdk-go v0.1.160 h1:IrmZoTmPiqa8VmdthDcztWHmXHeHiFDvF5wife6hYdc=
-github.com/openshift-online/ocm-sdk-go v0.1.160/go.mod h1:9y8jM+VhZdl5VLy8l3RI+uDltcWPL1oW6lqtjtoDHqY=
-github.com/openshift-online/ocm-sdk-go v0.1.165 h1:NndMhSbJzTsBgZuoIgDhHHmk6pgF9rYmJOYikAunJxg=
-github.com/openshift-online/ocm-sdk-go v0.1.165/go.mod h1:/DStCZJQ2XOV/ktkODyVnUCPnGfH3agwp0e+GZTLr3E=
+github.com/openshift-online/ocm-sdk-go v0.1.190 h1:GKQbhOeNIHNVQGBAPKhzPyUTrKKatz2j4d4AU2DNnJQ=
+github.com/openshift-online/ocm-sdk-go v0.1.190/go.mod h1:XpupkiWFXkiAPdgS8Dq7Gknk2E6AximJnpC98Hk4fl4=
 github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971 h1:l4jU2pbYCFlWffDL8gQaN24UohhHI8Zq/zmiSpYzy7o=
 github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971/go.mod h1:M3xexPhgM8DISzzRpuFUy+jfPjQPIcs9yqEYj17mXV8=
 github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a h1:mk+JGnFSuRTTWzODLs1gclp5om0+k4lH8btJSJxQA80=
 github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a/go.mod h1:q1jXr/OgGA7bOBu1uzlZXOjExPHIoAXxzQlifXBmXLY=
 github.com/openshift/assisted-service v0.0.0-20200811075806-62dcbcd62c0b/go.mod h1:H96z5QdPNv7PZ+/p+VafuHyAUqlVcpOFTnfMl8QYzQ4=
-github.com/openshift/assisted-service v1.0.10-0.20210310114450-f60ae14adc85 h1:SPISKZsxv4CWXd6sVFiS6jE5pUSLUVuPEFJXAtDOvrc=
-github.com/openshift/assisted-service v1.0.10-0.20210310114450-f60ae14adc85/go.mod h1:73ccdCnFCgHCVdJJcAZUf/fM3AWOctmMosIRF+fgCNM=
-github.com/openshift/assisted-service v1.0.10-0.20210526082015-cf99d1fca3fe h1:EW8FQ82Q7uDTWqsKURv9sRu31K4yeLRfXAaIO60e7Dg=
-github.com/openshift/assisted-service v1.0.10-0.20210526082015-cf99d1fca3fe/go.mod h1:uRETrhDQ7oKwMmc45F4yXXnf+gfryMDJnXq7pPFcayU=
+github.com/openshift/assisted-service v1.0.10-0.20210729090313-b33b6f69330b h1:8b4eTlKzJFVZmtwFHEjHflBPIv5UHx7Cqr7txb3Xf0I=
+github.com/openshift/assisted-service v1.0.10-0.20210729090313-b33b6f69330b/go.mod h1:06CYHjrS5tanbGRM4ZB3Sd0gcaSIfMkXNjDmZVFAWhs=
+github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515 h1:1CnH/Cy9KjPeT3+ThA6nLSUY3cA3IEix1k3rKvNuxJY=
+github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515/go.mod h1:06CYHjrS5tanbGRM4ZB3Sd0gcaSIfMkXNjDmZVFAWhs=
+github.com/openshift/assisted-service v1.0.10-0.20210919133239-48a7ecedcb85 h1:xU8eqsoc2YenPcD9MAaXDMEQDTmcTsVfbaHoKZ+eemw=
+github.com/openshift/assisted-service v1.0.10-0.20210919133239-48a7ecedcb85/go.mod h1:SsuNh9LQjVGO4N8PG5fp8G0LVSkVcWcF158yOCJFAj8=
+github.com/openshift/assisted-service v1.0.10-0.20210921191140-7dcb60579fdf h1:wvV0DoZNXFPPLiqv1O4VpjhOhAZW+KcKkZ9w0z/f7Lg=
+github.com/openshift/assisted-service v1.0.10-0.20210921191140-7dcb60579fdf/go.mod h1:SsuNh9LQjVGO4N8PG5fp8G0LVSkVcWcF158yOCJFAj8=
 github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe h1:bu99IMkaN6o/JcxpWEb1eT8gDdL9hLcwOmfiVIbXWj8=
 github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe/go.mod h1:DOgBIuBcXuTD8uub0jL7h6gBdIBt3CFrwz6K2FtfMBA=
 github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
@@ -1066,9 +1060,7 @@ github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201016155852-
 github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201016155852-4090a6970205/go.mod h1:oOG/TNSBse4brosfLCH/G2Q/42ye+DZQq8VslA5SxOs=
 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570 h1:Bmi2b7YADMXpNQ6EPV4rQqoVRSjj3dzDU3dSAEKXut0=
 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570/go.mod h1:7NRECVE26rvP1/fs1CbhfY5gsgnnFQNhb9txTFzWmUw=
-github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo=
 github.com/openshift/custom-resource-status v1.1.0/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo=
-github.com/openshift/hive/apis v0.0.0-20210302234131-7026427c0ae5/go.mod h1:jpKB2/wcJC/WZF0xg2TqOrH6QHG+8/4eqFsAvZQkGdo=
 github.com/openshift/hive/apis v0.0.0-20210506000654-5c038fb05190/go.mod h1:Ujw9ImzSYvo9VlUX6Gjy7zPFP7xYUAU50tdf1wPpN6c=
 github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ=
 github.com/openshift/machine-api-operator v0.2.1-0.20201026110925-50ea569da51b h1:vV5t7qPtp1GrowV+eJvUgzqxf/ZtSOvTsjLKgSN9caw=
@@ -1129,7 +1121,6 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
 github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1139,7 +1130,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
 github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
-github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
 github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
 github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -1148,6 +1138,7 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
 github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.49.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
 github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE=
 github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
 github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -1163,8 +1154,9 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
 github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
 github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -1183,8 +1175,9 @@ github.com/prometheus/common
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1199,8 +1192,9 @@ github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
 github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI=
 github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
@@ -1253,11 +1247,9 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
 github.com/slok/go-http-metrics v0.8.0 h1:rsIKW30MzLjbWRBkCQoe/Oxh/F283MKT6afdH3mXTaA=
 github.com/slok/go-http-metrics v0.8.0/go.mod h1:f22ekj0Ht4taz2clntVmLRSK4D+feX33zkdDW0Eytvk=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
 github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
 github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -1302,8 +1294,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
 github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stripe/safesql v0.2.0/go.mod h1:q7b2n0JmzM1mVGfcYpanfVb2j23cXZeWFxcILPn3JV4=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1329,7 +1322,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
 github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
-github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
 github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -1373,6 +1365,7 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
@@ -1426,7 +1419,6 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -1468,8 +1460,8 @@ golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
-golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1491,7 +1483,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
 golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -1500,8 +1491,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1556,6 +1547,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1574,8 +1566,10 @@ golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1672,8 +1666,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1757,7 +1756,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
 golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
 golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
 golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200430192856-2840dafb9ee1/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -1769,10 +1767,9 @@ golang.org/x/tools v0.0.0-20200610160956-3e83d1e96d0e/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1879,7 +1876,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
 google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1889,7 +1885,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM=
 gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -1905,7 +1900,6 @@ gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0E
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
 gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@@ -1950,8 +1944,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.6 h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc=
-honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY=
 howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
 k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
 k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
@@ -1963,6 +1955,7 @@ k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDd
 k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI=
 k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
 k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
+k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
 k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs=
 k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg=
 k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
@@ -1978,6 +1971,7 @@ k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY=
 k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
 k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
 k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
+k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
 k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk=
 k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8=
 k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
@@ -1990,7 +1984,6 @@ k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc
 k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
 k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo=
 k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s=
-k8s.io/cli-runtime v0.20.5/go.mod h1:ihjPeQWDk7NGVIkNEvpwxA3gJvqtU+LtkDj11TvyXn4=
 k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
 k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
 k8s.io/code-generator v0.0.0-20200214080538-dc8f3adce97c/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
@@ -2002,14 +1995,13 @@ k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZ
 k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
 k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
 k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
+k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k=
 k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y=
 k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=
 k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
 k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng=
 k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
 k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk=
-k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk= -k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2033,6 +2025,7 @@ k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= @@ -2043,7 +2036,6 @@ k8s.io/kubectl v0.17.4/go.mod h1:im5QWmh6fvtmJkkNm4HToLe8z9aM3jihYK5X/wOybcY= k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU= k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M= -k8s.io/kubectl v0.20.5/go.mod h1:mlNQgyV18D4XFt5BmfSkrxQNS+arT2pXDQxxnH5lMiw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= @@ -2051,7 +2043,6 @@ k8s.io/metrics v0.17.4/go.mod h1:6rylW2iD3M9VppnEAAtJASY1XS8Pt9tcYh+tHxBeV3I= k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4= k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= -k8s.io/metrics v0.20.5/go.mod h1:vsptOayjKWKWHvWR1vFQY++vxydzaEo/2+JC7kSDKPU= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -2060,8 +2051,9 @@ k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils 
v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s=
+k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
 mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index 4b5d132caf..027d3696ec 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -2,6 +2,7 @@ package assisted_installer_controller
 
 import (
 	"context"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -23,7 +24,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	configv1 "github.com/openshift/api/config/v1"
 	"github.com/openshift/assisted-installer/src/common"
 	"github.com/openshift/assisted-installer/src/inventory_client"
 	"github.com/openshift/assisted-installer/src/k8s_client"
@@ -34,20 +34,24 @@ import (
 )
 
 const (
+	// We retry 10 times at a 30-second interval, meaning we tolerate the operator being in a
+	// failed state for 5 minutes.
+	failedOperatorRetry       = 10
 	generalWaitTimeoutInt     = 30
 	controllerLogsSecondsAgo  = 120 * 60
 	consoleOperatorName       = "console"
-	cvoOperatorName           = "cvo"
 	ingressConfigMapName      = "default-ingress-cert"
 	ingressConfigMapNamespace = "openshift-config-managed"
 	dnsServiceName            = "dns-default"
 	dnsServiceNamespace       = "openshift-dns"
 	dnsOperatorNamespace      = "openshift-dns-operator"
+	maxFetchAttempts          = 5
 	maxDeletionAttempts       = 5
 	maxDNSServiceIPAttempts   = 45
 	KeepWaiting               = false
 	ExitWaiting               = true
-	customManifestsFile       = "custom_manifests.yaml"
+	customManifestsFile       = "custom_manifests.json"
+	kubeconfigFileName        = "kubeconfig-noingress"
 )
 
 var (
@@ -59,7 +63,9 @@ var (
 	CompleteTimeout          = 30 * time.Minute
 	DNSAddressRetryInterval  = 20 * time.Second
 	DeletionRetryInterval    = 10 * time.Second
+	FetchRetryInterval       = 10 * time.Second
 	LongWaitTimeout          = 10 * time.Hour
+	CVOMaxTimeout            = 3 * time.Hour
 )
 
 // assisted installer controller is added to control installation process after bootstrap pivot
 // as a first step it will wait till nodes are added to cluster and update their status to Done
 type ControllerConfig struct {
-	ClusterID            string `envconfig:"CLUSTER_ID" required:"true" `
+	ClusterID            string `envconfig:"CLUSTER_ID" required:"true"`
 	URL                  string `envconfig:"INVENTORY_URL" required:"true"`
-	PullSecretToken      string `envconfig:"PULL_SECRET_TOKEN" required:"true"`
+	PullSecretToken      string `envconfig:"PULL_SECRET_TOKEN" required:"true" secret:"true"`
 	SkipCertVerification bool   `envconfig:"SKIP_CERT_VERIFICATION" required:"false" default:"false"`
 	CACertPath           string `envconfig:"CA_CERT_PATH" required:"false" default:""`
 	Namespace            string `envconfig:"NAMESPACE" required:"false" default:"assisted-installer"`
@@ -84,14 +90,25 @@ type Controller interface {
 
 type ControllerStatus struct {
 	errCounter uint32
+	components map[string]bool
+	lock       sync.Mutex
 }
 
 type controller struct {
 	ControllerConfig
-	log *logrus.Logger
-	ops ops.Ops
-	ic  inventory_client.InventoryClient
-	kc  k8s_client.K8SClient
+	Status *ControllerStatus
+	log    *logrus.Logger
+	ops    ops.Ops
+	ic     inventory_client.InventoryClient
+	kc     k8s_client.K8SClient
+}
+
+// manifest stores an operator manifest used by assisted-installer to create OLM CRs:
+type manifest struct {
+	// name of the operator whose CR manifest we want to create
+	Name string
+	// content of the operator's manifest
+	Content string
 }
 
 func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inventory_client.InventoryClient, kc k8s_client.K8SClient) *controller {
@@ -101,6 +118,13 @@ func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inv
 		ops:              ops,
 		ic:               ic,
 		kc:               kc,
+		Status:           NewControllerStatus(),
+	}
+}
+
+func NewControllerStatus() *ControllerStatus {
+	return &ControllerStatus{
+		components: make(map[string]bool),
 	}
 }
 
@@ -112,6 +136,28 @@ func (status *ControllerStatus) HasError() bool {
 	return atomic.LoadUint32(&status.errCounter) > 0
 }
 
+func (status *ControllerStatus) OperatorError(component string) {
+	status.lock.Lock()
+	defer status.lock.Unlock()
+	status.components[component] = true
+}
+
+func (status *ControllerStatus) HasOperatorError() bool {
+	status.lock.Lock()
+	defer status.lock.Unlock()
+	return len(status.components) > 0
+}
+
+func (status *ControllerStatus) GetOperatorsInError() []string {
+	result := make([]string, 0)
+	status.lock.Lock()
+	defer status.lock.Unlock()
+	for op := range status.components {
+		result = append(result, op)
+	}
+	return result
+}
+
 func logHostsStatus(log logrus.FieldLogger, hosts map[string]inventory_client.HostData) {
 	hostsStatus := make(map[string][]string)
 	for hostname, hostData := range hosts {
@@ -148,6 +194,7 @@ func (c *controller) waitAndUpdateNodesStatus() bool {
 	log := utils.RequestIDLogger(ctxReq, c.log)
 
 	assistedNodesMap, err := c.ic.GetHosts(ctxReq, log, ignoreStatuses)
 	if err != nil {
 		log.WithError(err).Error("Failed to get node map from the assisted service")
 		return KeepWaiting
 	}
+	knownIpAddresses := common.BuildHostsMapIPAddressBased(assistedNodesMap)
@@ -177,25 +224,25 @@
 		return KeepWaiting
 	}
 	for _, node := range nodes.Items {
-		host, ok := hostsInProgressMap[strings.ToLower(node.Name)]
+		host, ok := common.HostMatchByNameOrIPAddress(node, hostsInProgressMap, knownIpAddresses)
 		if !ok {
-			if _, ok := assistedNodesMap[strings.ToLower(node.Name)]; !ok {
-				log.Warnf("Node %s is not in inventory hosts", strings.ToLower(node.Name))
-			}
-
+			log.Warnf("Node %s is not in inventory hosts", strings.ToLower(node.Name))
 			continue
 		}
-		if common.IsK8sNodeIsReady(node) {
-			log.Infof("Found new ready node %s with inventory id %s, kubernetes id %s, updating its status to %s",
-				node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageDone)
-			if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.ID.String(), models.HostStageDone, ""); err != nil {
+
+		if host.Host.Progress.CurrentStage == models.HostStageConfiguring {
+			log.Infof("Found new joined node %s with inventory id
%s, kubernetes id %s, updating its status to %s", - node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageJoined) - if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.ID.String(), models.HostStageJoined, ""); err != nil { + } + + if common.IsK8sNodeIsReady(node) { + log.Infof("Found new ready node %s with inventory id %s, kubernetes id %s, updating its status to %s", + node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageDone) + if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageDone, ""); err != nil { log.WithError(err).Errorf("Failed to update node %s installation status", node.Name) continue } @@ -219,13 +266,14 @@ func (c *controller) HackDNSAddressConflict(wg *sync.WaitGroup) { return } - ip, _, _ := net.ParseCIDR(networks[0]) - ip4 := ip.To4() - if ip4 == nil { - c.log.Infof("Service network is IPv6: %s, skipping the .10 address hack", ip) + netIp, _, _ := net.ParseCIDR(networks[0]) + ip := netIp.To16() + if ip == nil { + c.log.Infof("Failed to parse service network cidr %s, skipping", networks[0]) return } - ip4[3] = 10 // .10 is the conflicting address + + ip[len(ip)-1] = 10 // .10 or :a is the conflicting address for i := 0; i < maxDNSServiceIPAttempts; i++ { svs, err := c.kc.ListServices("") @@ -234,17 +282,17 @@ func (c *controller) HackDNSAddressConflict(wg *sync.WaitGroup) { time.Sleep(DNSAddressRetryInterval) continue } - s := c.findServiceByIP(ip4.String(), &svs.Items) + s := c.findServiceByIP(ip.String(), &svs.Items) if s == nil { - c.log.Infof("No service found with IP %s, attempt %d/%d", ip4, i+1, maxDNSServiceIPAttempts) + c.log.Infof("No service found with IP %s, attempt %d/%d", ip, i+1, maxDNSServiceIPAttempts) time.Sleep(DNSAddressRetryInterval) continue } if s.Name == dnsServiceName && s.Namespace == dnsServiceNamespace { - c.log.Infof("Service %s has successfully taken IP %s", dnsServiceName, ip4) + c.log.Infof("Service %s has successfully taken IP %s", dnsServiceName, ip) break } - c.log.Warnf("Deleting service %s in namespace %s whose IP %s conflicts with %s", s.Name, s.Namespace, ip4, dnsServiceName) + c.log.Warnf("Deleting service %s in namespace %s whose IP %s conflicts with %s", s.Name, s.Namespace, ip, dnsServiceName) if err := c.killConflictingService(s); err != nil { c.log.WithError(err).Warnf("Failed to delete service %s in namespace %s", s.Name, s.Namespace) continue @@ -341,7 +389,7 @@ func isCsrApproved(csr *certificatesv1.CertificateSigningRequest) bool { return false } -func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup, status *ControllerStatus) { +func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup) { defer func() { c.log.Infof("Finished PostInstallConfigs") wg.Done() @@ -367,8 +415,9 @@ func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup, return } if err != nil { + c.log.Error(err) errMessage = err.Error() - status.Error() + c.Status.Error() } success := err == nil c.sendCompleteInstallation(ctx, success, errMessage) @@ -377,74 +426,110 @@ func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup, func (c controller) postInstallConfigs(ctx context.Context) error { var err error - c.log.Infof("Waiting for cluster version operator: %t", c.WaitForClusterVersion) - - if c.WaitForClusterVersion { - err = c.waitingForClusterVersion(ctx) - if err != nil { - return err - } - } - - // Unlabel run-level from 
assisted-installer namespace after the installation.
-	// Keeping the `run-level` label represents a security risk as it overwrites the SecurityContext configurations
-	// used for applications deployed in this namespace.
-	data := []byte(`{"metadata":{"labels":{"$patch": "delete", "openshift.io/run-level":"0"}}}`)
-	c.log.Infof("Removing run-level label from %s namespace", c.ControllerConfig.Namespace)
-	err = c.kc.PatchNamespace(c.ControllerConfig.Namespace, data)
-	if err != nil {
-		// It is a conscious decision not to fail an installation if for any reason patching the namespace
-		// in order to remove the `run-level` label has failed. This will be redesigned in the next release
-		// so that the `run-level` label is not created in the first place.
-		c.log.Warn("Failed to unlabel AI namespace after the installation.")
+	if err = c.waitingForClusterOperators(ctx); err != nil {
+		return errors.Wrapf(err, "Timeout while waiting for cluster operators to be available")
 	}
 
 	err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.addRouterCAToClusterCA)
 	if err != nil {
-		return errors.Errorf("Timeout while waiting router ca data")
+		return errors.Wrapf(err, "Timeout while waiting router ca data")
 	}
 
 	unpatch, err := utils.EtcdPatchRequired(c.ControllerConfig.OpenshiftVersion)
 	if err != nil {
-		return err
+		return errors.Wrapf(err, "Failed to check if etcd should be unpatched")
 	}
 	if unpatch && c.HighAvailabilityMode != models.ClusterHighAvailabilityModeNone {
-		err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.unpatchEtcd)
-		if err != nil {
-			return errors.Errorf("Timeout while trying to unpatch etcd")
+		if err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.unpatchEtcd); err != nil {
+			return errors.Wrapf(err, "Timeout while trying to unpatch etcd")
 		}
 	} else {
 		c.log.Infof("Skipping etcd unpatch for cluster version %s", c.ControllerConfig.OpenshiftVersion)
 	}
 
-	err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.validateConsoleAvailability)
+	// Wait for OLM operators
+	if err = c.waitForOLMOperators(ctx); err != nil {
+		return errors.Wrapf(err, "Error while initializing OLM operators")
+	}
+
+	return nil
+}
+
+func (c controller) waitForOLMOperators(ctx context.Context) error {
+	var operators []models.MonitoredOperator
+	var err error
+
+	// Get the monitored operators:
+	err = utils.Retry(maxFetchAttempts, FetchRetryInterval, c.log, func() error {
+		operators, err = c.ic.GetClusterMonitoredOLMOperators(context.TODO(), c.ClusterID)
+		if err != nil {
+			return errors.Wrapf(err, "Error while fetching the monitored operators from assisted-service")
+		}
+		return nil
+	})
 	if err != nil {
-		return errors.Errorf("Timeout while waiting for console to become available")
+		return errors.Wrapf(err, "Failed to fetch monitored operators")
+	}
+	if len(operators) == 0 {
+		c.log.Info("No OLM operators found.")
+		return nil
 	}
 
-	// Apply post install manifests
-	err = utils.WaitForPredicateWithContext(ctx, retryPostManifestTimeout, GeneralWaitInterval, c.applyPostInstallManifests)
+	// Get maximum wait timeout for OLM operators:
+	waitTimeout := c.getMaximumOLMTimeout(operators)
+	c.log.Infof("OLM operators %v, wait timeout %v", operators, waitTimeout)
+
+	// Wait for the CSV state of the OLM operators, before applying OLM CRs
+	err = utils.WaitForPredicateParamsWithContext(ctx, waitTimeout, GeneralWaitInterval, c.waitForCSVBeCreated, operators)
 	if err != nil {
-		c.log.WithError(err).Warnf("Failed to apply post manifests.")
-		return err
+		// We continue in case of failure, because we want to try to apply manifests at least for the operators that are ready.
+		c.log.WithError(err).Warnf("Failed to wait for some of the OLM operators to be initialized")
 	}
 
-	waitTimeout := c.getMaximumOLMTimeout()
-	err = utils.WaitForPredicateWithContext(ctx, waitTimeout, GeneralWaitInterval, c.waitForOLMOperators)
+	// Apply post install manifests
+	err = utils.WaitForPredicateParamsWithContext(ctx, retryPostManifestTimeout, GeneralWaitInterval, c.applyPostInstallManifests, operators)
 	if err != nil {
+		return errors.Wrapf(err, "Failed to apply post manifests")
+	}
+
+	if err = c.waitForCSV(ctx, waitTimeout); err != nil {
 		// In case the timeout occur, we have to update the pending OLM operators to failed state,
 		// so the assisted-service can update the cluster state to completed.
-		if err = c.updatePendingOLMOperators(); err != nil {
+		if err = c.updatePendingOLMOperators(ctx); err != nil {
 			return errors.Errorf("Timeout while waiting for some of the operators and not able to update its state")
 		}
-		c.log.WithError(err).Warnf("Timeout while waiting for OLM operators be installed")
-		return err
+		return errors.Wrapf(err, "Timeout while waiting for OLM operators to be installed")
 	}
 
 	return nil
 }
 
-func (c controller) applyPostInstallManifests() bool {
+func (c controller) getReadyOperators(operators []models.MonitoredOperator) ([]string, []models.MonitoredOperator, error) {
+	var readyOperators []string
+	for index := range operators {
+		handler := NewClusterServiceVersionHandler(c.kc, &operators[index], c.Status)
+		if handler.IsInitialized() {
+			readyOperators = append(readyOperators, handler.GetName())
+		}
+	}
+	return readyOperators, operators, nil
+}
+
+func (c controller) waitForCSVBeCreated(arg interface{}) bool {
+	operators := arg.([]models.MonitoredOperator)
+	readyOperators, operators, err := c.getReadyOperators(operators)
+	if err != nil {
+		c.log.WithError(err).Warn("Error while fetching the operators' state.")
+		return false
+	}
+	if len(operators) == len(readyOperators) {
+		return true
+	}
+
+	return false
+}
+
+func (c controller) applyPostInstallManifests(arg interface{}) bool {
 	ctx := utils.GenerateRequestContext()
 	tempDir, err := ioutil.TempDir("", "controller-custom-manifests-")
 	if err != nil {
@@ -464,11 +549,56 @@
 		return false
 	}
 
-	err = c.ops.CreateManifests(kubeconfigName, customManifestPath)
+	// Unmarshal the content of the operators' manifests:
+	var manifests []manifest
+	data, err := ioutil.ReadFile(customManifestPath)
 	if err != nil {
-		c.log.WithError(err).Error("Failed to apply manifest file.")
+		c.log.WithError(err).Errorf("Failed to read the custom manifests file.")
 		return false
 	}
+	if err = json.Unmarshal(data, &manifests); err != nil {
+		c.log.WithError(err).Errorf("Failed to unmarshal custom manifest file content %s.", data)
+		return false
+	}
+
+	// Create the manifests of the operators that are properly initialized:
+	readyOperators, _, err := c.getReadyOperators(arg.([]models.MonitoredOperator))
+	if err != nil {
+		c.log.WithError(err).Errorf("Failed to fetch operators from assisted-service")
+		return false
+	}
+
+	c.log.Infof("Ready operators to be applied: %v", readyOperators)
+
+	for _, manifest := range manifests {
+		c.log.Infof("Applying manifest %s: %s", manifest.Name, manifest.Content)
+
+		// Check if the operator is properly initialized by CSV:
+		if !func() bool {
+			for _, readyOperator := range readyOperators {
+				if readyOperator == manifest.Name {
+					return
true + } + } + return false + }() { + continue + } + + content, err := base64.StdEncoding.DecodeString(manifest.Content) + if err != nil { + c.log.WithError(err).Errorf("Failed to decode content of operator CR %s.", manifest.Name) + return false + } + + err = c.ops.CreateManifests(kubeconfigName, content) + if err != nil { + c.log.WithError(err).Error("Failed to apply manifest file.") + return false + } + + c.log.Infof("Manifest %s applied.", manifest.Name) + } return true } @@ -720,14 +850,7 @@ func (c controller) addRouterCAToClusterCA() bool { } -func (c controller) getMaximumOLMTimeout() time.Duration { - - operators, err := c.ic.GetClusterMonitoredOLMOperators(context.TODO(), c.ClusterID) - if err != nil { - c.log.WithError(err).Warningf("Failed to connect to assisted service") - return WaitTimeout - } - +func (c controller) getMaximumOLMTimeout(operators []models.MonitoredOperator) time.Duration { timeout := WaitTimeout.Seconds() for _, operator := range operators { timeout = math.Max(float64(operator.TimeoutSeconds), timeout) @@ -736,26 +859,30 @@ func (c controller) getMaximumOLMTimeout() time.Duration { return time.Duration(timeout * float64(time.Second)) } -func (c controller) getProgressingOLMOperators() ([]models.MonitoredOperator, error) { - ret := make([]models.MonitoredOperator, 0) +func (c controller) getProgressingOLMOperators() ([]*models.MonitoredOperator, error) { + ret := make([]*models.MonitoredOperator, 0) operators, err := c.ic.GetClusterMonitoredOLMOperators(context.TODO(), c.ClusterID) if err != nil { c.log.WithError(err).Warningf("Failed to connect to assisted service") return ret, err } - for _, operator := range operators { - if operator.Status != models.OperatorStatusAvailable && operator.Status != models.OperatorStatusFailed { - ret = append(ret, operator) + for index := range operators { + if operators[index].Status != models.OperatorStatusAvailable && operators[index].Status != models.OperatorStatusFailed { + ret = append(ret, &operators[index]) } } return ret, nil } -func (c controller) updatePendingOLMOperators() error { +func (c controller) updatePendingOLMOperators(ctx context.Context) error { c.log.Infof("Updating pending OLM operators") - operators, _ := c.getProgressingOLMOperators() + operators, err := c.getProgressingOLMOperators() + if err != nil { + return err + } for _, operator := range operators { - err := c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, models.OperatorStatusFailed, "Waiting for operator timed out") + c.Status.OperatorError(operator.Name) + err := c.ic.UpdateClusterOperator(ctx, c.ClusterID, operator.Name, models.OperatorStatusFailed, "Waiting for operator timed out") if err != nil { c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name) return err @@ -764,141 +891,61 @@ func (c controller) updatePendingOLMOperators() error { return nil } -// waitForOLMOperators wait until all OLM monitored operators are available or failed. 
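For context, applyPostInstallManifests above expects custom_manifests.json to be a JSON array of {Name, Content} objects in which Content is a base64-encoded CR manifest. The following is a minimal, self-contained sketch of producing and decoding such a payload; the operator name "lso" and the CR body are illustrative assumptions, not values fixed by this change.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// manifest mirrors the struct introduced in this change: the operator name and
// the base64-encoded content of its CR manifest.
type manifest struct {
	Name    string
	Content string
}

func main() {
	// Hypothetical CR body; any YAML or JSON manifest could go here.
	cr := "apiVersion: local.storage.openshift.io/v1\nkind: LocalVolume\n"

	// Encode the way assisted-service is expected to serve custom_manifests.json.
	raw, err := json.Marshal([]manifest{{
		Name:    "lso",
		Content: base64.StdEncoding.EncodeToString([]byte(cr)),
	}})
	if err != nil {
		panic(err)
	}

	// Decode the way applyPostInstallManifests does: unmarshal the array, then
	// base64-decode each manifest body before applying it.
	var manifests []manifest
	if err := json.Unmarshal(raw, &manifests); err != nil {
		panic(err)
	}
	content, err := base64.StdEncoding.DecodeString(manifests[0].Content)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s:\n%s", manifests[0].Name, content)
}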
-func (c controller) waitForOLMOperators() bool {
-	c.log.Infof("Checking OLM operators")
-	operators, _ := c.getProgressingOLMOperators()
-	if len(operators) == 0 {
-		return true
-	}
-	for _, operator := range operators {
-		csvName, err := c.kc.GetCSVFromSubscription(operator.Namespace, operator.SubscriptionName)
-		if err != nil {
-			c.log.WithError(err).Warnf("Failed to get subscription of operator %s", operator.Name)
-			continue
-		}
-
-		csv, err := c.kc.GetCSV(operator.Namespace, csvName)
-		if err != nil {
-			c.log.WithError(err).Warnf("Failed to get %s", operator.Name)
-			continue
-		}
-
-		operatorStatus := utils.CsvStatusToOperatorStatus(string(csv.Status.Phase))
-		err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, operatorStatus, csv.Status.Message)
-		if err != nil {
-			c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name)
-			continue
-		}
-
-		c.log.Infof("CSV %s is in status %s, message %s.", operator.Name, csv.Status.Phase, csv.Status.Message)
-	}
-	return false
-}
-
-func (c controller) isOperatorAvailableInCluster(operatorName string) bool {
-	c.log.Infof("Checking %s operator availability status", operatorName)
-	co, err := c.kc.GetClusterOperator(operatorName)
+// waitForCSV waits until all OLM monitored operators are available or failed.
+func (c controller) waitForCSV(ctx context.Context, waitTimeout time.Duration) error {
+	operators, err := c.getProgressingOLMOperators()
 	if err != nil {
-		c.log.WithError(err).Warnf("Failed to get %s operator", operatorName)
-		return false
+		return err
 	}
-
-	operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions)
-	err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operatorName, operatorStatus, operatorMessage)
-	if err != nil {
-		c.log.WithError(err).Warnf("Failed to update %s operator status %s with message %s", operatorName, operatorStatus, operatorMessage)
-		return false
+	if len(operators) == 0 {
+		return nil
 	}
 
-	if !c.checkOperatorStatusCondition(co, configv1.OperatorAvailable, configv1.ConditionTrue) ||
-		!c.checkOperatorStatusCondition(co, configv1.OperatorDegraded, configv1.ConditionFalse) {
-		return false
-	}
+	handlers := make(map[string]*ClusterServiceVersionHandler)
 
-	c.log.Infof("%s operator is available in cluster", operatorName)
+	for index := range operators {
+		handlers[operators[index].Name] = NewClusterServiceVersionHandler(c.kc, operators[index], c.Status)
+	}
 
-	return true
-}
+	areOLMOperatorsAvailable := func() bool {
+		if len(handlers) == 0 {
+			return true
+		}
 
-func (c controller) isOperatorAvailableInService(operatorName string) bool {
-	operatorStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, operatorName)
-	if err != nil {
-		c.log.WithError(err).Errorf("Failed to get cluster %s %s operator status", c.ClusterID, operatorName)
+		for index := range handlers {
+			if c.isOperatorAvailable(handlers[index]) {
+				delete(handlers, index)
+			}
+		}
 		return false
 	}
 
-	if operatorStatusInService.Status == models.OperatorStatusAvailable {
-		c.log.Infof("Service acknowledged %s operator is available for cluster %s", operatorName, c.ClusterID)
-		return true
-	}
-
-	return false
-}
-
-// validateConsoleAvailability checks if the console operator is available
-func (c controller) validateConsoleAvailability() bool {
-	return c.isOperatorAvailableInCluster(consoleOperatorName) &&
-		c.isOperatorAvailableInService(consoleOperatorName)
+	return utils.WaitForPredicateWithContext(ctx, waitTimeout, GeneralWaitInterval, areOLMOperatorsAvailable)
 }
 
-// waitingForClusterVersion checks the Cluster Version Operator availability in the
-// new OCP cluster. A success would be announced only when the service acknowledges
-// the CVO availability, in order to avoid unsycned scenarios.
-//
-// This function would be aligned with the console operator reporting workflow
-// as part of the deprecation of the old API in MGMT-5188.
-func (c controller) waitingForClusterVersion(ctx context.Context) error {
+// waitingForClusterOperators checks the Console operator and the Cluster Version Operator
+// availability in the new OCP cluster.
+// A success would be announced only when the service acknowledges the operators' availability,
+// in order to avoid unsynced scenarios.
+func (c controller) waitingForClusterOperators(ctx context.Context) error {
+	// In case CVO changes its message we reset the timer, but we still want a maximum overall
+	// timeout, so a context with a timeout is used
+	ctxWithTimeout, cancel := context.WithTimeout(ctx, CVOMaxTimeout)
+	defer cancel()
 	isClusterVersionAvailable := func(timer *time.Timer) bool {
-		c.log.Infof("Checking cluster version operator availability status")
-		co, err := c.kc.GetClusterVersion("version")
-		if err != nil {
-			c.log.WithError(err).Warn("Failed to get cluster version operator")
-			return false
-		}
-
-		cvoStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, cvoOperatorName)
-		if err != nil {
-			c.log.WithError(err).Errorf("Failed to get cluster %s cvo status", c.ClusterID)
-			return false
-		}
-
-		if cvoStatusInService.Status == models.OperatorStatusAvailable {
-			c.log.Infof("Service acknowledged CVO is available for cluster %s", c.ClusterID)
-			return true
-		}
-
-		operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions)
+		result := c.isOperatorAvailable(NewClusterOperatorHandler(c.kc, consoleOperatorName))
 
-		if cvoStatusInService.Status != operatorStatus || (cvoStatusInService.StatusInfo != operatorMessage && operatorMessage != "") {
-			// This is a common pattern to ensure the channel is empty after a stop has been called
-			// More info on time/sleep.go documentation
-			if !timer.Stop() {
-				<-timer.C
-			}
-			timer.Reset(WaitTimeout)
-
-			status := fmt.Sprintf("Cluster version status: %s message: %s", operatorStatus, operatorMessage)
-			c.log.Infof(status)
-
-			// Update built-in monitored operator cluster version status
-			if err := c.ic.UpdateClusterOperator(utils.GenerateRequestContext(), c.ClusterID, cvoOperatorName, operatorStatus, operatorMessage); err != nil {
-				c.log.WithError(err).Errorf("Failed to update cluster %s cvo status", c.ClusterID)
-			}
+		if c.WaitForClusterVersion {
+			result = c.isOperatorAvailable(NewClusterVersionHandler(c.kc, timer))
 		}
-		return false
+		return result
 	}
-
-	err := utils.WaitForPredicateWithTimer(ctx, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable)
-	if err != nil {
-		return errors.Wrapf(err, "Timeout while waiting for cluster version to be available")
-	}
-	return nil
+	return utils.WaitForPredicateWithTimer(ctxWithTimeout, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable)
 }
 
 func (c controller) sendCompleteInstallation(ctx context.Context, isSuccess bool, errorInfo string) {
-	c.log.Infof("Start complete installation step, with params success:%t, error info %s", isSuccess, errorInfo)
+	c.log.Infof("Start complete installation step, with params success: %t, error info: %s", isSuccess, errorInfo)
 
 	_ = utils.WaitForPredicateWithContext(ctx, CompleteTimeout, GeneralProgressUpdateInt, func() bool {
 		ctxReq := utils.GenerateRequestContext()
 		if err := c.ic.CompleteInstallation(ctxReq, c.ClusterID, isSuccess, errorInfo); err != nil {
@@ -925,20 +972,27 @@ func (c controller) logClusterOperatorsStatus() {
 
 /**
 * This function upload the following logs at once to the service at the end of the installation process
-* It takes a linient approach so if some logs are not available it ignores them and moves on
+* It takes a lenient approach so if some logs are not available it ignores them and moves on
 * currently the bundled logs are:
 * - controller logs
 * - oc must-gather logs
 **/
-func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSeconds int64, isMustGatherEnabled bool, mustGatherImg string) error {
+func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSeconds int64) error {
 	var tarentries = make([]utils.TarEntry, 0)
 	var ok bool = true
 
 	ctx := utils.GenerateRequestContext()
+	// Upload operator logs before must-gather
 	c.logClusterOperatorsStatus()
-	if isMustGatherEnabled {
+	if c.Status.HasError() || c.Status.HasOperatorError() {
+		c.log.Infof("Uploading cluster operator status logs before must-gather")
+		err := common.UploadPodLogs(c.kc, c.ic, c.ClusterID, podName, c.Namespace, controllerLogsSecondsAgo, c.log)
+		if err != nil {
+			c.log.WithError(err).Warnf("Failed to upload controller logs")
+		}
 		c.log.Infof("Uploading oc must-gather logs")
-		if tarfile, err := c.collectMustGatherLogs(ctx, mustGatherImg); err == nil {
+		images := c.parseMustGatherImages()
+		if tarfile, err := c.collectMustGatherLogs(ctx, images...); err == nil {
 			if entry, tarerr := utils.NewTarEntryFromFile(tarfile); tarerr == nil {
 				tarentries = append(tarentries, *entry)
 			}
@@ -988,11 +1042,44 @@
 	return nil
 }
 
+func (c controller) parseMustGatherImages() []string {
+	images := make([]string, 0)
+	if c.MustGatherImage == "" {
+		c.log.Infof("collecting must-gather logs using the default image from the release")
+		return images
+	}
+
+	c.log.Infof("collecting must-gather logs using this image configuration %s", c.MustGatherImage)
+	var imageMap map[string]string
+	err := json.Unmarshal([]byte(c.MustGatherImage), &imageMap)
+	if err != nil {
+		//MustGatherImage is not a JSON.
Pass it as is + images = append(images, c.MustGatherImage) + return images + } + + //Use the parsed MustGatherImage to find the images needed for collecting + //the information + if c.Status.HasError() { + //general error - collect all data from the cluster using the standard image + images = append(images, imageMap["ocp"]) + } + + for _, op := range c.Status.GetOperatorsInError() { + if imageMap[op] != "" { + //per failed operator - add feature image for collecting more + //information about failed olm operators + images = append(images, imageMap[op]) + } + } + c.log.Infof("collecting must-gather logs with images: %v", images) + return images +} + func (c controller) downloadKubeconfigNoingress(ctx context.Context, dir string) (string, error) { // Download kubeconfig file - kubeconfigFileName := "kubeconfig-noingress" kubeconfigPath := path.Join(dir, kubeconfigFileName) - err := c.ic.DownloadFile(ctx, kubeconfigFileName, kubeconfigPath) + err := c.ic.DownloadClusterCredentials(ctx, kubeconfigFileName, kubeconfigPath) if err != nil { c.log.Errorf("Failed to download noingress kubeconfig %v\n", err) return "", err @@ -1002,7 +1089,7 @@ func (c controller) downloadKubeconfigNoingress(ctx context.Context, dir string) return kubeconfigPath, nil } -func (c controller) collectMustGatherLogs(ctx context.Context, mustGatherImg string) (string, error) { +func (c controller) collectMustGatherLogs(ctx context.Context, images ...string) (string, error) { tempDir, ferr := ioutil.TempDir("", "controller-must-gather-logs-") if ferr != nil { c.log.Errorf("Failed to create temp directory for must-gather-logs %v\n", ferr) @@ -1015,7 +1102,7 @@ func (c controller) collectMustGatherLogs(ctx context.Context, mustGatherImg str } //collect must gather logs - logtar, err := c.ops.GetMustGatherLogs(tempDir, kubeconfigPath, mustGatherImg) + logtar, err := c.ops.GetMustGatherLogs(tempDir, kubeconfigPath, images...) if err != nil { c.log.Errorf("Failed to collect must-gather logs %v\n", err) return "", err @@ -1027,7 +1114,7 @@ func (c controller) collectMustGatherLogs(ctx context.Context, mustGatherImg str // Uploading logs every 5 minutes // We will take logs of assisted controller and upload them to assisted-service // by creating tar gz of them. -func (c *controller) UploadLogs(ctx context.Context, wg *sync.WaitGroup, status *ControllerStatus) { +func (c *controller) UploadLogs(ctx context.Context, wg *sync.WaitGroup) { podName := "" ticker := time.NewTicker(LogsUploadPeriod) progressCtx := utils.GenerateRequestContext() @@ -1044,7 +1131,7 @@ func (c *controller) UploadLogs(ctx context.Context, wg *sync.WaitGroup, status c.log.Infof("Upload final controller and cluster logs before exit") c.ic.ClusterLogProgressReport(progressCtx, c.ClusterID, models.LogsStateRequested) _ = utils.WaitForPredicate(WaitTimeout, LogsUploadPeriod, func() bool { - err := c.uploadSummaryLogs(podName, c.Namespace, controllerLogsSecondsAgo, status.HasError(), c.MustGatherImage) + err := c.uploadSummaryLogs(podName, c.Namespace, controllerLogsSecondsAgo) if err != nil { c.log.Infof("retry uploading logs in 5 minutes...") } @@ -1107,21 +1194,3 @@ func (c controller) SetReadyState() { return true }) } - -// checkOperatorStatusCondition checks if given operator has a condition with an expected status. 
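As parseMustGatherImages above shows, MustGatherImage may be either a plain image reference or a JSON map from component name to image, keyed by "ocp" for general failures and by operator name for failed OLM operators. Below is a minimal, self-contained sketch of that fallback logic under those assumptions; the image references are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// parseImages mirrors the fallback in parseMustGatherImages: a value that is
// not valid JSON is passed through as a single image, while a JSON object is
// treated as a map of component name -> image.
func parseImages(mustGatherImage string, failedOperators []string, hasGeneralError bool) []string {
	images := make([]string, 0)
	var imageMap map[string]string
	if err := json.Unmarshal([]byte(mustGatherImage), &imageMap); err != nil {
		// Not JSON: pass it through as-is.
		return append(images, mustGatherImage)
	}
	if hasGeneralError {
		// General error: collect all data with the standard image.
		images = append(images, imageMap["ocp"])
	}
	for _, op := range failedOperators {
		// Per failed operator: add its feature image, if one is configured.
		if imageMap[op] != "" {
			images = append(images, imageMap[op])
		}
	}
	return images
}

func main() {
	// Hypothetical image references.
	cfg := `{"ocp": "quay.io/example/must-gather:latest", "lso": "quay.io/example/lso-must-gather:latest"}`
	fmt.Println(parseImages(cfg, []string{"lso"}, true))
}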
-func (c controller) checkOperatorStatusCondition(co *configv1.ClusterOperator, - conditionType configv1.ClusterStatusConditionType, - status configv1.ConditionStatus) bool { - for _, condition := range co.Status.Conditions { - if condition.Type == conditionType { - if condition.Status == status { - return true - } - c.log.Warnf("Operator %s condition '%s' is not met due to '%s': %s", - co.Name, conditionType, condition.Reason, condition.Message) - return false - } - } - c.log.Warnf("Operator %s condition '%s' does not exist", co.Name, conditionType) - return false -} diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 2bcb925b93..b1eda488dc 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/google/uuid" metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" "github.com/openshift/assisted-installer/src/common" machinev1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" @@ -48,8 +49,6 @@ var ( MustGatherImage: "quay.io/test-must-gather:latest", } - aiNamespaceRunlevelPatch = []byte(`{"metadata":{"labels":{"$patch": "delete", "openshift.io/run-level":"0"}}}`) - progressClusterVersionCondition = &configv1.ClusterVersion{ Status: configv1.ClusterVersionStatus{ Conditions: []configv1.ClusterOperatorStatusCondition{{Type: configv1.OperatorProgressing, @@ -84,28 +83,27 @@ var _ = Describe("installer HostRoleMaster role", func() { inventoryNamesIds map[string]inventory_client.HostData kubeNamesIds map[string]string wg sync.WaitGroup - status *ControllerStatus defaultStages []models.HostStage ) kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", "node1": "2834ff2e-8965-48a5-859a-0f1459485a77", "node2": "57df89ee-3546-48a5-859a-0f1459485a66"} - l.SetOutput(ioutil.Discard) BeforeEach(func() { ctrl = gomock.NewController(GinkgoT()) mockops = ops.NewMockOps(ctrl) mockbmclient = inventory_client.NewMockInventoryClient(ctrl) mockk8sclient = k8s_client.NewMockK8SClient(ctrl) + infraEnvId := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f50") node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") - currentState := models.HostProgressInfo{CurrentStage: models.HostStageConfiguring} + currentState := models.HostProgressInfo{CurrentStage: models.HostStageJoined} currentStatus := models.HostStatusInstallingInProgress inventoryNamesIds = map[string]inventory_client.HostData{ - "node0": {Host: &models.Host{ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} + "node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", "node1": "2834ff2e-8965-48a5-859a-0f1459485a77", "node2": "57df89ee-3546-48a5-859a-0f1459485a66"} @@ 
-117,7 +115,6 @@ var _ = Describe("installer HostRoleMaster role", func() { models.HostStageDone} assistedController = NewController(l, defaultTestControllerConf, mockops, mockbmclient, mockk8sclient) - status = &ControllerStatus{} }) AfterEach(func() { ctrl.Finish() @@ -125,17 +122,19 @@ var _ = Describe("installer HostRoleMaster role", func() { configuringSuccess := func() { mockk8sclient.EXPECT().GetPods(gomock.Any(), gomock.Any(), "").Return([]v1.Pod{}, nil).AnyTimes() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).AnyTimes() + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).AnyTimes() } updateProgressSuccess := func(stages []models.HostStage, inventoryNamesIds map[string]inventory_client.HostData) { var hostIds []string + var infraEnvIds []string for _, host := range inventoryNamesIds { hostIds = append(hostIds, host.Host.ID.String()) + infraEnvIds = append(infraEnvIds, host.Host.InfraEnvID.String()) } for i, stage := range stages { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(nil).Times(1) } } @@ -154,10 +153,38 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().ClusterLogProgressReport(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() } + mockGetServiceOperators := func(operators []models.MonitoredOperator) { + for index := range operators { + if operators[index].Status != models.OperatorStatusAvailable { + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), operators[index].Name).Return(&operators[index], nil).Times(1) + } else { + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), operators[index].Name).Return(&operators[index], nil).MinTimes(1) + } + } + } + + mockGetCSV := func(operator models.MonitoredOperator, csv *olmv1alpha1.ClusterServiceVersion) { + randomCSV := uuid.New().String() + mockk8sclient.EXPECT().GetCSVFromSubscription(operator.Namespace, operator.SubscriptionName).Return(randomCSV, nil).Times(1) + mockk8sclient.EXPECT().GetCSV(operator.Namespace, randomCSV).Return(csv, nil).Times(1) + } + setConsoleAsAvailable := func(clusterID string) { + WaitTimeout = 100 * time.Millisecond + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(validConsoleOperator, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), clusterID, consoleOperatorName, models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), clusterID, consoleOperatorName).Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable}, nil).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusAvailable}}) + } + + setCvoAsAvailable := func() { + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(availableClusterVersionCondition, nil).Times(1) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, 
models.OperatorStatusAvailable, availableClusterVersionCondition.Status.Conditions[0].Message).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: models.OperatorStatusAvailable}}) } setClusterAsFinalizing := func() { @@ -172,14 +199,12 @@ var _ = Describe("installer HostRoleMaster role", func() { } setControllerWaitForOLMOperators := func(clusterID string) { - WaitTimeout = 100 * time.Millisecond - setClusterAsFinalizing() - uploadIngressCert(clusterID) setConsoleAsAvailable(clusterID) + uploadIngressCert(clusterID) } - returnServiceWithDot10Address := func(name, namespace string) *gomock.Call { + returnServiceWithAddress := func(name, namespace, ip string) *gomock.Call { return mockk8sclient.EXPECT().ListServices("").Return(&v1.ServiceList{ Items: []v1.Service{ { @@ -188,17 +213,38 @@ var _ = Describe("installer HostRoleMaster role", func() { Namespace: namespace, }, Spec: v1.ServiceSpec{ - ClusterIP: "10.56.20.10", + ClusterIP: ip, }, }, }, }, nil) } + returnServiceWithDot10Address := func(name, namespace string) *gomock.Call { + return returnServiceWithAddress(name, namespace, "10.56.20.10") + } + returnServiceNetwork := func() { mockk8sclient.EXPECT().GetServiceNetworks().Return([]string{"10.56.20.0/24"}, nil) } + mockGetOLMOperators := func(operators []models.MonitoredOperator) { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).Times(1) + } + + mockApplyPostInstallManifests := func(operators []models.MonitoredOperator) { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).Times(1) + mockbmclient.EXPECT().DownloadFile(gomock.Any(), customManifestsFile, gomock.Any()).DoAndReturn( + func(ctx context.Context, filename, dest string) error { + if err := ioutil.WriteFile(dest, []byte("[]"), 0644); err != nil { + return err + } + return nil + }, + ).Times(1) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), kubeconfigFileName, gomock.Any()).Return(nil).Times(1) + } + Context("Waiting for 3 nodes", func() { It("Set ready event", func() { // fail to connect to assisted and then succeed @@ -214,10 +260,14 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().CreateEvent(assistedController.Namespace, common.AssistedControllerIsReadyEvent, gomock.Any(), common.AssistedControllerPrefix).Return(nil, nil).Times(1) assistedController.SetReadyState() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) }) It("waitAndUpdateNodesStatus happy flow - all nodes installing", func() { + + updateProgressSuccess([]models.HostStage{models.HostStageJoined, + models.HostStageJoined, + models.HostStageJoined}, inventoryNamesIds) updateProgressSuccess(defaultStages, inventoryNamesIds) hosts := create3Hosts(models.HostStatusInstalling, models.HostStageConfiguring) @@ -302,11 +352,13 @@ var _ = Describe("installer HostRoleMaster role", func() { BeforeEach(func() { updateProgressSuccess = func(stages []models.HostStage, inventoryNamesIds map[string]inventory_client.HostData) { var hostIds []string + var infraEnvIds []string for _, host := range inventoryNamesIds { hostIds = append(hostIds, host.Host.ID.String()) + infraEnvIds = append(infraEnvIds, host.Host.InfraEnvID.String()) } for i, stage := range stages { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(nil).Times(1) + 
mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(nil).Times(1) } } kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", @@ -355,12 +407,14 @@ var _ = Describe("installer HostRoleMaster role", func() { It("UpdateStatus fails and then succeeds, list nodes failed ", func() { updateProgressSuccessFailureTest := func(stages []models.HostStage, inventoryNamesIds map[string]inventory_client.HostData) { var hostIds []string + var infraEnvIds []string for _, host := range inventoryNamesIds { hostIds = append(hostIds, host.Host.ID.String()) + infraEnvIds = append(infraEnvIds, host.Host.InfraEnvID.String()) } for i, stage := range stages { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(nil).Times(1) } } mockk8sclient.EXPECT().ListNodes().Return(GetKubeNodes(kubeNamesIds), nil).Times(2) @@ -478,6 +532,54 @@ var _ = Describe("installer HostRoleMaster role", func() { }) }) + Context("waitForCSVBeCreated", func() { + var ( + operatorName = "lso" + subscriptionName = "local-storage-operator" + namespaceName = "openshift-local-storage" + ) + BeforeEach(func() { + assistedController.WaitForClusterVersion = true + GeneralWaitInterval = 1 * time.Millisecond + }) + It("empty operators", func() { + Expect(assistedController.waitForCSVBeCreated([]models.MonitoredOperator{})).Should(Equal(true)) + }) + It("wrong subscription", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("", fmt.Errorf("dummy")).Times(1) + Expect(assistedController.waitForCSVBeCreated(operators)).Should(Equal(false)) + }) + It("non-initialized operator", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("", nil).Times(1) + Expect(assistedController.waitForCSVBeCreated(operators)).Should(Equal(false)) + }) + It("initialized operator", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("randomCSV", nil).Times(1) + Expect(assistedController.waitForCSVBeCreated(operators)).Should(Equal(true)) + }) + }) + Context("PostInstallConfigs", func() { Context("waiting for cluster version", func() { BeforeEach(func() { @@ -487,31 +589,60 @@ var _ = Describe("installer HostRoleMaster role", func() { It("success", func() { installing := models.ClusterStatusInstalling - 
mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) + setControllerWaitForOLMOperators(assistedController.ClusterID) + setCvoAsAvailable() + + // Completion + mockGetOLMOperators([]models.MonitoredOperator{}) + mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) + + wg.Add(1) + go assistedController.PostInstallConfigs(context.TODO(), &wg) + wg.Wait() + + Expect(assistedController.Status.HasError()).Should(Equal(false)) + }) + + It("lots of failures then success", func() { + installing := models.ClusterStatusInstalling mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) setClusterAsFinalizing() - uploadIngressCert(assistedController.ClusterID) - // Console + // Console errors + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("no-operator")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( &configv1.ClusterOperator{ Status: configv1.ClusterOperatorStatus{ Conditions: []configv1.ClusterOperatorStatusCondition{}, }, }, fmt.Errorf("no-conditions")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithCondition(configv1.OperatorDegraded, configv1.ConditionFalse), fmt.Errorf("false-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), fmt.Errorf("missing-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionFalse), fmt.Errorf("false-available-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), fmt.Errorf("true-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( &configv1.ClusterOperator{ Status: configv1.ClusterOperatorStatus{ @@ -520,60 +651,69 @@ var _ = Describe("installer HostRoleMaster role", func() { }, }, }, fmt.Errorf("missing-conditions")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) 
mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithConditionsStatus(configv1.ConditionTrue, configv1.ConditionTrue), fmt.Errorf("bad-conditions-status")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionTrue), fmt.Errorf("bad-conditions-status")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionFalse), fmt.Errorf("bad-conditions-status")).Times(1) - setConsoleAsAvailable("cluster-id") - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) + setConsoleAsAvailable("cluster-id") + uploadIngressCert(assistedController.ClusterID) - // CVO - mockk8sclient.EXPECT().GetClusterVersion("version").Return(nil, fmt.Errorf("dummy")).Times(1) + // CVO errors + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: ""}}) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).Times(1) - mockk8sclient.EXPECT().GetClusterVersion("version").Return(progressClusterVersionCondition, nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). - Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).Times(1) + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: ""}}) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(progressClusterVersionCondition, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusProgressing, progressClusterVersionCondition.Status.Conditions[0].Message).Times(1) - mockk8sclient.EXPECT().GetClusterVersion("version").Return(availableClusterVersionCondition, nil).Times(2) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). - Return(&models.MonitoredOperator{Status: models.OperatorStatusProgressing, StatusInfo: progressClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1) - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusAvailable, availableClusterVersionCondition.Status.Conditions[0].Message).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). 
-				Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable, StatusInfo: availableClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1)
+			// Fail the CVO query 8 more times before letting it succeed
+			extraFailTimes := 8
+			for i := 0; i < extraFailTimes; i++ {
+				mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: ""}})
+			}
+			mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).Times(extraFailTimes)

-			// Completion
-			mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1)
-			mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1)
-			mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1)
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(2)
+			setCvoAsAvailable()
+
+			mockGetOLMOperators([]models.MonitoredOperator{})
 			mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1)
 			mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1)

 			wg.Add(1)
-			go assistedController.PostInstallConfigs(context.TODO(), &wg, status)
+			go assistedController.PostInstallConfigs(context.TODO(), &wg)
 			wg.Wait()
-			Expect(status.HasError()).Should(Equal(false))
+			Expect(assistedController.Status.HasError()).Should(Equal(false))
 		})
+
 		It("failure", func() {
-			WaitTimeout = 20 * time.Millisecond
-			GeneralProgressUpdateInt = 30 * time.Millisecond
 			setClusterAsFinalizing()
+			mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), consoleOperatorName).
+				Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).AnyTimes()
+			mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("dummy")).AnyTimes()
+			mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).
+ Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).AnyTimes() + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).AnyTimes() + mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, gomock.Any()).Return(nil).Times(1) wg.Add(1) - go assistedController.PostInstallConfigs(context.TODO(), &wg, status) + go assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(true)) + Expect(assistedController.Status.HasError()).Should(Equal(true)) }) }) @@ -584,40 +724,28 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("success", func() { installing := models.ClusterStatusInstalling - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) - setClusterAsFinalizing() - uploadIngressCert(assistedController.ClusterID) - setConsoleAsAvailable("cluster-id") - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).AnyTimes() + setControllerWaitForOLMOperators(assistedController.ClusterID) + mockGetOLMOperators([]models.MonitoredOperator{}) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) - assistedController.PostInstallConfigs(context.TODO(), &wg, status) + assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) }) It("failure", func() { - WaitTimeout = 20 * time.Millisecond setClusterAsFinalizing() + setConsoleAsAvailable("cluster-id") mockk8sclient.EXPECT().GetConfigMap(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("aaa")).MinTimes(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, - "Timeout while waiting router ca data").Return(nil).Times(1) - - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) + "Timeout while waiting router ca data: timed out").Return(nil).Times(1) wg.Add(1) - go assistedController.PostInstallConfigs(context.TODO(), &wg, status) + go assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(true)) + Expect(assistedController.Status.HasError()).Should(Equal(true)) }) }) @@ -628,56 +756,77 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("waiting for single OLM operator", func() { - setControllerWaitForOLMOperators(assistedController.ClusterID) + By("setup", func() { + setControllerWaitForOLMOperators(assistedController.ClusterID) + operators := []models.MonitoredOperator{ + {SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: 
"", TimeoutSeconds: 120 * 60}, + } + mockGetOLMOperators(operators) + mockApplyPostInstallManifests(operators) + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("local-storage-operator", nil).Times(2) + }) + + By("empty status", func() { + mockGetServiceOperators([]models.MonitoredOperator{{Name: "lso", Status: ""}}) + mockGetCSV( + models.MonitoredOperator{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso"}, + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + }) + + By("in progress", func() { + mockGetServiceOperators([]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusProgressing}}) + mockGetCSV( + models.MonitoredOperator{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso"}, + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusProgressing, gomock.Any()).Return(nil).Times(1) + }) + + By("available", func() { + mockGetServiceOperators([]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusProgressing}}) + mockGetCSV( + models.MonitoredOperator{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso"}, + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseSucceeded}}, + ) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusAvailable}}) + }) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: "", TimeoutSeconds: 120 * 60}}, nil, - ).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60}}, nil, - ).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusAvailable, TimeoutSeconds: 120 * 60}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", 
nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{}, nil).Times(1) - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", gomock.Any(), gomock.Any()).Return(nil).AnyTimes() mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) - assistedController.PostInstallConfigs(context.TODO(), &wg, status) + assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasOperatorError()).Should(Equal(false)) }) + It("waiting for single OLM operator which timeouts", func() { - setControllerWaitForOLMOperators(assistedController.ClusterID) + By("setup", func() { + setControllerWaitForOLMOperators(assistedController.ClusterID) + operators := []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 0}} + mockApplyPostInstallManifests(operators) + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).AnyTimes() + }) + + By("endless empty status", func() { + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), "lso").Return(&models.MonitoredOperator{Name: "lso", Status: ""}, nil).AnyTimes() + mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).AnyTimes() + mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, nil).AnyTimes() + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusProgressing, gomock.Any()).Return(nil).AnyTimes() + }) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 1}}, nil, - ).AnyTimes() - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).AnyTimes() - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, nil).AnyTimes() - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusProgressing, gomock.Any()).Return(nil).AnyTimes() mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", 
models.OperatorStatusFailed, "Waiting for operator timed out").Return(nil).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) - assistedController.PostInstallConfigs(context.TODO(), &wg, status) + assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.GetOperatorsInError()).To(ContainElement("lso")) }) }) }) @@ -813,7 +962,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetPods(assistedController.Namespace, gomock.Any(), fmt.Sprintf("status.phase=%s", v1.PodRunning)).Return(nil, fmt.Errorf("dummy")).MinTimes(2).MaxTimes(10) ctx, cancel := context.WithCancel(context.Background()) wg.Add(1) - go assistedController.UploadLogs(ctx, &wg, status) + go assistedController.UploadLogs(ctx, &wg) time.Sleep(1 * time.Second) cancel() wg.Wait() @@ -823,9 +972,10 @@ var _ = Describe("installer HostRoleMaster role", func() { reportLogProgressSuccess() mockk8sclient.EXPECT().GetPods(assistedController.Namespace, gomock.Any(), fmt.Sprintf("status.phase=%s", v1.PodRunning)).Return([]v1.Pod{pod}, nil).MinTimes(1) mockk8sclient.EXPECT().GetPodLogsAsBuffer(assistedController.Namespace, "test", gomock.Any()).Return(nil, fmt.Errorf("dummy")).MinTimes(1) + mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(nil).MinTimes(1) ctx, cancel := context.WithCancel(context.Background()) wg.Add(1) - go assistedController.UploadLogs(ctx, &wg, status) + go assistedController.UploadLogs(ctx, &wg) time.Sleep(500 * time.Millisecond) cancel() wg.Wait() @@ -837,7 +987,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) logClusterOperatorsSuccess() reportLogProgressSuccess() - err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo, false, "") + err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo) Expect(err).To(HaveOccurred()) }) It("Validate upload logs happy flow (controllers logs only)", func() { @@ -846,7 +996,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(nil).Times(1) logClusterOperatorsSuccess() reportLogProgressSuccess() - err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo, false, "") + err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo) Expect(err).NotTo(HaveOccurred()) }) @@ -856,7 +1006,7 @@ var _ = Describe("installer HostRoleMaster role", func() { r := bytes.NewBuffer([]byte("test")) mockk8sclient.EXPECT().GetPodLogsAsBuffer(assistedController.Namespace, "test", gomock.Any()).Return(r, nil).Times(1) mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(nil).Times(1) - err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, 
controllerLogsSecondsAgo, false, "") + err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo) Expect(err).NotTo(HaveOccurred()) }) @@ -869,7 +1019,7 @@ var _ = Describe("installer HostRoleMaster role", func() { callUploadLogs := func(waitTime time.Duration) { wg.Add(1) - go assistedController.UploadLogs(ctx, &wg, status) + go assistedController.UploadLogs(ctx, &wg) time.Sleep(waitTime) cancel() wg.Wait() @@ -898,8 +1048,8 @@ var _ = Describe("installer HostRoleMaster role", func() { successUpload() logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), assistedController.MustGatherImage).Return("../../test_files/tartest.tar.gz", nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) - status.Error() + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + assistedController.Status.Error() callUploadLogs(150 * time.Millisecond) }) @@ -907,14 +1057,14 @@ var _ = Describe("installer HostRoleMaster role", func() { successUpload() logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) callUploadLogs(50 * time.Millisecond) }) It("Validate upload logs exits with no error + failed upload", func() { logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(fmt.Errorf("dummy")).AnyTimes() callUploadLogs(50 * time.Millisecond) }) @@ -924,77 +1074,222 @@ var _ = Describe("installer HostRoleMaster role", func() { logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Return("", fmt.Errorf("failed")) mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Return("../../test_files/tartest.tar.gz", nil) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) - status.Error() + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) + assistedController.Status.Error() callUploadLogs(50 * time.Millisecond) }) }) - Context("getMaximumOLMTimeout", func() { - It("Return general timeout if no OLM's present", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(1) - Expect(assistedController.getMaximumOLMTimeout()).To(Equal(WaitTimeout)) + Context("must-gather image set parsing", func() { + var ac *controller + BeforeEach(func() { + ac = NewController(l, defaultTestControllerConf, mockops, mockbmclient, mockk8sclient) + }) + + It("MustGatherImage is empty", func() { + ac.MustGatherImage = "" + Expect(ac.parseMustGatherImages()).To(BeEmpty()) + }) + It("MustGatherImage is string", func() { + images := ac.parseMustGatherImages() + Expect(images).NotTo(BeEmpty()) + Expect(images[0]).To(Equal(ac.MustGatherImage)) }) + It("MustGatherImage is json", 
func() {
+			ac.MustGatherImage = `{"ocp": "quay.io/openshift/must-gather", "cnv": "blah", "ocs": "foo"}`
+			ac.Status.Error()
+			ac.Status.OperatorError("cnv")
+			images := ac.parseMustGatherImages()
+			Expect(len(images)).To(Equal(2))
+			Expect(images).To(ContainElement("quay.io/openshift/must-gather"))
+			Expect(images).To(ContainElement("blah"))
+		})
+	})

-		It("Return general timeout if assisted service is not reacheble", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error")).Times(1)
-			Expect(assistedController.getMaximumOLMTimeout()).To(Equal(WaitTimeout))
+	Context("getMaximumOLMTimeout", func() {
+		It("Return general timeout if no OLM's present", func() {
+			operators := []models.MonitoredOperator{}
+			Expect(assistedController.getMaximumOLMTimeout(operators)).To(Equal(WaitTimeout))
 		})

 		It("Return general timeout if OLM's timeout is lower", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(1)
-			Expect(assistedController.getMaximumOLMTimeout()).To(Equal(WaitTimeout))
+			operators := []models.MonitoredOperator{
+				{
+					TimeoutSeconds: 0,
+				},
+			}
+
+			Expect(assistedController.getMaximumOLMTimeout(operators)).To(Equal(WaitTimeout))
 		})

 		It("Return maximum from multiple OLM's", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
-				[]models.MonitoredOperator{
-					{OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 120 * 60},
-					{OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 130 * 60},
-				}, nil,
-			).Times(1)
-			Expect(assistedController.getMaximumOLMTimeout()).To(Equal(130 * 60 * time.Second))
+			operators := []models.MonitoredOperator{
+				{OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 120 * 60},
+				{OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 130 * 60},
+			}
+			Expect(assistedController.getMaximumOLMTimeout(operators)).To(Equal(130 * 60 * time.Second))
 		})
 	})

 	Context("waitForOLMOperators", func() {
-		It("Don't wait if OLM operators list is empty", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
-				[]models.MonitoredOperator{}, nil,
-			).Times(1)
-			Expect(assistedController.waitForOLMOperators()).To(Equal(true))
-		})
-		It("Don't wait if OLM operator available", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
-				[]models.MonitoredOperator{{Status: models.OperatorStatusAvailable, OperatorType: models.OperatorTypeOlm}}, nil,
-			).Times(1)
-			Expect(assistedController.waitForOLMOperators()).To(Equal(true))
-		})
-		It("Don't wait if OLM operator failed", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
-				[]models.MonitoredOperator{{Status: models.OperatorStatusFailed, OperatorType: models.OperatorTypeOlm}}, nil,
-			).Times(1)
-			Expect(assistedController.waitForOLMOperators()).To(Equal(true))
-		})
-		It("Wait if OLM operator progressing and k8s unavailable", func() {
-			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
-				[]models.MonitoredOperator{{Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}}, nil,
-			).Times(1)
-			mockk8sclient.EXPECT().GetCSVFromSubscription(gomock.Any(), gomock.Any()).Return("", fmt.Errorf("Error")).Times(1)
-			Expect(assistedController.waitForOLMOperators()).To(Equal(false))
-		})
-		It("Wait if OLM operator progressing", func() {
-
mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{}, nil).Times(1) - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", gomock.Any(), gomock.Any()).Return(nil).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + var ( + operatorName = "lso" + subscriptionName = "local-storage-operator" + namespaceName = "openshift-local-storage" + ) + + BeforeEach(func() { + GeneralWaitInterval = 100 * time.Millisecond + WaitTimeout = 150 * time.Millisecond + }) + + It("List is empty", func() { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(1) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) + }) + It("progressing - no update (empty message)", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockGetOLMOperators(operators) + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + Expect(assistedController.waitForCSV(context.TODO(), WaitTimeout)).To(HaveOccurred()) + }) + It("progressing - no update (same message)", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + StatusInfo: "same", + }, + } + + mockGetOLMOperators(operators) + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{ + Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "same"}, + }, + ) + Expect(assistedController.waitForCSV(context.TODO(), WaitTimeout)).To(HaveOccurred()) + }) + It("progressing - update (new message)", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + StatusInfo: "old", + }, + } + + mockGetOLMOperators(operators) + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{ + Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "new"}, + }, + ) + + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", operatorName, gomock.Any(), gomock.Any()).Return(nil).Times(1) + Expect(assistedController.waitForCSV(context.TODO(), WaitTimeout)).To(HaveOccurred()) + }) + It("check that we tolerate the failed state reported by CSV", func() { + WaitTimeout = WaitTimeout * 10 + + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + OperatorType: 
models.OperatorTypeOlm, Name: operatorName, Status: models.OperatorStatusProgressing, TimeoutSeconds: 1, + }, + } + + mockGetOLMOperators(operators) + + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseFailed}}, + ) + + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseSucceeded}}, + ) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), operatorName, models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) + + newOperators := make([]models.MonitoredOperator, 0) + newOperators = append(newOperators, operators...) + newOperators[0].Status = models.OperatorStatusAvailable + mockGetServiceOperators(newOperators) + Expect(assistedController.waitForCSV(context.TODO(), LongWaitTimeout)).To(BeNil()) + }) + + It("multiple OLMs", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: "subscription-1", Namespace: "namespace-1", + OperatorType: models.OperatorTypeOlm, Name: "operator-1", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60, + }, + { + SubscriptionName: "subscription-2", Namespace: "namespace-2", + OperatorType: models.OperatorTypeOlm, Name: "operator-2", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60, + }, + { + SubscriptionName: "subscription-3", Namespace: "namespace-3", + OperatorType: models.OperatorTypeOlm, Name: "operator-3", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60, + }, + } + + mockGetOLMOperators(operators) + + By("first is available", func() { + newOperators := make([]models.MonitoredOperator, 0) + newOperators = append(newOperators, operators...) 
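+				// mutate a copy so the shared operators fixture keeps its original Progressing status for the later stages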
+ newOperators[0].Status = models.OperatorStatusAvailable + mockGetServiceOperators(newOperators) + + mockGetCSV( + newOperators[1], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + mockGetCSV( + newOperators[2], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + }) + + By("last is available", func() { + newerOperators := make([]models.MonitoredOperator, 0) + newerOperators = append(newerOperators, operators[1], operators[2]) + newerOperators[1].Status = models.OperatorStatusAvailable + mockGetServiceOperators(newerOperators) + + mockGetCSV( + newerOperators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + }) + + lastOne := []models.MonitoredOperator{operators[1]} + lastOne[0].Status = models.OperatorStatusAvailable + mockGetServiceOperators(lastOne) + + Expect(assistedController.waitForCSV(context.TODO(), LongWaitTimeout)).To(BeNil()) }) }) - Context("waitingForClusterVersion", func() { + Context("waitingForClusterOperators", func() { ctx := context.TODO() tests := []struct { name string @@ -1064,8 +1359,12 @@ var _ = Describe("installer HostRoleMaster role", func() { } BeforeEach(func() { + assistedController.WaitForClusterVersion = true GeneralProgressUpdateInt = 100 * time.Millisecond WaitTimeout = 150 * time.Millisecond + CVOMaxTimeout = 1 * time.Second + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusAvailable}}) }) for i := range tests { @@ -1081,28 +1380,26 @@ var _ = Describe("installer HostRoleMaster role", func() { StatusInfo: t.newCVOCondition.Message, } - amountOfSamples := 1 - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(t.currentServiceCVOStatus, nil).Times(1) if t.shouldSendUpdate { if t.currentServiceCVOStatus.Status != models.OperatorStatusAvailable { - // If a change occured and it is still false - we expect the timer to be resetted, - // hence another round would happen. 
- amountOfSamples += 1 - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(newServiceCVOStatus, nil).Times(1) } mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).Times(1) } - mockk8sclient.EXPECT().GetClusterVersion("version").Return(clusterVersionReport, nil).Times(amountOfSamples) + amountOfSamples := 0 + if t.currentServiceCVOStatus.Status != models.OperatorStatusAvailable { + amountOfSamples++ + } + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).MinTimes(amountOfSamples) if newServiceCVOStatus.Status == models.OperatorStatusAvailable { - Expect(assistedController.waitingForClusterVersion(ctx)).ShouldNot(HaveOccurred()) + Expect(assistedController.waitingForClusterOperators(ctx)).ShouldNot(HaveOccurred()) } else { - Expect(assistedController.waitingForClusterVersion(ctx)).Should(HaveOccurred()) + Expect(assistedController.waitingForClusterOperators(ctx)).Should(HaveOccurred()) } }) } @@ -1117,14 +1414,37 @@ var _ = Describe("installer HostRoleMaster role", func() { }, } - mockk8sclient.EXPECT().GetClusterVersion("version").Return(clusterVersionReport, nil).AnyTimes() + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).AnyTimes() mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(currentServiceCVOStatus, nil).AnyTimes() mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).AnyTimes() err := func() error { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctxTimeout, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - return assistedController.waitingForClusterVersion(ctx) + return assistedController.waitingForClusterOperators(ctxTimeout) + }() + + Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) + }) + + It("service fail to sync - maxTimeout applied", func() { + WaitTimeout = 1 * time.Second + CVOMaxTimeout = 200 * time.Millisecond + currentServiceCVOStatus := &models.MonitoredOperator{Status: models.OperatorStatusProgressing, StatusInfo: ""} + clusterVersionReport := &configv1.ClusterVersion{ + Status: configv1.ClusterVersionStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{ + {Type: configv1.OperatorAvailable, Status: configv1.ConditionTrue, Message: ""}, + }, + }, + } + + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).AnyTimes() + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(currentServiceCVOStatus, nil).AnyTimes() + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).AnyTimes() + + err := func() error { + return assistedController.waitingForClusterOperators(ctx) }() Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) @@ -1142,14 +1462,14 @@ var _ = Describe("installer HostRoleMaster role", func() { } // Fail twice - mockk8sclient.EXPECT().GetClusterVersion("version").Return(clusterVersionReport, nil).Times(3) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).Times(2) mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(currentServiceCVOStatus, nil).Times(2) 
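+			// while the service still reports the CVO as not available, each sample triggers an UpdateClusterOperator call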
mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).Times(2) // Service succeed mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(newServiceCVOStatus, nil).Times(1) - Expect(assistedController.waitingForClusterVersion(context.TODO())).ShouldNot(HaveOccurred()) + Expect(assistedController.waitingForClusterOperators(context.TODO())).ShouldNot(HaveOccurred()) }) }) @@ -1175,8 +1495,12 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetServiceNetworks().Return(nil, errors.New("get service network failed")) hackConflict() }) - It("Exit if service network is IPv6", func() { + It("Kill service and DNS pods if DNS service IP is taken in IPV6 env", func() { mockk8sclient.EXPECT().GetServiceNetworks().Return([]string{"2002:db8::/64"}, nil) + returnServiceWithAddress(conflictServiceName, conflictServiceNamespace, "2002:db8::a") + mockk8sclient.EXPECT().DeleteService(conflictServiceName, conflictServiceNamespace).Return(nil) + mockk8sclient.EXPECT().DeletePods(dnsOperatorNamespace).Return(nil) + returnServiceWithAddress(dnsServiceName, dnsServiceNamespace, "2002:db8::a") hackConflict() }) It("Retry if list services fails", func() { @@ -1273,11 +1597,12 @@ func getClusterOperatorWithConditionsStatus(availableStatus, degradedStatus conf func create3Hosts(currentStatus string, stage models.HostStage) map[string]inventory_client.HostData { currentState := models.HostProgressInfo{CurrentStage: stage} + infraEnvId := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f50") node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") return map[string]inventory_client.HostData{ - "node0": {Host: &models.Host{ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} + "node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} } diff --git a/src/assisted_installer_controller/operator_handler.go b/src/assisted_installer_controller/operator_handler.go new file mode 100644 index 0000000000..6a000de255 --- /dev/null +++ b/src/assisted_installer_controller/operator_handler.go @@ -0,0 +1,192 @@ +package assisted_installer_controller + +import ( + "context" + "time" + + "github.com/openshift/assisted-installer/src/k8s_client" + "github.com/openshift/assisted-installer/src/utils" + "github.com/openshift/assisted-service/models" +) + +const ( + cvoOperatorName = "cvo" + clusterVersionName = "version" +) + +type OperatorHandler interface { + GetName() string + GetStatus() (models.OperatorStatus, string, error) + OnChange(newStatus models.OperatorStatus) bool + IsInitialized() bool +} + +func (c controller) isOperatorAvailable(handler OperatorHandler) bool { + operatorName := handler.GetName() + c.log.Infof("Checking <%s> operator availability status", operatorName) + + operatorStatusInService, isAvailable := c.isOperatorAvailableInService(operatorName) + if isAvailable { + return true + } + + operatorStatus, operatorMessage, err 
:= handler.GetStatus()
+	if err != nil {
+		c.log.WithError(err).Warnf("Failed to get <%s> operator", operatorName)
+		return false
+	}
+
+	if operatorStatusInService.Status != operatorStatus || (operatorStatusInService.StatusInfo != operatorMessage && operatorMessage != "") {
+		c.log.Infof("Operator <%s> updated, status: %s -> %s, message: %s -> %s.", operatorName, operatorStatusInService.Status, operatorStatus, operatorStatusInService.StatusInfo, operatorMessage)
+		if !handler.OnChange(operatorStatus) {
+			c.log.Warnf("<%s> operator's OnChange() returned false. Will skip an update.", operatorName)
+			return false
+		}
+
+		err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operatorName, operatorStatus, operatorMessage)
+		if err != nil {
+			c.log.WithError(err).Warnf("Failed to update %s operator status %s with message %s", operatorName, operatorStatus, operatorMessage)
+			return false
+		}
+	}
+
+	return false
+}
+
+func (c controller) isOperatorAvailableInService(operatorName string) (*models.MonitoredOperator, bool) {
+	operatorStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, operatorName)
+	if err != nil {
+		c.log.WithError(err).Errorf("Failed to get cluster %s %s operator status", c.ClusterID, operatorName)
+		return nil, false
+	}
+
+	if operatorStatusInService.Status == models.OperatorStatusAvailable {
+		c.log.Infof("Service acknowledged <%s> operator is available for cluster %s", operatorName, c.ClusterID)
+		return operatorStatusInService, true
+	}
+
+	return operatorStatusInService, false
+}
+
+type ClusterOperatorHandler struct {
+	kc           k8s_client.K8SClient
+	operatorName string
+}
+
+func NewClusterOperatorHandler(kc k8s_client.K8SClient, operatorName string) *ClusterOperatorHandler {
+	return &ClusterOperatorHandler{kc: kc, operatorName: operatorName}
+}
+
+func (handler ClusterOperatorHandler) GetName() string { return handler.operatorName }
+
+func (handler ClusterOperatorHandler) IsInitialized() bool { return true }
+
+func (handler ClusterOperatorHandler) GetStatus() (models.OperatorStatus, string, error) {
+	co, err := handler.kc.GetClusterOperator(handler.operatorName)
+	if err != nil {
+		return "", "", err
+	}
+
+	operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions)
+	return operatorStatus, operatorMessage, nil
+}
+
+func (handler ClusterOperatorHandler) OnChange(_ models.OperatorStatus) bool { return true }
+
+type ClusterVersionHandler struct {
+	kc    k8s_client.K8SClient
+	timer *time.Timer
+}
+
+func NewClusterVersionHandler(kc k8s_client.K8SClient, timer *time.Timer) *ClusterVersionHandler {
+	return &ClusterVersionHandler{kc: kc, timer: timer}
+}
+
+func (handler ClusterVersionHandler) GetName() string { return cvoOperatorName }
+
+func (handler ClusterVersionHandler) IsInitialized() bool { return true }
+
+func (handler ClusterVersionHandler) GetStatus() (models.OperatorStatus, string, error) {
+	co, err := handler.kc.GetClusterVersion(clusterVersionName)
+	if err != nil {
+		return "", "", err
+	}
+
+	operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions)
+	return operatorStatus, operatorMessage, nil
+}
+
+func (handler ClusterVersionHandler) OnChange(_ models.OperatorStatus) bool {
+	// This is a common pattern to ensure the timer's channel is empty after Stop has been called;
+	// see the time/sleep.go documentation for more details
+	if !handler.timer.Stop() {
+		<-handler.timer.C
+	}
+
handler.timer.Reset(WaitTimeout) + + return true +} + +type ClusterServiceVersionHandler struct { + kc k8s_client.K8SClient + operator *models.MonitoredOperator + status *ControllerStatus + retries int +} + +func NewClusterServiceVersionHandler(kc k8s_client.K8SClient, operator *models.MonitoredOperator, status *ControllerStatus) *ClusterServiceVersionHandler { + return &ClusterServiceVersionHandler{kc: kc, operator: operator, status: status, retries: 0} +} + +func (handler ClusterServiceVersionHandler) GetName() string { return handler.operator.Name } + +func (handler ClusterServiceVersionHandler) IsInitialized() bool { + csvName, err := handler.kc.GetCSVFromSubscription(handler.operator.Namespace, handler.operator.SubscriptionName) + if err != nil { + return false + } + + if csvName == "" { + return false + } + + return true +} + +func (handler ClusterServiceVersionHandler) GetStatus() (models.OperatorStatus, string, error) { + csvName, err := handler.kc.GetCSVFromSubscription(handler.operator.Namespace, handler.operator.SubscriptionName) + if err != nil { + return "", "", err + } + + csv, err := handler.kc.GetCSV(handler.operator.Namespace, csvName) + if err != nil { + return "", "", err + } + + operatorStatus := utils.CsvStatusToOperatorStatus(string(csv.Status.Phase)) + + return operatorStatus, csv.Status.Message, nil +} + +func (handler ClusterServiceVersionHandler) OnChange(newStatus models.OperatorStatus) bool { + if IsStatusFailed(newStatus) { + if handler.retries < failedOperatorRetry { + // FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606 + // Remove this code when bug 1968606 is fixed + handler.retries++ + return false + } + handler.status.OperatorError(handler.operator.Name) + } + + return true +} + +func IsStatusFailed(operatorStatus models.OperatorStatus) bool { + return operatorStatus == models.OperatorStatusFailed +} + +func IsStatusSucceeded(operatorStatus models.OperatorStatus) bool { + return operatorStatus == models.OperatorStatusAvailable +} diff --git a/src/common/common.go b/src/common/common.go index 40bb1e0bca..a8c1ed1264 100644 --- a/src/common/common.go +++ b/src/common/common.go @@ -1,6 +1,7 @@ package common import ( + "bytes" "fmt" "io" "regexp" @@ -44,7 +45,7 @@ func SetConfiguringStatusForHosts(client inventory_client.InventoryClient, inven continue } log.Infof("Verifying if host %s pulled ignition", hostName) - pat := fmt.Sprintf("(%s).{1,20}(Ignition)", strings.Join(host.IPs, "|")) + pat := fmt.Sprintf("(%s).{1,40}(Ignition)", strings.Join(host.IPs, "|")) pattern, err := regexp.Compile(pat) if err != nil { log.WithError(err).Errorf("Failed to compile regex from host %s ips list", hostName) @@ -58,7 +59,7 @@ func SetConfiguringStatusForHosts(client inventory_client.InventoryClient, inven ctx := utils.GenerateRequestContext() requestLog := utils.RequestIDLogger(ctx, log) requestLog.Infof("Host %s %q found in mcs logs, moving it to %s state", hostName, host.Host.ID.String(), status) - if err := client.UpdateHostInstallProgress(ctx, host.Host.ID.String(), status, ""); err != nil { + if err := client.UpdateHostInstallProgress(ctx, host.Host.InfraEnvID.String(), host.Host.ID.String(), status, ""); err != nil { requestLog.Errorf("Failed to update node installation status, %s", err) continue } @@ -97,7 +98,8 @@ func UploadPodLogs(kc k8s_client.K8SClient, ic inventory_client.InventoryClient, log.Infof("Uploading logs for %s in %s", podName, namespace) podLogs, err := kc.GetPodLogsAsBuffer(namespace, podName, 
sinceSeconds)
 	if err != nil {
-		return errors.Wrapf(err, "Failed to get logs of pod %s", podName)
+		podLogs = &bytes.Buffer{}
+		podLogs.WriteString(errors.Wrapf(err, "Failed to get logs of pod %s", podName).Error())
 	}
 	pr, pw := io.Pipe()
 	defer pr.Close()
@@ -130,3 +132,35 @@ func IsK8sNodeIsReady(node v1.Node) bool {
 	}
 	return false
 }
+
+// BuildHostsMapIPAddressBased builds a map containing all the IP addresses of the hosts in the
+// inventory so that later we can match reporting hosts based on the IP and not only on the name.
+func BuildHostsMapIPAddressBased(inventoryHostsMap map[string]inventory_client.HostData) map[string]inventory_client.HostData {
+	knownIpAddresses := map[string]inventory_client.HostData{}
+	for _, v := range inventoryHostsMap {
+		for _, ip := range v.IPs {
+			knownIpAddresses[ip] = v
+		}
+	}
+	return knownIpAddresses
+}
+
+// Matching of the host happens based on 2 rules
+// * if the name of the host and the name in the inventory are exactly the same, use it
+// * if the name is not known in the inventory, we check if the IP address of the
+//   reporting host is known to the inventory
+// Using those rules we can cover the cases where e.g. inventory expects a short
+// hostname, but the host reports itself using its FQDN
+func HostMatchByNameOrIPAddress(node v1.Node, namesMap, IPAddressMap map[string]inventory_client.HostData) (inventory_client.HostData, bool) {
+	host, ok := namesMap[strings.ToLower(node.Name)]
+	if !ok {
+		for _, ip := range node.Status.Addresses {
+			_, exists := IPAddressMap[ip.Address]
+			if exists && ip.Type == v1.NodeInternalIP {
+				ok = true
+				host = IPAddressMap[ip.Address]
+			}
+		}
+	}
+	return host, ok
+}
diff --git a/src/common/common_test.go b/src/common/common_test.go
index 59e4f1a61c..7f09ac7655 100644
--- a/src/common/common_test.go
+++ b/src/common/common_test.go
@@ -1,6 +1,7 @@
 package common

 import (
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"testing"
@@ -13,6 +14,7 @@ import (
 	"github.com/openshift/assisted-installer/src/inventory_client"
 	"github.com/openshift/assisted-service/models"
 	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
 )

 func TestCommon(t *testing.T) {
@@ -33,23 +35,24 @@ var _ = Describe("verify common", func() {
 		var logs string
 		logsInBytes, _ := ioutil.ReadFile("../../test_files/mcs_logs.txt")
 		logs = string(logsInBytes)
+		infraEnvId := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250")
 		node0Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238")
 		node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239")
 		node2Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240")
-		testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster},
+		testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster},
 			IPs: []string{"192.168.126.10", "192.168.11.122", "fe80::5054:ff:fe9a:4738"}},
-			"node1": {Host: &models.Host{ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}},
-			"node2": {Host: &models.Host{ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124",
"fe80::5054:ff:fe9a:4740"}}} - - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node2Id.String(), models.HostStageWaitingForIgnition, gomock.Any()).Return(nil).Times(1) + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + // note that in the MCS log we use node 1 IPv6 address + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node2Id.String(), models.HostStageWaitingForIgnition, gomock.Any()).Return(nil).Times(1) SetConfiguringStatusForHosts(mockbmclient, testInventoryIdsIps, logs, true, l) Expect(testInventoryIdsIps["node0"].Host.Progress.CurrentStage).Should(Equal(models.HostStageRebooting)) Expect(testInventoryIdsIps["node1"].Host.Progress.CurrentStage).Should(Equal(models.HostStageRebooting)) Expect(testInventoryIdsIps["node2"].Host.Progress.CurrentStage).Should(Equal(models.HostStageWaitingForIgnition)) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node2Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node2Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) SetConfiguringStatusForHosts(mockbmclient, testInventoryIdsIps, logs, false, l) Expect(testInventoryIdsIps["node1"].Host.Progress.CurrentStage).Should(Equal(models.HostStageConfiguring)) Expect(testInventoryIdsIps["node2"].Host.Progress.CurrentStage).Should(Equal(models.HostStageConfiguring)) @@ -120,4 +123,61 @@ var _ = Describe("verify common", func() { }) } }) + + Context("Verify name- and IP-based matching", func() { + var testInventoryIdsIps, knownIpAddresses map[string]inventory_client.HostData + var node0Id, node1Id, node2Id strfmt.UUID + + BeforeEach(func() { + infraEnvId := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") + node0Id = strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") + node1Id = strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") + node2Id = strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") + + testInventoryIdsIps = map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, + IPs: []string{"192.168.126.10", "192.168.39.248", "fe80::5054:ff:fe9a:4738"}}, + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: 
&models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + knownIpAddresses = BuildHostsMapIPAddressBased(testInventoryIdsIps) + }) + + It("test BuildHostsMapIPAddressBased", func() { + Expect(len(knownIpAddresses)).To(Equal(9)) + Expect(knownIpAddresses["192.168.126.10"].Host.ID).To(Equal(&node0Id)) + Expect(knownIpAddresses["192.168.11.123"].Host.ID).To(Equal(&node1Id)) + Expect(knownIpAddresses["fe80::5054:ff:fe9a:4740"].Host.ID).To(Equal(&node2Id)) + Expect(knownIpAddresses["10.0.0.1"]).To(Equal(inventory_client.HostData{IPs: nil, Inventory: nil, Host: nil})) + }) + + It("test HostMatchByNameOrIPAddress by name", func() { + nodes := GetKubeNodes(map[string]string{"node1": "6d6f00e8-dead-beef-cafe-0f1459485ad9"}) + Expect(len(nodes.Items)).To(Equal(1)) + Expect(nodes.Items[0].Name).To(Equal("node1")) + match, ok := HostMatchByNameOrIPAddress(nodes.Items[0], testInventoryIdsIps, knownIpAddresses) + Expect(ok).To(Equal(true)) + Expect(match.Host.ID).To(Equal(&node1Id)) + }) + + It("test HostMatchByNameOrIPAddress by IP", func() { + nodes := GetKubeNodes(map[string]string{"some-fake-name": "6d6f00e8-dead-beef-cafe-0f1459485ad9"}) + Expect(len(nodes.Items)).To(Equal(1)) + Expect(nodes.Items[0].Name).To(Equal("some-fake-name")) + match, ok := HostMatchByNameOrIPAddress(nodes.Items[0], testInventoryIdsIps, knownIpAddresses) + Expect(ok).To(Equal(true)) + Expect(match.Host.ID).To(Equal(&node0Id)) + }) + }) }) + +func GetKubeNodes(kubeNamesIds map[string]string) *v1.NodeList { + file, _ := ioutil.ReadFile("../../test_files/node.json") + var node v1.Node + _ = json.Unmarshal(file, &node) + nodeList := &v1.NodeList{} + for name, id := range kubeNamesIds { + node.Status.NodeInfo.SystemUUID = id + node.Name = name + nodeList.Items = append(nodeList.Items, node) + } + return nodeList +} diff --git a/src/config/config.go b/src/config/config.go index fbabec49b8..72222f2ac6 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -15,6 +15,7 @@ import ( type Config struct { Role string ClusterID string + InfraEnvID string HostID string Device string URL string @@ -23,7 +24,6 @@ type Config struct { MCOImage string ControllerImage string AgentImage string - InstallationTimeout uint PullSecretToken string `secret:"true"` SkipCertVerification bool CACertPath string @@ -48,6 +48,7 @@ func ProcessArgs() { ret := &GlobalConfig flag.StringVar(&ret.Role, "role", string(models.HostRoleMaster), "The node role") flag.StringVar(&ret.ClusterID, "cluster-id", "", "The cluster id") + flag.StringVar(&ret.InfraEnvID, "infra-env-id", "", "This host infra env id") flag.StringVar(&ret.HostID, "host-id", "", "This host id") flag.StringVar(&ret.Device, "boot-device", "", "The boot device") flag.StringVar(&ret.URL, "url", "", "The BM inventory URL, including a scheme and optionally a port (overrides the host and port arguments") @@ -58,8 +59,6 @@ func ProcessArgs() { "Assisted Installer Controller image URL") flag.StringVar(&ret.AgentImage, "agent-image", "quay.io/ocpmetal/assisted-installer-agent:latest", "Assisted Installer Agent image URL that will be used to send logs on successful installation") - // Remove installation-timeout once the 
assisted-service stop sending it. - flag.UintVar(&ret.InstallationTimeout, "installation-timeout", 120, "Installation timeout in minutes - OBSOLETE") flag.BoolVar(&ret.SkipCertVerification, "insecure", false, "Do not validate TLS certificate") flag.StringVar(&ret.CACertPath, "cacert", "", "Path to custom CA certificate in PEM format") flag.StringVar(&ret.HTTPProxy, "http-proxy", "", "A proxy URL to use for creating HTTP connections outside the cluster") @@ -96,4 +95,7 @@ func ProcessArgs() { printHelpAndExit() } } + if ret.InfraEnvID == "" { + ret.InfraEnvID = ret.ClusterID + } } diff --git a/src/installer/installer.go b/src/installer/installer.go index b759364a80..b5558686a0 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -4,9 +4,9 @@ import ( "context" "fmt" "path/filepath" - "strings" "time" + "github.com/go-openapi/swag" "github.com/google/uuid" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -26,7 +26,6 @@ import ( const ( InstallDir = "/opt/install-dir" - KubeconfigPathLoopBack = "/opt/openshift/auth/kubeconfig-loopback" KubeconfigPath = "/opt/openshift/auth/kubeconfig" minMasterNodes = 2 dockerConfigFile = "/root/.docker/config.json" @@ -134,9 +133,15 @@ func (i *installer) InstallNode() error { } i.log.Info("Setting bootstrap node new role to master") + } else if i.Config.Role == string(models.HostRoleWorker) { + // Wait for 2 masters to be ready before rebooting + if err = i.workerWaitFor2ReadyMasters(ctx); err != nil { + return err + } } //upload host logs and report log status before reboot - i.inventoryClient.HostLogProgressReport(ctx, i.Config.ClusterID, i.Config.HostID, models.LogsStateRequested) + i.log.Infof("Uploading logs and reporting status before rebooting the node %s for cluster %s", i.Config.HostID, i.Config.ClusterID) + i.inventoryClient.HostLogProgressReport(ctx, i.Config.InfraEnvID, i.Config.HostID, models.LogsStateRequested) _, err = i.ops.UploadInstallationLogs(isBootstrap || i.HighAvailabilityMode == models.ClusterHighAvailabilityModeNone) if err != nil { i.log.Errorf("upload installation logs %s", err) @@ -195,6 +200,12 @@ func (i *installer) writeImageToDisk(ignitionPath string) error { func (i *installer) startBootstrap() error { i.log.Infof("Running bootstrap") + // This is required for the log collection command to work since it will try to mount this directory + // This directory is also required by `generateSshKeyPair` as it will place the key there + if err := i.ops.Mkdir(sshDir); err != nil { + i.log.WithError(err).Error("Failed to create SSH dir") + return err + } ignitionFileName := "bootstrap.ign" ignitionPath, err := i.getFileFromService(ignitionFileName) if err != nil { @@ -289,10 +300,6 @@ func (i *installer) extractIgnitionToFS(ignitionPath string) (err error) { func (i *installer) generateSshKeyPair() error { i.log.Info("Generating new SSH key pair") - if err := i.ops.Mkdir(sshDir); err != nil { - i.log.WithError(err).Error("Failed to create SSH dir") - return err - } if _, err := i.ops.ExecPrivilegeCommand(utils.NewLogWriter(i.log), "ssh-keygen", "-q", "-f", sshKeyPath, "-N", ""); err != nil { i.log.WithError(err).Error("Failed to generate SSH key pair") return err @@ -319,7 +326,7 @@ func (i *installer) downloadHostIgnition() (string, error) { log.Infof("Getting %s file", filename) dest := filepath.Join(InstallDir, filename) - err := i.inventoryClient.DownloadHostIgnition(ctx, i.Config.HostID, dest) + err := i.inventoryClient.DownloadHostIgnition(ctx, i.Config.InfraEnvID, i.Config.HostID, dest) if err != 
diff --git a/src/installer/installer.go b/src/installer/installer.go
index b759364a80..b5558686a0 100644
--- a/src/installer/installer.go
+++ b/src/installer/installer.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"fmt"
 	"path/filepath"
-	"strings"
 	"time"
 
+	"github.com/go-openapi/swag"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -26,7 +26,6 @@ import (
 const (
 	InstallDir             = "/opt/install-dir"
-	KubeconfigPathLoopBack = "/opt/openshift/auth/kubeconfig-loopback"
 	KubeconfigPath         = "/opt/openshift/auth/kubeconfig"
 	minMasterNodes         = 2
 	dockerConfigFile       = "/root/.docker/config.json"
@@ -134,9 +133,15 @@ func (i *installer) InstallNode() error {
 		}
 		i.log.Info("Setting bootstrap node new role to master")
+	} else if i.Config.Role == string(models.HostRoleWorker) {
+		// Wait for 2 masters to be ready before rebooting
+		if err = i.workerWaitFor2ReadyMasters(ctx); err != nil {
+			return err
+		}
 	}
 	//upload host logs and report log status before reboot
-	i.inventoryClient.HostLogProgressReport(ctx, i.Config.ClusterID, i.Config.HostID, models.LogsStateRequested)
+	i.log.Infof("Uploading logs and reporting status before rebooting the node %s for cluster %s", i.Config.HostID, i.Config.ClusterID)
+	i.inventoryClient.HostLogProgressReport(ctx, i.Config.InfraEnvID, i.Config.HostID, models.LogsStateRequested)
 	_, err = i.ops.UploadInstallationLogs(isBootstrap || i.HighAvailabilityMode == models.ClusterHighAvailabilityModeNone)
 	if err != nil {
 		i.log.Errorf("upload installation logs %s", err)
@@ -195,6 +200,12 @@ func (i *installer) writeImageToDisk(ignitionPath string) error {
 
 func (i *installer) startBootstrap() error {
 	i.log.Infof("Running bootstrap")
+	// This is required for the log collection command to work since it will try to mount this directory
+	// This directory is also required by `generateSshKeyPair` as it will place the key there
+	if err := i.ops.Mkdir(sshDir); err != nil {
+		i.log.WithError(err).Error("Failed to create SSH dir")
+		return err
+	}
 	ignitionFileName := "bootstrap.ign"
 	ignitionPath, err := i.getFileFromService(ignitionFileName)
 	if err != nil {
@@ -289,10 +300,6 @@ func (i *installer) extractIgnitionToFS(ignitionPath string) (err error) {
 
 func (i *installer) generateSshKeyPair() error {
 	i.log.Info("Generating new SSH key pair")
-	if err := i.ops.Mkdir(sshDir); err != nil {
-		i.log.WithError(err).Error("Failed to create SSH dir")
-		return err
-	}
 	if _, err := i.ops.ExecPrivilegeCommand(utils.NewLogWriter(i.log), "ssh-keygen", "-q", "-f", sshKeyPath, "-N", ""); err != nil {
 		i.log.WithError(err).Error("Failed to generate SSH key pair")
 		return err
@@ -319,7 +326,7 @@ func (i *installer) downloadHostIgnition() (string, error) {
 	log.Infof("Getting %s file", filename)
 
 	dest := filepath.Join(InstallDir, filename)
-	err := i.inventoryClient.DownloadHostIgnition(ctx, i.Config.HostID, dest)
+	err := i.inventoryClient.DownloadHostIgnition(ctx, i.Config.InfraEnvID, i.Config.HostID, dest)
 	if err != nil {
 		log.Errorf("Failed to fetch file (%s) from server. err: %s", filename, err)
 	}
@@ -337,12 +344,18 @@ func (i *installer) waitForNetworkType(kc k8s_client.K8SClient) error {
 }
 
 func (i *installer) waitForControlPlane(ctx context.Context) error {
-	kc, err := i.kcBuilder(KubeconfigPathLoopBack, i.log)
+	err := i.ops.ReloadHostFile("/etc/resolv.conf")
+	if err != nil {
+		i.log.WithError(err).Error("Failed to reload resolv.conf")
+		return err
+	}
+	kc, err := i.kcBuilder(KubeconfigPath, i.log)
 	if err != nil {
 		i.log.Error(err)
 		return err
 	}
 	i.UpdateHostInstallProgress(models.HostStageWaitingForControlPlane, "")
+
 	if err = i.waitForMinMasterNodes(ctx, kc); err != nil {
 		return err
 	}
@@ -364,7 +377,7 @@ func (i *installer) waitForControlPlane(ctx context.Context) error {
 	i.waitForBootkube(ctx)
 
 	// waiting for controller pod to be running
-	if err := i.waitForController(); err != nil {
+	if err := i.waitForController(kc); err != nil {
 		i.log.Error(err)
 		return err
 	}
@@ -372,6 +385,32 @@ func (i *installer) waitForControlPlane(ctx context.Context) error {
 	return nil
 }
 
+func numDoneMasters(cluster *models.Cluster) int {
+	numDoneMasters := 0
+	for _, h := range cluster.Hosts {
+		if h.Role == models.HostRoleMaster && h.Progress.CurrentStage == models.HostStageDone {
+			numDoneMasters++
+		}
+	}
+	return numDoneMasters
+}
+
+func (i *installer) workerWaitFor2ReadyMasters(ctx context.Context) error {
+	i.log.Info("Waiting for 2 ready masters")
+	i.UpdateHostInstallProgress(models.HostStageWaitingForControlPlane, "")
+	for {
+		cluster, err := i.inventoryClient.GetCluster(ctx)
+		if err != nil {
+			i.log.WithError(err).Errorf("Getting cluster %s", i.ClusterID)
+			return err
+		}
+		if swag.StringValue(cluster.Kind) == models.ClusterKindAddHostsCluster || numDoneMasters(cluster) >= minMasterNodes {
+			return nil
+		}
+		time.Sleep(generalWaitInterval)
+	}
+}
+
 func (i *installer) shouldControlPlaneReplicasPatchApplied(kc k8s_client.K8SClient) (bool, error) {
 	controlPlanePatchRequired, err := utils.IsVersionLessThan47(i.Config.OpenshiftVersion)
 	if err != nil {
@@ -441,7 +480,7 @@ func (i *installer) UpdateHostInstallProgress(newStage models.HostStage, info st
 	log := utils.RequestIDLogger(ctx, i.log)
 	log.Infof("Updating node installation stage: %s - %s", newStage, info)
 	if i.HostID != "" {
-		if err := i.inventoryClient.UpdateHostInstallProgress(ctx, i.HostID, newStage, info); err != nil {
+		if err := i.inventoryClient.UpdateHostInstallProgress(ctx, i.Config.InfraEnvID, i.Config.HostID, newStage, info); err != nil {
 			log.Errorf("Failed to update node installation stage, %s", err)
 		}
 	}
@@ -469,20 +508,9 @@ func (i *installer) waitForBootkube(ctx context.Context) {
 	}
 }
 
-func (i *installer) waitForController() error {
+func (i *installer) waitForController(kc k8s_client.K8SClient) error {
 	i.log.Infof("Waiting for controller to be ready")
 	i.UpdateHostInstallProgress(models.HostStageWaitingForController, "waiting for controller pod ready event")
-	err := i.ops.ReloadHostFile("/etc/resolv.conf")
-	if err != nil {
-		i.log.WithError(err).Error("Failed to reload resolv.conf")
-		return err
-	}
-
-	kc, err := i.kcBuilder(KubeconfigPath, i.log)
-	if err != nil {
-		i.log.WithError(err).Errorf("Failed to create kc client from %s", KubeconfigPath)
-		return err
-	}
 	events := map[string]string{}
 	tickerUploadLogs := time.NewTicker(5 * time.Minute)
@@ -605,6 +633,8 @@ func (i *installer) getInventoryHostsMap(hostsMap map[string]inventory_client.Ho
 func (i *installer) updateReadyMasters(nodes *v1.NodeList, readyMasters *[]string, inventoryHostsMap map[string]inventory_client.HostData) error {
 	nodeNameAndCondition := map[string][]v1.NodeCondition{}
+	knownIpAddresses := common.BuildHostsMapIPAddressBased(inventoryHostsMap)
+
 	for _, node := range nodes.Items {
 		nodeNameAndCondition[node.Name] = node.Status.Conditions
 		if common.IsK8sNodeIsReady(node) && !funk.ContainsString(*readyMasters, node.Name) {
@@ -612,12 +642,13 @@ func (i *installer) updateReadyMasters(nodes *v1.NodeList, readyMasters *[]strin
 			log := utils.RequestIDLogger(ctx, i.log)
 			log.Infof("Found a new ready master node %s with id %s", node.Name, node.Status.NodeInfo.SystemUUID)
 			*readyMasters = append(*readyMasters, node.Name)
-			host, ok := inventoryHostsMap[strings.ToLower(node.Name)]
+
+			host, ok := common.HostMatchByNameOrIPAddress(node, inventoryHostsMap, knownIpAddresses)
 			if !ok {
 				return fmt.Errorf("Node %s is not in inventory hosts", node.Name)
 			}
 			ctx = utils.GenerateRequestContext()
-			if err := i.inventoryClient.UpdateHostInstallProgress(ctx, host.Host.ID.String(), models.HostStageJoined, ""); err != nil {
+			if err := i.inventoryClient.UpdateHostInstallProgress(ctx, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageJoined, ""); err != nil {
 				utils.RequestIDLogger(ctx, i.log).Errorf("Failed to update node installation status, %s", err)
 			}
 		}
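One detail of the new worker wait loop worth noting: cluster.Kind is a *string in the generated models, so workerWaitFor2ReadyMasters compares it through swag.StringValue, which is nil-safe. A minimal illustration of that behavior:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	var kind *string // nil when the service omits the field
	// swag.StringValue returns "" for a nil pointer, avoiding a nil dereference
	fmt.Println(swag.StringValue(kind) == "AddHostsCluster") // false
	kind = swag.String("AddHostsCluster")
	fmt.Println(swag.StringValue(kind) == "AddHostsCluster") // true
}
```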
event").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) mockk8sclient.EXPECT().GetPods("assisted-installer", gomock.Any(), "").Return([]v1.Pod{{TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: common.AssistedControllerPrefix + "aasdasd"}, Status: v1.PodStatus{Phase: "Running"}}}, nil).Times(1) @@ -133,12 +134,13 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient = inventory_client.NewMockInventoryClient(ctrl) mockk8sclient = k8s_client.NewMockK8SClient(ctrl) mockIgnition = ignition.NewMockIgnition(ctrl) + nodesInfraEnvId := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f50") node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") - inventoryNamesHost = map[string]inventory_client.HostData{"node0": {Host: &models.Host{ID: &node0Id}, IPs: []string{"192.168.126.10"}}, - "node1": {Host: &models.Host{ID: &node1Id}, IPs: []string{"192.168.126.11"}}, - "node2": {Host: &models.Host{ID: &node2Id}, IPs: []string{"192.168.126.12"}}} + inventoryNamesHost = map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: nodesInfraEnvId, ID: &node0Id}, IPs: []string{"192.168.126.10"}}, + "node1": {Host: &models.Host{InfraEnvID: nodesInfraEnvId, ID: &node1Id}, IPs: []string{"192.168.126.11"}}, + "node2": {Host: &models.Host{InfraEnvID: nodesInfraEnvId, ID: &node2Id}, IPs: []string{"192.168.126.12"}}} }) k8sBuilder := func(configPath string, logger *logrus.Logger) (k8s_client.K8SClient, error) { return mockk8sclient, nil @@ -148,6 +150,7 @@ var _ = Describe("installer HostRoleMaster role", func() { conf := config.Config{Role: string(models.HostRoleBootstrap), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -194,11 +197,11 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().ListMasterNodes().Return(GetKubeNodes(map[string]string{}), nil).Times(1) kubeNamesIds = map[string]string{"node0": "7916fa89-ea7a-443e-a862-b3e930309f65"} mockk8sclient.EXPECT().ListMasterNodes().Return(GetKubeNodes(kubeNamesIds), nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node0"].Host.ID.String(), models.HostStageJoined, "").Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node0"].Host.InfraEnvID.String(), inventoryNamesHost["node0"].Host.ID.String(), models.HostStageJoined, "").Times(1) kubeNamesIds = map[string]string{"node0": "7916fa89-ea7a-443e-a862-b3e930309f65", "node1": "eb82821f-bf21-4614-9a3b-ecb07929f238"} mockk8sclient.EXPECT().ListMasterNodes().Return(GetKubeNodes(kubeNamesIds), nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node1"].Host.ID.String(), models.HostStageJoined, "").Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node1"].Host.InfraEnvID.String(), inventoryNamesHost["node1"].Host.ID.String(), models.HostStageJoined, "").Times(1) } getNetworkTypeSuccessOpenshiftSDN := func() { mockk8sclient.EXPECT().GetNetworkType().Return("OpenshiftSDN", nil).Times(2) @@ -219,7 +222,7 @@ var _ = Describe("installer HostRoleMaster role", func() { 
mockops.EXPECT().PrepareController().Return(nil).Times(1) } waitForBootkubeSuccess := func() { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) mockops.EXPECT().ExecPrivilegeCommand(gomock.Any(), "stat", "/opt/openshift/.bootkube.done").Return("OK", nil).Times(1) } bootkubeStatusSuccess := func() { @@ -230,7 +233,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().ExtractFromIgnition(filepath.Join(InstallDir, bootstrapIgn), dockerConfigFile).Return(nil).Times(1) } generateSshKeyPairSuccess := func() { - mkdirSuccess(sshDir) mockops.EXPECT().ExecPrivilegeCommand(gomock.Any(), "ssh-keygen", "-q", "-f", sshKeyPath, "-N", "").Return("OK", nil).Times(1) } createOpenshiftSshManifestSuccess := func() { @@ -239,6 +241,7 @@ var _ = Describe("installer HostRoleMaster role", func() { bootstrapSetup := func() { cleanInstallDevice() + mkdirSuccess(sshDir) mkdirSuccess(InstallDir) downloadFileSuccess(bootstrapIgn) extractSecretFromIgnitionSuccess() @@ -276,7 +279,7 @@ var _ = Describe("installer HostRoleMaster role", func() { resolvConfSuccess() waitForControllerSuccessfully(conf.ClusterID) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) reportLogProgressSuccess() setBootOrderSuccess(gomock.Any()) @@ -309,7 +312,7 @@ var _ = Describe("installer HostRoleMaster role", func() { resolvConfSuccess() waitForControllerSuccessfully(conf.ClusterID) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(true) @@ -327,6 +330,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) + mkdirSuccess(sshDir) downloadFileSuccess(bootstrapIgn) extractSecretFromIgnitionSuccess() extractIgnitionToFS("Success", nil) @@ -334,7 +338,7 @@ var _ = Describe("installer HostRoleMaster role", func() { err := fmt.Errorf("generate SSH keys failed") mockops.EXPECT().CreateOpenshiftSshManifest(assistedInstallerSshManifest, sshManifestTmpl, sshPubKeyPath).Return(err).Times(1) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) ret := installerObj.InstallNode() @@ -359,7 +363,7 @@ var _ = Describe("installer HostRoleMaster role", func() { resolvConfSuccess() waitForControllerSuccessfully(conf.ClusterID) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(true) @@ -375,8 +379,9 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) + mkdirSuccess(sshDir) downloadFileSuccess(bootstrapIgn) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) 
extractSecretFromIgnitionSuccess() @@ -397,17 +402,18 @@ var _ = Describe("installer HostRoleMaster role", func() { err := fmt.Errorf("Failed to restart NetworkManager") restartNetworkManager(err) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) ret := installerObj.InstallNode() Expect(ret).Should(Equal(err)) }) }) - Context("Bootstrap role waiting for controller", func() { + Context("Bootstrap role waiting for control plane", func() { conf := config.Config{Role: string(models.HostRoleBootstrap), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -417,35 +423,34 @@ var _ = Describe("installer HostRoleMaster role", func() { BeforeEach(func() { installerObj = NewAssistedInstaller(l, conf, mockops, mockbmclient, k8sBuilder, mockIgnition) }) + It("waitForControlPlane reload resolv.conf failed", func() { + mockops.EXPECT().ReloadHostFile("/etc/resolv.conf").Return(fmt.Errorf("failed to load file")).Times(1) - It("waitForController reload resolv.conf failed", func() { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) - mockops.EXPECT().ReloadHostFile("/etc/resolv.conf").Return(fmt.Errorf("dummy")).Times(1) - - err := installerObj.waitForController() + err := installerObj.waitForControlPlane(context.Background()) Expect(err).To(HaveOccurred()) }) + It("waitForController reload get pods fails then succeeds", func() { - resolvConfSuccess() reportLogProgressSuccess() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) mockk8sclient.EXPECT().GetPods("assisted-installer", gomock.Any(), "").Return(nil, fmt.Errorf("dummy")).Times(1) mockk8sclient.EXPECT().ListEvents(assistedControllerNamespace).Return(&events, nil).Times(1) - err := installerObj.waitForController() + err := installerObj.waitForController(mockk8sclient) Expect(err).NotTo(HaveOccurred()) }) It("Configuring state", func() { var logs string logsInBytes, _ := ioutil.ReadFile("../../test_files/mcs_logs.txt") logs = string(logsInBytes) + infraEnvID := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") node0Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") node2Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") - testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, + testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: infraEnvID, ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.10", "192.168.11.122", "fe80::5054:ff:fe9a:4738"}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, - "node2": {Host: 
&models.Host{ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + "node1": {Host: &models.Host{InfraEnvID: infraEnvID, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvID, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(testInventoryIdsIps, nil).Times(1) mockops.EXPECT().GetMCSLogs().Return("", fmt.Errorf("dummy")).Times(1) @@ -453,9 +458,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().GetMCSLogs().Return("dummy logs", nil).Times(1) mockops.EXPECT().GetMCSLogs().Return(logs, nil).AnyTimes() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -466,12 +471,13 @@ var _ = Describe("installer HostRoleMaster role", func() { var logs string logsInBytes, _ := ioutil.ReadFile("../../test_files/mcs_logs.txt") logs = string(logsInBytes) + infraEnvId := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") node2Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") testInventoryIdsIps := map[string]inventory_client.HostData{ - "node1": {Host: &models.Host{ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: 
[]string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(testInventoryIdsIps, nil).Times(1) mockops.EXPECT().GetMCSLogs().Return("", fmt.Errorf("dummy")).Times(1) @@ -479,9 +485,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().GetMCSLogs().Return("dummy logs", nil).Times(1) mockops.EXPECT().GetMCSLogs().Return(logs, nil).AnyTimes() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) ctx, cancel := context.WithCancel(context.Background()) defer cancel() installerObj.updateConfiguringStatus(ctx) @@ -491,6 +497,7 @@ var _ = Describe("installer HostRoleMaster role", func() { installerArgs := []string{"-n", "--append-karg", "nameserver=8.8.8.8"} conf := config.Config{Role: string(models.HostRoleMaster), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -510,7 +517,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(installerArgs) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(false) @@ -559,7 +566,7 @@ var _ = Describe("installer HostRoleMaster role", func() { cleanInstallDevice() mkdirSuccess(InstallDir) err := fmt.Errorf("failed to fetch file") - mockbmclient.EXPECT().DownloadHostIgnition(gomock.Any(), hostId, filepath.Join(InstallDir, "master-host-id.ign")).Return(err).Times(1) + mockbmclient.EXPECT().DownloadHostIgnition(gomock.Any(), infraEnvId, hostId, filepath.Join(InstallDir, "master-host-id.ign")).Return(err).Times(1) ret := installerObj.InstallNode() Expect(ret).Should(Equal(err)) }) @@ -570,7 +577,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") err := fmt.Errorf("failed to write image to disk") mockops.EXPECT().WriteImageToDisk(filepath.Join(InstallDir, "master-host-id.ign"), device, mockbmclient, installerArgs).Return(err).Times(3) ret := installerObj.InstallNode() @@ -584,7 +591,7 @@ var _ = Describe("installer HostRoleMaster role", 
func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") uploadLogsSuccess(false) reportLogProgressSuccess() writeToDiskSuccess(installerArgs) @@ -598,6 +605,7 @@ var _ = Describe("installer HostRoleMaster role", func() { Context("Worker role", func() { conf := config.Config{Role: string(models.HostRoleWorker), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -611,11 +619,29 @@ var _ = Describe("installer HostRoleMaster role", func() { updateProgressSuccess([][]string{{string(models.HostStageStartingInstallation), conf.Role}, {string(models.HostStageInstalling), conf.Role}, {string(models.HostStageWritingImageToDisk)}, + {string(models.HostStageWaitingForControlPlane)}, {string(models.HostStageRebooting)}, }) + cluster := models.Cluster{ + Hosts: []*models.Host{ + { + Role: models.HostRoleMaster, + Progress: &models.HostProgressInfo{ + CurrentStage: models.HostStageDone, + }, + }, + { + Role: models.HostRoleMaster, + Progress: &models.HostProgressInfo{ + CurrentStage: models.HostStageDone, + }, + }, + }, + } + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&cluster, nil).Times(1) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "worker-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "worker-host-id.ign") mockops.EXPECT().WriteImageToDisk(filepath.Join(InstallDir, "worker-host-id.ign"), device, mockbmclient, nil).Return(nil).Times(1) setBootOrderSuccess(gomock.Any()) // failure must do nothing @@ -630,6 +656,7 @@ var _ = Describe("installer HostRoleMaster role", func() { conf := config.Config{Role: string(models.HostRoleMaster), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -676,7 +703,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().PrepareController().Return(nil).Times(1) } waitForBootkubeSuccess := func() { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) mockops.EXPECT().ExecPrivilegeCommand(gomock.Any(), "stat", "/opt/openshift/.bootkube.done").Return("OK", nil).Times(1) } bootkubeStatusSuccess := func() { @@ -688,6 +715,7 @@ var _ = Describe("installer HostRoleMaster role", func() { singleNodeBootstrapSetup := func() { cleanInstallDevice() mkdirSuccess(InstallDir) + mkdirSuccess(sshDir) downloadFileSuccess(bootstrapIgn) extractSecretFromIgnitionSuccess() extractIgnitionToFS("Success", nil) @@ -714,7 +742,7 @@ var _ = Describe("installer HostRoleMaster role", func() { //HostRoleMaster flow: verifySingleNodeMasterIgnitionSuccess() singleNodeMergeIgnitionSuccess() - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") mockops.EXPECT().WriteImageToDisk(singleNodeMasterIgnitionPath, device, mockbmclient, nil).Return(nil).Times(1) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(true) diff --git a/src/inventory_client/inventory_client.go b/src/inventory_client/inventory_client.go index cca2e44fe7..eb688cbb05 100644 --- a/src/inventory_client/inventory_client.go +++ 
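The Worker-role test above builds a models.Cluster with two Done masters to satisfy the new wait. The predicate it exercises can be checked in isolation; a self-contained restatement (numDoneMasters is re-declared locally here, and the models import path is the one this repo uses):

```go
package main

import (
	"fmt"

	"github.com/openshift/assisted-service/models"
)

// Local restatement of numDoneMasters from installer.go.
func doneMasters(cluster *models.Cluster) int {
	n := 0
	for _, h := range cluster.Hosts {
		if h.Role == models.HostRoleMaster && h.Progress.CurrentStage == models.HostStageDone {
			n++
		}
	}
	return n
}

func main() {
	cluster := &models.Cluster{Hosts: []*models.Host{
		{Role: models.HostRoleMaster, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageDone}},
		{Role: models.HostRoleMaster, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageDone}},
		{Role: models.HostRoleWorker, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}},
	}}
	fmt.Println(doneMasters(cluster) >= 2) // true: the worker is allowed to reboot
}
```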
diff --git a/src/inventory_client/inventory_client.go b/src/inventory_client/inventory_client.go
index cca2e44fe7..eb688cbb05 100644
--- a/src/inventory_client/inventory_client.go
+++ b/src/inventory_client/inventory_client.go
@@ -36,14 +36,16 @@ import (
 const (
 	defaultRetryMinDelay = time.Duration(2) * time.Second
 	defaultRetryMaxDelay = time.Duration(10) * time.Second
-	defaultMaxRetries    = 10
+	defaultMinRetries    = 10
+	defaultMaxRetries    = 360
 )
 
 //go:generate mockgen -source=inventory_client.go -package=inventory_client -destination=mock_inventory_client.go
 type InventoryClient interface {
 	DownloadFile(ctx context.Context, filename string, dest string) error
-	DownloadHostIgnition(ctx context.Context, hostID string, dest string) error
-	UpdateHostInstallProgress(ctx context.Context, hostId string, newStage models.HostStage, info string) error
+	DownloadClusterCredentials(ctx context.Context, filename string, dest string) error
+	DownloadHostIgnition(ctx context.Context, infraEnvID string, hostID string, dest string) error
+	UpdateHostInstallProgress(ctx context.Context, infraEnvId string, hostId string, newStage models.HostStage, info string) error
 	GetEnabledHostsNamesHosts(ctx context.Context, log logrus.FieldLogger) (map[string]HostData, error)
 	UploadIngressCa(ctx context.Context, ingressCA string, clusterId string) error
 	GetCluster(ctx context.Context) (*models.Cluster, error)
@@ -53,7 +55,7 @@ type InventoryClient interface {
 	GetHosts(ctx context.Context, log logrus.FieldLogger, skippedStatuses []string) (map[string]HostData, error)
 	UploadLogs(ctx context.Context, clusterId string, logsType models.LogsType, upfile io.Reader) error
 	ClusterLogProgressReport(ctx context.Context, clusterId string, progress models.LogsState)
-	HostLogProgressReport(ctx context.Context, clusterId string, hostId string, progress models.LogsState)
+	HostLogProgressReport(ctx context.Context, infraEnvId string, hostId string, progress models.LogsState)
 	UpdateClusterOperator(ctx context.Context, clusterId string, operatorName string, operatorStatus models.OperatorStatus, operatorStatusInfo string) error
 }
 
@@ -72,12 +74,12 @@ type HostData struct {
 func CreateInventoryClient(clusterId string, inventoryURL string, pullSecret string, insecure bool, caPath string,
 	logger *logrus.Logger, proxyFunc func(*http.Request) (*url.URL, error)) (*inventoryClient, error) {
 	return CreateInventoryClientWithDelay(clusterId, inventoryURL, pullSecret, insecure, caPath,
-		logger, proxyFunc, defaultRetryMinDelay, defaultRetryMaxDelay, defaultMaxRetries)
+		logger, proxyFunc, defaultRetryMinDelay, defaultRetryMaxDelay, defaultMaxRetries, defaultMinRetries)
 }
 
 func CreateInventoryClientWithDelay(clusterId string, inventoryURL string, pullSecret string, insecure bool, caPath string,
 	logger *logrus.Logger, proxyFunc func(*http.Request) (*url.URL, error),
-	retryMinDelay, retryMaxDelay time.Duration, maxRetries int) (*inventoryClient, error) {
+	retryMinDelay, retryMaxDelay time.Duration, maxRetries int, minRetries int) (*inventoryClient, error) {
 	clientConfig := client.Config{}
 	var err error
 	clientConfig.URL, err = url.ParseRequestURI(createUrl(inventoryURL))
@@ -115,9 +117,13 @@ func CreateInventoryClientWithDelay(clusterId string, inventoryURL string, pullS
 	tr := rehttp.NewTransport(
 		transport,
 		rehttp.RetryAny(
+			rehttp.RetryAll(
+				rehttp.RetryMaxRetries(minRetries),
+				rehttp.RetryStatusInterval(400, 404),
+			),
 			rehttp.RetryAll(
 				rehttp.RetryMaxRetries(maxRetries),
-				rehttp.RetryStatusInterval(400, 600),
+				rehttp.RetryStatusInterval(405, 600),
 			),
 			rehttp.RetryAll(
 				rehttp.RetryMaxRetries(maxRetries),
@@ -184,11 +190,11 @@ func (c *inventoryClient) DownloadFile(ctx context.Context, filename string, des
 		fo.Close()
 	}()
 	c.logger.Infof("Downloading file %s to %s", filename, dest)
-	_, err = c.ai.Installer.DownloadClusterFiles(ctx, c.createDownloadParams(filename), fo)
+	_, err = c.ai.Installer.V2DownloadClusterFiles(ctx, c.createDownloadParams(filename), fo)
 	return aserror.GetAssistedError(err)
 }
 
-func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, hostID string, dest string) error {
+func (c *inventoryClient) DownloadClusterCredentials(ctx context.Context, filename string, dest string) error {
 	// open output file
 	fo, err := os.Create(dest)
 	if err != nil {
@@ -198,28 +204,48 @@ func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, hostID strin
 	defer func() {
 		fo.Close()
 	}()
+	c.logger.Infof("Downloading cluster credentials %s to %s", filename, dest)
 
-	params := installer.DownloadHostIgnitionParams{
+	params := installer.V2DownloadClusterCredentialsParams{
 		ClusterID: c.clusterId,
-		HostID:    strfmt.UUID(hostID),
+		FileName:  filename,
 	}
-	_, err = c.ai.Installer.DownloadHostIgnition(ctx, &params, fo)
+	_, err = c.ai.Installer.V2DownloadClusterCredentials(ctx, &params, fo)
 	return aserror.GetAssistedError(err)
 }
 
-func (c *inventoryClient) UpdateHostInstallProgress(ctx context.Context, hostId string, newStage models.HostStage, info string) error {
-	_, err := c.ai.Installer.UpdateHostInstallProgress(ctx, c.createUpdateHostInstallProgressParams(hostId, newStage, info))
+func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, infraEnvID string, hostID string, dest string) error {
+	// open output file
+	fo, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	// close fo on exit and check for its returned error
+	defer func() {
+		fo.Close()
+	}()
+
+	params := installer.V2DownloadHostIgnitionParams{
+		InfraEnvID: strfmt.UUID(infraEnvID),
+		HostID:     strfmt.UUID(hostID),
+	}
+	_, err = c.ai.Installer.V2DownloadHostIgnition(ctx, &params, fo)
+	return aserror.GetAssistedError(err)
+}
+
+func (c *inventoryClient) UpdateHostInstallProgress(ctx context.Context, infraEnvId, hostId string, newStage models.HostStage, info string) error {
+	_, err := c.ai.Installer.V2UpdateHostInstallProgress(ctx, c.createUpdateHostInstallProgressParams(infraEnvId, hostId, newStage, info))
 	return aserror.GetAssistedError(err)
 }
 
 func (c *inventoryClient) UploadIngressCa(ctx context.Context, ingressCA string, clusterId string) error {
-	_, err := c.ai.Installer.UploadClusterIngressCert(ctx,
-		&installer.UploadClusterIngressCertParams{ClusterID: strfmt.UUID(clusterId), IngressCertParams: models.IngressCertParams(ingressCA)})
+	_, err := c.ai.Installer.V2UploadClusterIngressCert(ctx,
+		&installer.V2UploadClusterIngressCertParams{ClusterID: strfmt.UUID(clusterId), IngressCertParams: models.IngressCertParams(ingressCA)})
 	return aserror.GetAssistedError(err)
 }
 
 func (c *inventoryClient) GetCluster(ctx context.Context) (*models.Cluster, error) {
-	cluster, err := c.ai.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: c.clusterId})
+	cluster, err := c.ai.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: c.clusterId})
 	if err != nil {
 		return nil, err
 	}
@@ -228,7 +254,7 @@ func (c *inventoryClient) GetCluster(ctx context.Context) (*models.Cluster, erro
 }
 
 func (c *inventoryClient) GetClusterMonitoredOperator(ctx context.Context, clusterId, operatorName string) (*models.MonitoredOperator, error) {
-	monitoredOperators, err := c.ai.Operators.ListOfClusterOperators(ctx, &operators.ListOfClusterOperatorsParams{
+	monitoredOperators, err := c.ai.Operators.V2ListOfClusterOperators(ctx, &operators.V2ListOfClusterOperatorsParams{
 		ClusterID:    strfmt.UUID(clusterId),
 		OperatorName: &operatorName,
 	})
@@ -240,7 +266,7 @@ func (c *inventoryClient) GetClusterMonitoredOperator(ctx context.Context, clust
 }
 
 func (c *inventoryClient) GetClusterMonitoredOLMOperators(ctx context.Context, clusterId string) ([]models.MonitoredOperator, error) {
-	monitoredOperators, err := c.ai.Operators.ListOfClusterOperators(ctx, &operators.ListOfClusterOperatorsParams{ClusterID: strfmt.UUID(clusterId)})
+	monitoredOperators, err := c.ai.Operators.V2ListOfClusterOperators(ctx, &operators.V2ListOfClusterOperatorsParams{ClusterID: strfmt.UUID(clusterId)})
 	if err != nil {
 		return nil, aserror.GetAssistedError(err)
 	}
@@ -284,17 +310,17 @@ func createUrl(baseURL string) string {
 	)
 }
 
-func (c *inventoryClient) createDownloadParams(filename string) *installer.DownloadClusterFilesParams {
-	return &installer.DownloadClusterFilesParams{
+func (c *inventoryClient) createDownloadParams(filename string) *installer.V2DownloadClusterFilesParams {
+	return &installer.V2DownloadClusterFilesParams{
 		ClusterID: c.clusterId,
 		FileName:  filename,
 	}
 }
 
-func (c *inventoryClient) createUpdateHostInstallProgressParams(hostId string, newStage models.HostStage, info string) *installer.UpdateHostInstallProgressParams {
-	return &installer.UpdateHostInstallProgressParams{
-		ClusterID: c.clusterId,
-		HostID:    strfmt.UUID(hostId),
+func (c *inventoryClient) createUpdateHostInstallProgressParams(infraEnvId, hostId string, newStage models.HostStage, info string) *installer.V2UpdateHostInstallProgressParams {
+	return &installer.V2UpdateHostInstallProgressParams{
+		InfraEnvID: strfmt.UUID(infraEnvId),
+		HostID:     strfmt.UUID(hostId),
 		HostProgress: &models.HostProgress{
 			CurrentStage: newStage,
 			ProgressInfo: info,
@@ -304,11 +330,11 @@ func (c *inventoryClient) createUpdateHostInstallProgressParams(hostId string, n
 func (c *inventoryClient) getHostsWithInventoryInfo(ctx context.Context, log logrus.FieldLogger, skippedStatuses []string) (map[string]HostData, error) {
 	hostsWithHwInfo := make(map[string]HostData)
-	hosts, err := c.ai.Installer.ListHosts(ctx, &installer.ListHostsParams{ClusterID: c.clusterId})
+	clusterData, err := c.GetCluster(ctx)
 	if err != nil {
-		return nil, aserror.GetAssistedError(err)
+		return nil, err
 	}
-	for _, host := range hosts.Payload {
+	for _, host := range clusterData.Hosts {
 		if funk.IndexOf(skippedStatuses, *host.Status) > -1 {
 			continue
 		}
@@ -324,22 +350,22 @@ func (c *inventoryClient) getHostsWithInventoryInfo(ctx context.Context, log log
 }
 
 func (c *inventoryClient) CompleteInstallation(ctx context.Context, clusterId string, isSuccess bool, errorInfo string) error {
-	_, err := c.ai.Installer.CompleteInstallation(ctx,
-		&installer.CompleteInstallationParams{ClusterID: strfmt.UUID(clusterId),
+	_, err := c.ai.Installer.V2CompleteInstallation(ctx,
+		&installer.V2CompleteInstallationParams{ClusterID: strfmt.UUID(clusterId),
 			CompletionParams: &models.CompletionParams{IsSuccess: &isSuccess, ErrorInfo: errorInfo}})
 	return aserror.GetAssistedError(err)
 }
 
 func (c *inventoryClient) UploadLogs(ctx context.Context, clusterId string, logsType models.LogsType, upfile io.Reader) error {
 	fileName := fmt.Sprintf("%s_logs.tar.gz", string(logsType))
-	_, err := c.ai.Installer.UploadLogs(ctx,
-		&installer.UploadLogsParams{ClusterID: strfmt.UUID(clusterId), LogsType: string(logsType),
+	_, err := c.ai.Installer.V2UploadLogs(ctx,
+		&installer.V2UploadLogsParams{ClusterID: strfmt.UUID(clusterId), LogsType: string(logsType),
 			Upfile: runtime.NamedReader(fileName, upfile)})
 	return aserror.GetAssistedError(err)
 }
 
 func (c *inventoryClient) ClusterLogProgressReport(ctx context.Context, clusterId string, progress models.LogsState) {
-	_, err := c.ai.Installer.UpdateClusterLogsProgress(ctx, &installer.UpdateClusterLogsProgressParams{
+	_, err := c.ai.Installer.V2UpdateClusterLogsProgress(ctx, &installer.V2UpdateClusterLogsProgressParams{
 		ClusterID: strfmt.UUID(clusterId),
 		LogsProgressParams: &models.LogsProgressParams{
 			LogsState: progress,
@@ -350,10 +376,10 @@ func (c *inventoryClient) ClusterLogProgressReport(ctx context.Context, clusterI
 	}
 }
 
-func (c *inventoryClient) HostLogProgressReport(ctx context.Context, clusterId string, hostId string, progress models.LogsState) {
-	_, err := c.ai.Installer.UpdateHostLogsProgress(ctx, &installer.UpdateHostLogsProgressParams{
-		ClusterID: strfmt.UUID(clusterId),
-		HostID:    strfmt.UUID(hostId),
+func (c *inventoryClient) HostLogProgressReport(ctx context.Context, infraEnvId string, hostId string, progress models.LogsState) {
+	_, err := c.ai.Installer.V2UpdateHostLogsProgress(ctx, &installer.V2UpdateHostLogsProgressParams{
+		InfraEnvID: strfmt.UUID(infraEnvId),
+		HostID:     strfmt.UUID(hostId),
 		LogsProgressParams: &models.LogsProgressParams{
 			LogsState: progress,
 		},
@@ -364,7 +390,7 @@ func (c *inventoryClient) HostLogProgressReport(ctx context.Context, clusterId s
 }
 
 func (c *inventoryClient) UpdateClusterOperator(ctx context.Context, clusterId string, operatorName string, operatorStatus models.OperatorStatus, operatorStatusInfo string) error {
-	_, err := c.ai.Operators.ReportMonitoredOperatorStatus(ctx, &operators.ReportMonitoredOperatorStatusParams{
+	_, err := c.ai.Operators.V2ReportMonitoredOperatorStatus(ctx, &operators.V2ReportMonitoredOperatorStatusParams{
 		ClusterID: c.clusterId,
 		ReportParams: &models.OperatorMonitorReport{
 			Name:   operatorName,
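The retry-transport change above splits the retry budget: responses in the 400–404 interval get only minRetries attempts, while 405–600 (and, in the omitted third group, transport errors) keep the much larger maxRetries budget. A condensed sketch of the same composition, assuming only the rehttp calls visible in the diff (the delay function shown is illustrative; note that rehttp status intervals are half-open, so 404 itself falls outside both groups here):

```go
package main

import (
	"net/http"
	"time"

	"github.com/PuerkitoBio/rehttp"
)

func newRetryingTransport(base http.RoundTripper, minRetries, maxRetries int) http.RoundTripper {
	return rehttp.NewTransport(base,
		rehttp.RetryAny(
			// 400-403: almost certainly a permanent client error, give up early
			rehttp.RetryAll(rehttp.RetryMaxRetries(minRetries), rehttp.RetryStatusInterval(400, 404)),
			// 405-599: server-side or transient trouble, keep trying much longer
			rehttp.RetryAll(rehttp.RetryMaxRetries(maxRetries), rehttp.RetryStatusInterval(405, 600)),
		),
		rehttp.ExpJitterDelay(2*time.Second, 10*time.Second), // illustrative backoff
	)
}
```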
diff --git a/src/inventory_client/inventory_client_test.go b/src/inventory_client/inventory_client_test.go
index 3cfc2d8663..41b5468147 100644
--- a/src/inventory_client/inventory_client_test.go
+++ b/src/inventory_client/inventory_client_test.go
@@ -29,10 +29,11 @@ const (
 var _ = Describe("inventory_client_tests", func() {
 	var (
-		clusterID = "cluster-id"
-		logger    = logrus.New()
-		client    *inventoryClient
-		server    *ghttp.Server
+		clusterID  = "cluster-id"
+		infraEnvID = "infra-env-id"
+		logger     = logrus.New()
+		client     *inventoryClient
+		server     *ghttp.Server
 	)
 
 	AfterEach(func() {
@@ -45,7 +46,7 @@ var _ = Describe("inventory_client_tests", func() {
 		server.SetAllowUnhandledRequests(true)
 		server.SetUnhandledRequestStatusCode(http.StatusInternalServerError) // 500
 		client, err = CreateInventoryClientWithDelay(clusterID, "http://"+server.Addr(), "pullSecret", true, "",
-			logger, nil, testRetryDelay, testRetryMaxDelay, testMaxRetries)
+			logger, nil, testRetryDelay, testRetryMaxDelay, testMaxRetries, testMaxRetries)
 		Expect(err).ShouldNot(HaveOccurred())
 		Expect(client).ShouldNot(BeNil())
 	})
@@ -62,26 +63,26 @@ var _ = Describe("inventory_client_tests", func() {
 		It("positive_response", func() {
 			server.Start()
-			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusOK)
-			Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
+			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK)
+			Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
 			Expect(server.ReceivedRequests()).Should(HaveLen(1))
 		})
 
 		It("negative_server_error_response", func() {
 			server.Start()
-			Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).Should(HaveOccurred())
+			Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).Should(HaveOccurred())
 			Expect(server.ReceivedRequests()).Should(HaveLen(testMaxRetries + 1))
 		})
 
 		It("positive_late_response", func() {
 			server.Start()
-			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusInternalServerError)
-			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusForbidden)
-			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusOK)
+			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusInternalServerError)
+			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusServiceUnavailable)
+			expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK)
 
-			Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
+			Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
 			Expect(server.ReceivedRequests()).Should(HaveLen(3))
 		})
 
@@ -89,18 +90,18 @@ var _ = Describe("inventory_client_tests", func() {
 			go func() {
 				time.Sleep(testRetryMaxDelay * 2)
-				expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusOK)
+				expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK)
 				server.Start()
 			}()
-			Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
+			Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
 			Expect(server.ReceivedRequests()).Should(HaveLen(1))
 		})
 
 		It("server_down", func() {
 			server.Start()
 			server.Close()
-			Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).Should(HaveOccurred())
+			Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).Should(HaveOccurred())
 		})
 	})
 })
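These tests lean on an expectServerCall helper that is defined elsewhere in the test file and not shown in this diff. A hypothetical shape consistent with its call sites, using the gomega/ghttp handlers the file already imports (the HTTP method is assumed to be PUT here, matching a progress-update endpoint):

```go
// Hypothetical sketch: queue one handler that verifies the request against the
// expected path and body, then replies with the given status code.
func expectServerCall(server *ghttp.Server, path string, expectedJson string, status int) {
	server.AppendHandlers(ghttp.CombineHandlers(
		ghttp.VerifyRequest("PUT", path),
		ghttp.VerifyJSON(expectedJson),
		ghttp.RespondWith(status, ""),
	))
}
```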
diff --git a/src/inventory_client/mock_inventory_client.go b/src/inventory_client/mock_inventory_client.go
index ac3872910f..c36b054952 100644
--- a/src/inventory_client/mock_inventory_client.go
+++ b/src/inventory_client/mock_inventory_client.go
@@ -51,32 +51,46 @@ func (mr *MockInventoryClientMockRecorder) DownloadFile(ctx, filename, dest inte
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadFile", reflect.TypeOf((*MockInventoryClient)(nil).DownloadFile), ctx, filename, dest)
 }
 
+// DownloadClusterCredentials mocks base method
+func (m *MockInventoryClient) DownloadClusterCredentials(ctx context.Context, filename, dest string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DownloadClusterCredentials", ctx, filename, dest)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// DownloadClusterCredentials indicates an expected call of DownloadClusterCredentials
+func (mr *MockInventoryClientMockRecorder) DownloadClusterCredentials(ctx, filename, dest interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadClusterCredentials", reflect.TypeOf((*MockInventoryClient)(nil).DownloadClusterCredentials), ctx, filename, dest)
+}
+
 // DownloadHostIgnition mocks base method
-func (m *MockInventoryClient) DownloadHostIgnition(ctx context.Context, hostID, dest string) error {
+func (m *MockInventoryClient) DownloadHostIgnition(ctx context.Context, infraEnvID, hostID, dest string) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "DownloadHostIgnition", ctx, hostID, dest)
+	ret := m.ctrl.Call(m, "DownloadHostIgnition", ctx, infraEnvID, hostID, dest)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // DownloadHostIgnition indicates an expected call of DownloadHostIgnition
-func (mr *MockInventoryClientMockRecorder) DownloadHostIgnition(ctx, hostID, dest interface{}) *gomock.Call {
+func (mr *MockInventoryClientMockRecorder) DownloadHostIgnition(ctx, infraEnvID, hostID, dest interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadHostIgnition", reflect.TypeOf((*MockInventoryClient)(nil).DownloadHostIgnition), ctx, hostID, dest)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadHostIgnition", reflect.TypeOf((*MockInventoryClient)(nil).DownloadHostIgnition), ctx, infraEnvID, hostID, dest)
 }
 
 // UpdateHostInstallProgress mocks base method
-func (m *MockInventoryClient) UpdateHostInstallProgress(ctx context.Context, hostId string, newStage models.HostStage, info string) error {
+func (m *MockInventoryClient) UpdateHostInstallProgress(ctx context.Context, infraEnvId, hostId string, newStage models.HostStage, info string) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateHostInstallProgress", ctx, hostId, newStage, info)
+	ret := m.ctrl.Call(m, "UpdateHostInstallProgress", ctx, infraEnvId, hostId, newStage, info)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // UpdateHostInstallProgress indicates an expected call of UpdateHostInstallProgress
-func (mr *MockInventoryClientMockRecorder) UpdateHostInstallProgress(ctx, hostId, newStage, info interface{}) *gomock.Call {
+func (mr *MockInventoryClientMockRecorder) UpdateHostInstallProgress(ctx, infraEnvId, hostId, newStage, info interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHostInstallProgress", reflect.TypeOf((*MockInventoryClient)(nil).UpdateHostInstallProgress), ctx, hostId, newStage, info)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHostInstallProgress", reflect.TypeOf((*MockInventoryClient)(nil).UpdateHostInstallProgress), ctx, infraEnvId, hostId, newStage, info)
 }
 
 // GetEnabledHostsNamesHosts mocks base method
@@ -209,15 +223,15 @@ func (mr *MockInventoryClientMockRecorder) ClusterLogProgressReport(ctx, cluster
 }
 
 // HostLogProgressReport mocks base method
-func (m *MockInventoryClient) HostLogProgressReport(ctx context.Context, clusterId, hostId string, progress models.LogsState) {
+func (m *MockInventoryClient) HostLogProgressReport(ctx context.Context, infraEnvId, hostId string, progress models.LogsState) {
 	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "HostLogProgressReport", ctx, clusterId, hostId, progress)
+	m.ctrl.Call(m, "HostLogProgressReport", ctx, infraEnvId, hostId, progress)
 }
 
 // HostLogProgressReport indicates an expected call of HostLogProgressReport
-func (mr *MockInventoryClientMockRecorder) HostLogProgressReport(ctx, clusterId, hostId, progress interface{}) *gomock.Call {
+func (mr *MockInventoryClientMockRecorder) HostLogProgressReport(ctx, infraEnvId, hostId, progress interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostLogProgressReport", reflect.TypeOf((*MockInventoryClient)(nil).HostLogProgressReport), ctx, clusterId, hostId, progress)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostLogProgressReport", reflect.TypeOf((*MockInventoryClient)(nil).HostLogProgressReport), ctx, infraEnvId, hostId, progress)
 }
 
 // UpdateClusterOperator mocks base method
diff --git a/src/main/assisted-installer-controller/assisted_installer_main.go b/src/main/assisted-installer-controller/assisted_installer_main.go
index 8effdc96e8..73ec8d4218 100644
--- a/src/main/assisted-installer-controller/assisted_installer_main.go
+++ b/src/main/assisted-installer-controller/assisted_installer_main.go
@@ -66,7 +66,6 @@ func main() {
 	)
 
 	var wg sync.WaitGroup
-	var status assistedinstallercontroller.ControllerStatus
 
 	mainContext, mainContextCancel := context.WithCancel(context.Background())
 
 	// No need to cancel with context, will finish quickly
@@ -89,16 +88,16 @@ func main() {
 	go assistedController.WaitAndUpdateNodesStatus(mainContext, &wg)
 	wg.Add(1)
 
-	go assistedController.PostInstallConfigs(mainContext, &wg, &status)
+	go assistedController.PostInstallConfigs(mainContext, &wg)
 	wg.Add(1)
 
 	go assistedController.UpdateBMHs(mainContext, &wg)
 	wg.Add(1)
 
-	go assistedController.UploadLogs(mainContext, &wg, &status)
+	go assistedController.UploadLogs(mainContext, &wg)
 	wg.Add(1)
 
 	// monitoring installation by cluster status
-	waitForInstallation(client, logger, &status)
+	waitForInstallation(client, logger, assistedController.Status)
 }
 
 // waitForInstallation monitors cluster status and blocks main from cancelling all goroutines
@@ -118,10 +117,10 @@ func waitForInstallation(client inventory_client.InventoryClient, log logrus.Fie
 			// we should exit controller after maximumErrorsBeforeExit errors
 			// in case cluster was deleted we should exit immediately
 			switch err.(type) {
-			case *installer.GetClusterNotFound:
+			case *installer.V2GetClusterNotFound:
 				errCounter = errCounter + maximumErrorsBeforeExit
 				log.WithError(err).Errorf("Cluster was not found in inventory or user is not authorized")
-			case *installer.GetClusterUnauthorized:
+			case *installer.V2GetClusterUnauthorized:
 				errCounter++
 				log.WithError(err).Errorf("User is not authenticated to perform the operation")
 			}
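With this change, main no longer owns a ControllerStatus value: the controller exposes its own through assistedController.Status, and the tests below construct one via NewControllerStatus(). The type itself is outside this diff; a plausible minimal shape, assuming it only needs a goroutine-safe error flag shared between the upload/config goroutines and waitForInstallation:

```go
// Sketch under assumptions — the real type in the controller package may track more state.
package assistedinstallercontroller

import "sync/atomic"

type ControllerStatus struct {
	errCount uint32
}

func NewControllerStatus() *ControllerStatus {
	return &ControllerStatus{}
}

// Error records a failure; safe to call from multiple goroutines.
func (s *ControllerStatus) Error() {
	atomic.AddUint32(&s.errCount, 1)
}

// HasError reports whether any goroutine recorded a failure.
func (s *ControllerStatus) HasError() bool {
	return atomic.LoadUint32(&s.errCount) > 0
}
```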
diff --git a/src/main/assisted-installer-controller/assisted_installer_main_test.go b/src/main/assisted-installer-controller/assisted_installer_main_test.go
index e2316e9b9d..718839dc94 100644
--- a/src/main/assisted-installer-controller/assisted_installer_main_test.go
+++ b/src/main/assisted-installer-controller/assisted_installer_main_test.go
@@ -37,7 +37,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		ctrl = gomock.NewController(GinkgoT())
 		mockbmclient = inventory_client.NewMockInventoryClient(ctrl)
 		waitForInstallationInterval = 10 * time.Millisecond
-		status = &assistedinstallercontroller.ControllerStatus{}
+		status = assistedinstallercontroller.NewControllerStatus()
 	})
 	AfterEach(func() {
 		ctrl.Finish()
@@ -71,7 +71,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		exit = func(code int) {
 			exitCode = code
 		}
-		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterUnauthorized()).Times(maximumErrorsBeforeExit)
+		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterUnauthorized()).Times(maximumErrorsBeforeExit)
 		// added to make waitForInstallation exit
 		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1)
 		waitForInstallation(mockbmclient, l, status)
@@ -84,7 +84,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		exit = func(code int) {
 			exitCode = code
 		}
-		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterNotFound()).Times(maximumErrorsBeforeExit)
+		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterNotFound()).Times(maximumErrorsBeforeExit)
 		// added to make waitForInstallation exit
 		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1)
@@ -99,7 +99,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		exit = func(code int) {
 			exitCode = code
 		}
-		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterNotFound()).Times(1)
+		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterNotFound()).Times(1)
 		// added to make waitForInstallation exit
 		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1)
@@ -113,7 +113,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		exit = func(code int) {
 			exitCode = code
 		}
-		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterUnauthorized()).Times(maximumErrorsBeforeExit)
+		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterUnauthorized()).Times(maximumErrorsBeforeExit)
 		// added to make waitForInstallation exit
 		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1)
@@ -127,7 +127,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		exit = func(code int) {
 			exitCode = code
 		}
-		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterUnauthorized()).Times(maximumErrorsBeforeExit - 2)
+		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterUnauthorized()).Times(maximumErrorsBeforeExit - 2)
 		// added to make waitForInstallation exit
 		mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1)
diff --git a/src/ops/coreos_installer_log_writer.go b/src/ops/coreos_installer_log_writer.go
index 6b71fd8756..d1bf3cc93d 100644
--- a/src/ops/coreos_installer_log_writer.go
+++ b/src/ops/coreos_installer_log_writer.go
@@ -21,15 +21,17 @@ type CoreosInstallerLogWriter struct {
 	lastLogLine      []byte
 	progressReporter inventory_client.InventoryClient
 	progressRegex    *regexp.Regexp
+	infraEnvID       string
 	hostID           string
 	lastProgress     int
 }
 
-func NewCoreosInstallerLogWriter(logger *logrus.Logger, progressReporter inventory_client.InventoryClient, hostID string) *CoreosInstallerLogWriter {
+func NewCoreosInstallerLogWriter(logger *logrus.Logger, progressReporter inventory_client.InventoryClient, infraEnvID string, hostID string) *CoreosInstallerLogWriter {
 	return &CoreosInstallerLogWriter{log: logger,
 		lastLogLine:      []byte{},
 		progressReporter: progressReporter,
 		progressRegex:    regexp.MustCompile(`(.*?)\((.*?\%)\)\s*`),
+		infraEnvID:       infraEnvID,
 		hostID:           hostID,
 		lastProgress:     0,
 	}
@@ -60,7 +62,7 @@ func (l *CoreosInstallerLogWriter) reportProgress() {
 	if currentPercent >= l.lastProgress+MinProgressDelta || (currentPercent == completed && l.lastProgress != completed) {
 		// If the progress is more than 5% report it
 		ctx := utils.GenerateRequestContext()
-		if err := l.progressReporter.UpdateHostInstallProgress(ctx, l.hostID, models.HostStageWritingImageToDisk, match[2]); err == nil {
+		if err := l.progressReporter.UpdateHostInstallProgress(ctx, l.infraEnvID, l.hostID, models.HostStageWritingImageToDisk, match[2]); err == nil {
 			l.lastProgress = currentPercent
 		}
 	}
diff --git a/src/ops/coreos_installer_log_writer_test.go b/src/ops/coreos_installer_log_writer_test.go
index 4d211a0438..7b6f8de5bd 100644
--- a/src/ops/coreos_installer_log_writer_test.go
+++ b/src/ops/coreos_installer_log_writer_test.go
@@ -30,9 +30,9 @@ var _ = Describe("Verify CoreosInstallerLogger", func() {
 	updateProgressSuccess := func(stages [][]string) {
 		for _, stage := range stages {
 			if len(stage) == 2 {
-				mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "hostID", models.HostStage(stage[0]), stage[1]).Return(nil).Times(1)
+				mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "infraEnvID", "hostID", models.HostStage(stage[0]), stage[1]).Return(nil).Times(1)
 			} else {
-				mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "hostID", models.HostStage(stage[0]), "").Return(nil).Times(1)
+				mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "infraEnvID", "hostID", models.HostStage(stage[0]), "").Return(nil).Times(1)
 			}
 		}
 	}
@@ -41,7 +41,7 @@ var _ = Describe("Verify CoreosInstallerLogger", func() {
 		logger, hook = test.NewNullLogger()
 		ctrl := gomock.NewController(GinkgoT())
 		mockbmclient = inventory_client.NewMockInventoryClient(ctrl)
-		cilogger = NewCoreosInstallerLogWriter(logger, mockbmclient, "hostID")
+		cilogger = NewCoreosInstallerLogWriter(logger, mockbmclient, "infraEnvID", "hostID")
 	})
 	It("test log with new line", func() {
 		_, err := cilogger.Write([]byte("some log with a new line \n"))
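The log writer above scrapes percentage tokens out of coreos-installer's progress output with the regex `(.*?)\((.*?\%)\)\s*` and throttles reports to MinProgressDelta steps. A standalone check of what the capture groups yield (the sample line is illustrative of coreos-installer's style, not quoted from it):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	progressRegex := regexp.MustCompile(`(.*?)\((.*?\%)\)\s*`)
	// A line in the style coreos-installer prints while writing the image.
	match := progressRegex.FindStringSubmatch("> Read disk 473.8 MiB/473.8 MiB (100%)")
	if match != nil {
		// match[2] is the value reported as WritingImageToDisk progress.
		fmt.Println(match[2]) // "100%"
	}
}
```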
ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMustGatherLogs indicates an expected call of GetMustGatherLogs -func (mr *MockOpsMockRecorder) GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg interface{}) *gomock.Call { +func (mr *MockOpsMockRecorder) GetMustGatherLogs(workDir, kubeconfigPath interface{}, images ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMustGatherLogs", reflect.TypeOf((*MockOps)(nil).GetMustGatherLogs), workDir, kubeconfigPath, mustGatherImg) + varargs := append([]interface{}{workDir, kubeconfigPath}, images...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMustGatherLogs", reflect.TypeOf((*MockOps)(nil).GetMustGatherLogs), varargs...) } // CreateRandomHostname mocks base method @@ -366,7 +371,7 @@ func (mr *MockOpsMockRecorder) EvaluateDiskSymlink(arg0 interface{}) *gomock.Cal } // CreateManifests mocks base method -func (m *MockOps) CreateManifests(arg0 string, arg1 string) error { +func (m *MockOps) CreateManifests(arg0 string, arg1 []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateManifests", arg0, arg1) ret0, _ := ret[0].(error) @@ -374,7 +379,7 @@ func (m *MockOps) CreateManifests(arg0 string, arg1 string) error { } // CreateManifests indicates an expected call of CreateManifests -func (mr *MockOpsMockRecorder) CreateManifests(arg0 interface{}, arg1 interface{}) *gomock.Call { +func (mr *MockOpsMockRecorder) CreateManifests(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateManifests", reflect.TypeOf((*MockOps)(nil).CreateManifests), arg0, arg1) } diff --git a/src/ops/ops.go b/src/ops/ops.go index e02098c648..2d013a4064 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "runtime" "strconv" "text/template" @@ -43,11 +44,11 @@ type Ops interface { UploadInstallationLogs(isBootstrap bool) (string, error) ReloadHostFile(filepath string) error CreateOpenshiftSshManifest(filePath, template, sshPubKeyPath string) error - GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg string) (string, error) + GetMustGatherLogs(workDir, kubeconfigPath string, images ...string) (string, error) CreateRandomHostname(hostname string) error GetHostname() (string, error) EvaluateDiskSymlink(string) string - CreateManifests(string, string) error + CreateManifests(string, []byte) error } const ( @@ -101,17 +102,20 @@ type ExecCommandError struct { WaitStatus int } +func removePullSecret(s []string) []string { + return strings.Split(strings.ReplaceAll(strings.Join(s, " "), config.GlobalConfig.PullSecretToken, ""), " ") +} + func (e *ExecCommandError) Error() string { lastOutput := e.Output if len(e.Output) > 200 { lastOutput = "... 
" + e.Output[len(e.Output)-200:] } - - return fmt.Sprintf("failed executing %s %v, Error %s, LastOutput \"%s\"", e.Command, e.Args, e.ExitErr, lastOutput) + return fmt.Sprintf("failed executing %s %v, Error %s, LastOutput \"%s\"", e.Command, removePullSecret(e.Args), e.ExitErr, lastOutput) } func (e *ExecCommandError) DetailedError() string { - return fmt.Sprintf("failed executing %s %v, env vars %v, error %s, waitStatus %d, Output \"%s\"", e.Command, e.Args, e.Env, e.ExitErr, e.WaitStatus, e.Output) + return fmt.Sprintf("failed executing %s %v, env vars %v, error %s, waitStatus %d, Output \"%s\"", e.Command, removePullSecret(e.Args), removePullSecret(e.Env), e.ExitErr, e.WaitStatus, e.Output) } // ExecCommand executes command. @@ -156,7 +160,7 @@ func (o *ops) ExecCommand(liveLogger io.Writer, command string, args ...string) } return output, execErr } - o.log.Debug("Command executed:", " command", command, " arguments", args, "env vars", cmd.Env, "output", output) + o.log.Debug("Command executed:", " command", command, " arguments", removePullSecret(args), "env vars", removePullSecret(cmd.Env), "output", output) return output, err } @@ -178,7 +182,7 @@ func (o *ops) SystemctlAction(action string, args ...string) error { func (o *ops) WriteImageToDisk(ignitionPath string, device string, progressReporter inventory_client.InventoryClient, extraArgs []string) error { allArgs := installerArgs(ignitionPath, device, extraArgs) o.log.Infof("Writing image and ignition to disk with arguments: %v", allArgs) - _, err := o.ExecPrivilegeCommand(NewCoreosInstallerLogWriter(o.log, progressReporter, config.GlobalConfig.HostID), + _, err := o.ExecPrivilegeCommand(NewCoreosInstallerLogWriter(o.log, progressReporter, config.GlobalConfig.InfraEnvID, config.GlobalConfig.HostID), "coreos-installer", allArgs...) return err } @@ -220,16 +224,16 @@ func (o *ops) Reboot() error { } func (o *ops) SetBootOrder(device string) error { - _, err := o.ExecPrivilegeCommand(o.logWriter, "test", "-d", "/sys/firmware/efi") + _, err := o.ExecPrivilegeCommand(nil, "test", "-d", "/sys/firmware/efi") if err != nil { - o.log.Info("efi not supported") + o.log.Info("setting the boot order on BIOS systems is not supported. 
Skipping...") return nil } o.log.Info("Setting efibootmgr to boot from disk") // efi-system is installed onto partition 2 - _, err = o.ExecPrivilegeCommand(o.logWriter, "efibootmgr", "-d", device, "-p", "2", "-c", "-L", "Red Hat Enterprise Linux", "-l", "\\EFI\\redhat\\shimx64.efi") + _, err = o.ExecPrivilegeCommand(o.logWriter, "efibootmgr", "-v", "-d", device, "-p", "2", "-c", "-L", "Red Hat Enterprise Linux", "-l", o.getEfiFilePath()) if err != nil { o.log.Errorf("Failed to set efibootmgr to boot from disk %s, err: %s", device, err) return err @@ -237,8 +241,20 @@ func (o *ops) SetBootOrder(device string) error { return nil } +func (o *ops) getEfiFilePath() string { + var efiFileName string + switch runtime.GOARCH { + case "arm64": + efiFileName = "shimaa64.efi" + default: + efiFileName = "shimx64.efi" + } + o.log.Infof("Using EFI file '%s' for GOARCH '%s'", efiFileName, runtime.GOARCH) + return fmt.Sprintf("\\EFI\\redhat\\%s", efiFileName) +} + func (o *ops) ExtractFromIgnition(ignitionPath string, fileToExtract string) error { - o.log.Infof("Getting pull secret from %s", ignitionPath) + o.log.Infof("Getting data from %s", ignitionPath) ignitionData, err := ioutil.ReadFile(ignitionPath) if err != nil { o.log.Errorf("Error occurred while trying to read %s : %e", ignitionPath, err) @@ -447,12 +463,16 @@ func (o *ops) GetMCSLogs() (string, error) { return string(logs), nil } +// This function actually runs a container that implements the logs_sender command. +// Any change to the assisted-service API that is used by the logs_sender command +// (for example UploadLogs) must be reflected here (input parameters, etc.), if needed. func (o *ops) UploadInstallationLogs(isBootstrap bool) (string, error) { command := "podman" args := []string{"run", "--rm", "--privileged", "--net=host", "--pid=host", "-v", "/run/systemd/journal/socket:/run/systemd/journal/socket", "-v", "/var/log:/var/log", config.GlobalConfig.AgentImage, "logs_sender", "-cluster-id", config.GlobalConfig.ClusterID, "-url", config.GlobalConfig.URL, - "-host-id", config.GlobalConfig.HostID, + "-host-id", config.GlobalConfig.HostID, "-infra-env-id", config.GlobalConfig.InfraEnvID, "-pull-secret-token", config.GlobalConfig.PullSecretToken, fmt.Sprintf("-insecure=%s", strconv.FormatBool(config.GlobalConfig.SkipCertVerification)), fmt.Sprintf("-bootstrap=%s", strconv.FormatBool(isBootstrap)), @@ -514,16 +534,13 @@ func (o *ops) CreateOpenshiftSshManifest(filePath, tmpl, sshPubKeyPath string) e return nil } -func (o *ops) GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg string) (string, error) { +func (o *ops) GetMustGatherLogs(workDir, kubeconfigPath string, images ...string) (string, error) { //invoke oc adm must-gather command in the working directory - var imageOption string - if mustGatherImg == "" { - o.log.Infof("collecting must-gather logs into %s using image from release", workDir) - imageOption = "" - } else { - o.log.Infof("collecting must-gather logs into %s using image %s", workDir, mustGatherImg) - imageOption = fmt.Sprintf(" --image=%s", mustGatherImg) + var imageOption string + for _, img := range images { + imageOption = imageOption + fmt.Sprintf(" --image=%s", img) } + command := fmt.Sprintf("cd %s && oc --kubeconfig=%s adm must-gather%s", workDir, kubeconfigPath, imageOption) output, err := o.ExecCommand(o.logWriter, "bash", "-c", command) if err != nil { @@ -568,13 +585,26 @@ func (o *ops) GetHostname() (string, error) { return os.Hostname() } -func (o *ops) CreateManifests(kubeconfig string, 
manifestFilePath string) error { - command := fmt.Sprintf("oc --kubeconfig=%s apply -f %s", kubeconfig, manifestFilePath) +func (o *ops) CreateManifests(kubeconfig string, content []byte) error { + // Create a temp file where we store the content to be applied by the oc command: + file, err := ioutil.TempFile("", "operator-manifest") + if err != nil { + return err + } + defer os.Remove(file.Name()) + + // Write the content to the temporary file: + if err = ioutil.WriteFile(file.Name(), content, 0644); err != nil { + return err + } + + // Run the oc command that creates the custom manifest: + command := fmt.Sprintf("oc --kubeconfig=%s apply -f %s", kubeconfig, file.Name()) output, err := o.ExecCommand(o.logWriter, "bash", "-c", command) if err != nil { return err } - o.log.Infof("Applying custom manifest file %s succeed %s", manifestFilePath, output) + o.log.Infof("Applying custom manifest file %s succeeded: %s", file.Name(), output) return nil } diff --git a/src/ops/ops_test.go b/src/ops/ops_test.go index 6148b3d182..f3239bee0a 100644 --- a/src/ops/ops_test.go +++ b/src/ops/ops_test.go @@ -5,9 +5,13 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/openshift/assisted-installer/src/config" ) var _ = Describe("ExecCommandError", func() { + pullSecret := "TEST-TOKEN" + config.GlobalConfig.PullSecretToken = pullSecret + It("Creates the correct error for mkdir", func() { err := &ExecCommandError{ Command: "mkdir", @@ -26,15 +30,15 @@ var _ = Describe("ExecCommandError", func() { It("Creates the correct error for ignition extract", func() { err := &ExecCommandError{ Command: "nsenter", - Args: []string{"-t", "1", "-m", "-i", "--", "podman", "run", "--net", "host", "--volume", "/:/rootfs:rw", "--volume", "/usr/bin/rpm-ostree:/usr/bin/rpm-ostree", "--privileged", "--entrypoint", "/usr/bin/machine-config-daemon", "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221", "start", "--node-name", "localhost", "--root-mount", "/rootfs", "--once-from", "/opt/install-dir/bootstrap.ign", "--skip-reboot"}, - Env: []string{"HOME=/home/userZ"}, + Args: []string{"-t", "1", "-m", "-i", "--", "podman", "run", "--net", "host", "--volume", "/:/rootfs:rw", "--volume", "/usr/bin/rpm-ostree:/usr/bin/rpm-ostree", "--privileged", "--entrypoint", "/usr/bin/machine-config-daemon", "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221", "start", "--node-name", "localhost", "--root-mount", "/rootfs", "--once-from", "/opt/install-dir/bootstrap.ign", "--skip-reboot", "--pull-secret", pullSecret}, + Env: []string{"HOME=/home/userZ", fmt.Sprintf("PULL_SECRET_TOKEN=%s", pullSecret)}, ExitErr: fmt.Errorf("exit status 255"), WaitStatus: 255, Output: "Trying to pull quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221...\nGetting image source signatures\nCopying blob sha256:74cbb6607642df5f9f70e8588e3c56d6de795d1a9af22866ea4cc82f2dad4f14\nCopying blob sha256:c9fa7d57b9028d4bd02b51cef3c3039fa7b23a8b2d9d26a6ce66b3428f6e2457\nCopying blob sha256:c676df4ac84e718ecee4f8129e43e9c2b7492942606cc65f1fc5e6f3da413160\nCopying blob sha256:b147db91a07555d29ed6085e4733f34dbaa673076488caa8f95f4677f55b3a5c\nCopying blob sha256:ad956945835b7630565fc23fcbd8194eef32b4300c28546d574b2a377fe5d0a5\nCopying config sha256:c4356549f53a30a1baefc5d1515ec1ab8b3786a4bf1738c0abaedc0e44829498\nWriting manifest to image destination\nStoring 
signatures\nI1019 19:03:28.797092 1 start.go:108] Version: v4.6.0-202008262209.p0-dirty (16d243c4bed178f5d4fd400c0518ebf1dbaface8)\nI1019 19:03:28.797227 1 start.go:118] Calling chroot(\"/rootfs\")\nI1019 19:03:28.797307 1 rpm-ostree.go:261] Running captured: rpm-ostree status --json\nerror: Timeout was reached\nF1019 19:04:35.869592 1 start.go:147] Failed to initialize single run daemon: error reading osImageURL from rpm-ostree: error running rpm-ostree status --json: : exit status 1)", } - wantError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot], Error exit status 255, LastOutput "... or: Timeout was reached + wantError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot --pull-secret ], Error exit status 255, LastOutput "... or: Timeout was reached F1019 19:04:35.869592 1 start.go:147] Failed to initialize single run daemon: error reading osImageURL from rpm-ostree: error running rpm-ostree status --json: : exit status 1)"` - wantDetailedError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot], env vars [HOME=/home/userZ], error exit status 255, waitStatus 255, Output "Trying to pull quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221... + wantDetailedError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot --pull-secret ], env vars [HOME=/home/userZ PULL_SECRET_TOKEN=], error exit status 255, waitStatus 255, Output "Trying to pull quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221... 
Getting image source signatures Copying blob sha256:74cbb6607642df5f9f70e8588e3c56d6de795d1a9af22866ea4cc82f2dad4f14 Copying blob sha256:c9fa7d57b9028d4bd02b51cef3c3039fa7b23a8b2d9d26a6ce66b3428f6e2457 diff --git a/src/utils/utils.go b/src/utils/utils.go index f19ef79bc9..457853e72c 100644 --- a/src/utils/utils.go +++ b/src/utils/utils.go @@ -222,6 +222,12 @@ func WaitForPredicateWithContext(ctx context.Context, timeout time.Duration, int }) } +func WaitForPredicateParamsWithContext(ctx context.Context, timeout time.Duration, interval time.Duration, predicate func(arg interface{}) bool, arg interface{}) error { + return WaitForPredicateWithTimer(ctx, timeout, interval, func(timer *time.Timer) bool { + return predicate(arg) + }) +} + // ProxyFromEnvVars provides an alternative to http.ProxyFromEnvironment since it is being initialized only // once and that happens by k8s before proxy settings was obtained. While this is no issue for k8s, it prevents // any out-of-cluster traffic from using the proxy diff --git a/test_files/mcs_logs.txt b/test_files/mcs_logs.txt index bfb6b1f948..bf5891ea01 100644 --- a/test_files/mcs_logs.txt +++ b/test_files/mcs_logs.txt @@ -4,6 +4,6 @@ 2020-07-01T16:57:08.449846700+00:00 stderr F I0701 16:57:08.449808 1 api.go:102] Pool master requested by 192.168.126.12:32780 User-Agent:"Ignition/2.6.0" 2020-07-01T16:57:08.449904020+00:00 stderr F I0701 16:57:08.449893 1 bootstrap_server.go:64] reading file "/etc/mcs/bootstrap/machine-pools/master.yaml" 2020-07-01T16:57:08.451073060+00:00 stderr F I0701 16:57:08.451054 1 bootstrap_server.go:84] reading file "/etc/mcs/bootstrap/machine-configs/rendered-master-39287e7d053e8395ab3c1ecd762dd578.yaml" -2020-07-01T16:57:22.319520480+00:00 stderr F I0701 16:57:22.319461 1 api.go:102] Pool master requested by 192.168.126.11:40548 User-Agent:"Ignition/2.6.0" +2020-07-01T16:57:22.319520480+00:00 stderr F I0701 16:57:22.319461 1 api.go:102] Pool master requested by [fe80::5054:ff:fe9a:4739%ens3]:40548 User-Agent:"Ignition/2.6.0" 2020-07-01T16:57:22.319520480+00:00 stderr F I0701 16:57:22.319485 1 bootstrap_server.go:64] reading file "/etc/mcs/bootstrap/machine-pools/master.yaml" 2020-07-01T16:57:22.320165920+00:00 stderr F I0701 16:57:22.320128 1 bootstrap_server.go:84] reading file "/etc/mcs/bootstrap/machine-configs/rendered-master-39287e7d053e8395ab3c1ecd762dd578.yaml"
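The reworked CoreosInstallerLogWriter threads infraEnvID through to UpdateHostInstallProgress and still reports only when progress advances by at least MinProgressDelta. A standalone sketch of the same regex-and-threshold logic; the sample log lines and the local minProgressDelta constant are illustrative stand-ins, not taken from the PR:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// Same pattern as the diff: capture the text before the parentheses
// and the percentage inside them.
var progressRegex = regexp.MustCompile(`(.*?)\((.*?\%)\)\s*`)

const minProgressDelta = 5 // report only when progress grows by at least 5%

func main() {
	lastProgress := 0
	lines := []string{
		"> Read disk 100 MiB/500 MiB (20%)", // invented sample lines
		"> Read disk 110 MiB/500 MiB (22%)",
		"> Read disk 250 MiB/500 MiB (50%)",
	}
	for _, line := range lines {
		match := progressRegex.FindStringSubmatch(line)
		if match == nil {
			continue
		}
		percent, _ := strconv.Atoi(strings.TrimSuffix(match[2], "%"))
		if percent >= lastProgress+minProgressDelta {
			// The real writer calls UpdateHostInstallProgress(ctx, infraEnvID, hostID, ...) here.
			fmt.Printf("would report WritingImageToDisk progress %q\n", match[2])
			lastProgress = percent
		}
	}
}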
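Because GetMustGatherLogs is now variadic, the regenerated mock packs the trailing images into a varargs slice before calling m.ctrl.Call, which changes how expectations match. A hypothetical expectation snippet, assuming the usual gomock.NewController setup from the surrounding tests; note that gomock treats a single gomock.Any() in the variadic position as matching any number of trailing arguments:

mockOps := NewMockOps(ctrl)

// Match an exact pair of must-gather images (image names invented for the demo):
mockOps.EXPECT().
	GetMustGatherLogs("/tmp/work", "/tmp/kubeconfig", "quay.io/example/mg-a", "quay.io/example/mg-b").
	Return("/tmp/work/must-gather.tar.gz", nil)

// Or accept whatever images the caller passes, including none:
mockOps.EXPECT().
	GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).
	Return("", nil).
	AnyTimes()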
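The new removePullSecret helper in src/ops/ops.go scrubs the pull-secret token from command arguments and environment variables before they reach error messages and debug logs. A minimal standalone sketch of the same join/replace/split approach, with a local secret parameter standing in for config.GlobalConfig.PullSecretToken:

package main

import (
	"fmt"
	"strings"
)

// scrubSecret mirrors the diff's removePullSecret: join the slice into one
// string, delete every occurrence of the secret, then split back on spaces.
func scrubSecret(s []string, secret string) []string {
	return strings.Split(strings.ReplaceAll(strings.Join(s, " "), secret, ""), " ")
}

func main() {
	args := []string{"--pull-secret", "TEST-TOKEN", "--node-name", "localhost"}
	fmt.Println(scrubSecret(args, "TEST-TOKEN"))
	// Output: [--pull-secret  --node-name localhost]
}

The scrubbed token leaves an empty element behind, which is why the updated wantError in ops_test.go now ends with "--pull-secret ]" followed by a blank where the token used to be.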
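getEfiFilePath selects the shim binary by build architecture, so on arm64 hosts efibootmgr registers \EFI\redhat\shimaa64.efi instead of shimx64.efi. A standalone sketch of the same selection, written as a pure function so both branches are easy to exercise (the arch strings match Go's runtime.GOARCH values):

package main

import (
	"fmt"
	"runtime"
)

// efiFilePath mirrors the diff's getEfiFilePath, but takes the
// architecture as a parameter instead of reading runtime.GOARCH directly.
func efiFilePath(goarch string) string {
	efiFileName := "shimx64.efi"
	if goarch == "arm64" {
		efiFileName = "shimaa64.efi"
	}
	return fmt.Sprintf("\\EFI\\redhat\\%s", efiFileName)
}

func main() {
	for _, arch := range []string{"amd64", "arm64", runtime.GOARCH} {
		fmt.Printf("%s -> %s\n", arch, efiFilePath(arch))
	}
}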
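With the variadic signature, GetMustGatherLogs simply appends one --image=... flag per entry, and passing no images falls back to the release default exactly as before (empty imageOption). A quick sketch of the command string it builds; the paths and image names here are invented for the demo:

package main

import "fmt"

// buildMustGatherCommand mirrors the loop in the diff's GetMustGatherLogs.
func buildMustGatherCommand(workDir, kubeconfigPath string, images ...string) string {
	imageOption := ""
	for _, img := range images {
		imageOption += fmt.Sprintf(" --image=%s", img)
	}
	return fmt.Sprintf("cd %s && oc --kubeconfig=%s adm must-gather%s", workDir, kubeconfigPath, imageOption)
}

func main() {
	fmt.Println(buildMustGatherCommand("/tmp/mg", "/tmp/kubeconfig"))
	fmt.Println(buildMustGatherCommand("/tmp/mg", "/tmp/kubeconfig",
		"quay.io/example/ocp-must-gather", "quay.io/example/odf-must-gather"))
}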
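CreateManifests now receives the manifest bytes instead of a path, writes them to a throwaway temp file, and lets oc apply pick them up, so callers no longer need to stage files on disk themselves. A hypothetical call site (the YAML, the kubeconfigPath variable, and the logger are illustrative, not from the PR):

// manifest content supplied in memory, e.g. rendered from a template
manifest := []byte(`apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config
  namespace: assisted-installer
data:
  greeting: hello
`)

// o is an ops instance; kubeconfigPath is whatever kubeconfig the controller holds.
if err := o.CreateManifests(kubeconfigPath, manifest); err != nil {
	log.Errorf("applying custom manifest: %v", err)
}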
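Finally, the new WaitForPredicateParamsWithContext in src/utils/utils.go is a thin wrapper that forwards a single opaque argument into the polled predicate, letting callers reuse one predicate function for different inputs. A hypothetical usage, assuming the same utils package and an isInstalled helper that does not exist in the PR:

// clusterReady receives the argument passed to WaitForPredicateParamsWithContext.
clusterReady := func(arg interface{}) bool {
	id := arg.(string)
	return isInstalled(id) // hypothetical helper polling cluster state
}

// Poll every 30 seconds for up to 10 minutes, or until ctx is cancelled.
err := utils.WaitForPredicateParamsWithContext(ctx, 10*time.Minute, 30*time.Second, clusterReady, clusterID)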