diff --git a/.drone.yml b/.drone.yml index 554214ce50c8..a748b2f3e207 100644 --- a/.drone.yml +++ b/.drone.yml @@ -719,7 +719,7 @@ steps: UPGRADE_CHANNEL="latest" fi fi - E2E_RELEASE_CHANNEL=$UPGRADE_CHANNEL go test -v -timeout=45m ./upgradecluster_test.go -ci -local + E2E_RELEASE_CHANNEL=$UPGRADE_CHANNEL go test -v -timeout=45m ./upgradecluster_test.go -ci -local -ginkgo.v cp ./coverage.out /tmp/artifacts/upgrade-coverage.out fi - docker stop registry && docker rm registry diff --git a/.github/actions/vagrant-setup/action.yaml b/.github/actions/vagrant-setup/action.yaml index 37f268809e20..d3158c40303a 100644 --- a/.github/actions/vagrant-setup/action.yaml +++ b/.github/actions/vagrant-setup/action.yaml @@ -8,17 +8,15 @@ runs: run: | curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list - sudo sed -i 's/^# deb-src/deb-src/' /etc/apt/sources.list - name: Install vagrant and libvirt shell: bash run: | sudo apt-get update - sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant + sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant ruby-libvirt sudo systemctl enable --now libvirtd - - name: Build vagrant dependencies + - name: Install vagrant dependencies shell: bash run: | - sudo apt-get build-dep -y vagrant ruby-libvirt sudo apt-get install -y --no-install-recommends libxslt-dev libxml2-dev libvirt-dev ruby-bundler ruby-dev zlib1g-dev # This is a workaround for the libvirt group not being available in the current shell # https://github.com/actions/runner-images/issues/7670#issuecomment-1900711711 @@ -26,8 +24,6 @@ runs: shell: bash run: | sudo chmod a+rw /var/run/libvirt/libvirt-sock - - - name: Install vagrant-libvirt plugin shell: bash run: vagrant plugin install vagrant-libvirt \ No newline at end of file diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 684051d199c1..3f8f41da58bf 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -9,6 +9,7 @@ on: - "!tests/e2e**" - "!tests/docker**" - ".github/**" + - "!.github/actions/**" - "!.github/workflows/e2e.yaml" pull_request: paths-ignore: @@ -19,6 +20,7 @@ on: - "!tests/e2e**" - "!tests/docker**" - ".github/**" + - "!.github/actions/**" - "!.github/workflows/e2e.yaml" workflow_dispatch: {} @@ -33,7 +35,7 @@ jobs: e2e: name: "E2E Tests" needs: build - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 40 strategy: fail-fast: false diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index 2cde5fc0a9c1..9821880adebf 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -38,7 +38,7 @@ jobs: strategy: fail-fast: false matrix: - itest: [certrotation, etcdrestore, localstorage, startup, custometcdargs, etcdsnapshot, kubeflags, longhorn, secretsencryption, flannelnone] + itest: [certrotation, cacertrotation, etcdrestore, localstorage, startup, custometcdargs, etcdsnapshot, kubeflags, longhorn, secretsencryption, flannelnone] max-parallel: 3 steps: - name: Checkout @@ -56,7 +56,7 @@ jobs: run: | chmod +x ./dist/artifacts/k3s mkdir -p $GOCOVERDIR - sudo -E env "PATH=$PATH" go test -v -timeout=45m ./tests/integration/${{ matrix.itest }}/... 
-run Integration + sudo -E env "PATH=$PATH" go test -timeout=45m ./tests/integration/${{ matrix.itest }}/... -run Integration -ginkgo.v -test.v - name: On Failure, Launch Debug Session uses: lhotari/action-upterm@v1 if: ${{ failure() }} @@ -71,4 +71,4 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: ./${{ matrix.itest }}.out flags: inttests # optional - verbose: true # optional (default = false) \ No newline at end of file + verbose: true # optional (default = false) diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml index 09b1b166b19c..46acda87ed5e 100644 --- a/.github/workflows/trivy.yaml +++ b/.github/workflows/trivy.yaml @@ -40,13 +40,19 @@ jobs: make package-image make tag-image-latest + - name: Download Rancher's VEX Hub report + run: curl -fsSO https://raw.githubusercontent.com/rancher/vexhub/refs/heads/main/reports/rancher.openvex.json + - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@0.24.0 + uses: aquasecurity/trivy-action@0.28.0 with: image-ref: 'rancher/k3s:latest' format: 'table' severity: "HIGH,CRITICAL" output: "trivy-report.txt" + env: + TRIVY_VEX: rancher.openvex.json + TRIVY_SHOW_SUPPRESSED: true - name: Upload Trivy Report uses: actions/upload-artifact@v4 @@ -93,4 +99,4 @@ jobs: steps: - name: Report Failure run: | - gh issue comment ${{ github.event.issue.number }} -b ":x: Trivy scan action failed, check logs :x:" \ No newline at end of file + gh issue comment ${{ github.event.issue.number }} -b ":x: Trivy scan action failed, check logs :x:" diff --git a/.github/workflows/unitcoverage.yaml b/.github/workflows/unitcoverage.yaml index 7f30a6ef57dd..33b42431ff15 100644 --- a/.github/workflows/unitcoverage.yaml +++ b/.github/workflows/unitcoverage.yaml @@ -28,7 +28,7 @@ permissions: jobs: test: name: Unit Tests - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 20 steps: - name: Checkout diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 68642e04ada3..c797447bf9cf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,9 +14,9 @@ If you're interested in contributing documentation, please note the following: If you're interested in contributing new tests, please see the [TESTING.md](./tests/TESTING.md). -## Code Convetion +## Code Convention -See the [code convetions documentation](./docs/contrib/code_conventions.md) for more information on how to write code for K3s. +See the [code conventions documentation](./docs/contrib/code_conventions.md) for more information on how to write code for K3s. ### Opening PRs and organizing commits PRs should generally address only 1 issue at a time. If you need to fix two bugs, open two separate PRs. This will keep the scope of your pull requests smaller and allow them to be reviewed and merged more quickly. 
diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 76aabbf2e1b1..bdd5b5c67c53 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -1,4 +1,4 @@ -ARG GOLANG=golang:1.22.6-alpine3.20 +ARG GOLANG=golang:1.22.8-alpine3.20 FROM ${GOLANG} # Set proxy environment variables @@ -22,7 +22,7 @@ RUN apk -U --no-cache add \ RUN PIPX_BIN_DIR=/usr/local/bin pipx install awscli # Install Trivy -ENV TRIVY_VERSION="0.55.2" +ENV TRIVY_VERSION="0.56.2" RUN case "$(go env GOARCH)" in \ arm64) TRIVY_ARCH="ARM64" ;; \ amd64) TRIVY_ARCH="64bit" ;; \ diff --git a/Dockerfile.local b/Dockerfile.local index ba985ef1da02..a0270dc73304 100644 --- a/Dockerfile.local +++ b/Dockerfile.local @@ -1,4 +1,4 @@ -ARG GOLANG=golang:1.22.6-alpine3.19 +ARG GOLANG=golang:1.22.8-alpine3.19 FROM ${GOLANG} as infra ARG http_proxy=$http_proxy diff --git a/Dockerfile.manifest b/Dockerfile.manifest index f304672f69de..3681af7fb73c 100644 --- a/Dockerfile.manifest +++ b/Dockerfile.manifest @@ -1,4 +1,4 @@ -ARG GOLANG=golang:1.22.6-alpine3.20 +ARG GOLANG=golang:1.22.8-alpine3.20 FROM ${GOLANG} COPY --from=plugins/manifest:1.2.3 /bin/* /bin/ diff --git a/Dockerfile.test b/Dockerfile.test index 2f9b0676894b..bf1aee3161fb 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -1,4 +1,4 @@ -ARG GOLANG=golang:1.22.6-alpine3.20 +ARG GOLANG=golang:1.22.8-alpine3.20 FROM ${GOLANG} as test-base RUN apk -U --no-cache add bash jq diff --git a/README.md b/README.md index f5c142ac6016..6a9e8d2d59fe 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ What's with the name? We wanted an installation of Kubernetes that was half the size in terms of memory footprint. Kubernetes is a 10 letter word stylized as k8s. So something half as big as Kubernetes would be a 5 letter word stylized as -K3s. There is neither a long-form of K3s nor official pronunciation. +K3s. A '3' is also an '8' cut in half vertically. There is neither a long-form of K3s nor official pronunciation. Is this a fork? --------------- diff --git a/channel.yaml b/channel.yaml index 1469ea9e9f6e..bf7b76cf46f8 100644 --- a/channel.yaml +++ b/channel.yaml @@ -1,7 +1,7 @@ # Example channels config channels: - name: stable - latest: v1.30.5+k3s1 + latest: v1.30.6+k3s1 - name: latest latestRegexp: .* excludeRegexp: (^[^+]+-|v1\.25\.5\+k3s1|v1\.26\.0\+k3s1) diff --git a/cmd/k3s/main.go b/cmd/k3s/main.go index 0b3cad81d423..52d5cefbf62e 100644 --- a/cmd/k3s/main.go +++ b/cmd/k3s/main.go @@ -30,6 +30,10 @@ var criDefaultConfigPath = "/etc/crictl.yaml" // main entrypoint for the k3s multicall binary func main() { + if findDebug(os.Args) { + logrus.SetLevel(logrus.DebugLevel) + } + dataDir := findDataDir(os.Args) // Handle direct invocation via symlink alias (multicall binary behavior) @@ -87,6 +91,24 @@ func main() { } } +// findDebug reads debug settings from the environment, CLI args, and config file. +func findDebug(args []string) bool { + debug, _ := strconv.ParseBool(os.Getenv(version.ProgramUpper + "_DEBUG")) + if debug { + return debug + } + fs := pflag.NewFlagSet("debug-set", pflag.ContinueOnError) + fs.ParseErrorsWhitelist.UnknownFlags = true + fs.SetOutput(io.Discard) + fs.BoolVarP(&debug, "debug", "", false, "(logging) Turn on debug logs") + fs.Parse(args) + if debug { + return debug + } + debug, _ = strconv.ParseBool(configfilearg.MustFindString(args, "debug")) + return debug +} + // findDataDir reads data-dir settings from the environment, CLI args, and config file. 
// If not found, the default will be used, which varies depending on whether // k3s is being run as root or not. @@ -280,11 +302,6 @@ func extract(dataDir string) (string, error) { return "", err } - // Rename the new directory into place, before updating symlinks - if err := os.Rename(tempDest, dir); err != nil { - return "", err - } - // Create a stable CNI bin dir and place it first in the path so that users have a // consistent location to drop their own CNI plugin binaries. cniPath := filepath.Join(dataDir, "data", "cni") @@ -292,19 +309,38 @@ func extract(dataDir string) (string, error) { if err := os.MkdirAll(cniPath, 0755); err != nil { return "", err } + // Create symlink that points at the cni multicall binary itself + logrus.Debugf("Creating symlink %s -> %s", filepath.Join(cniPath, "cni"), cniBin) + os.Remove(filepath.Join(cniPath, "cni")) if err := os.Symlink(cniBin, filepath.Join(cniPath, "cni")); err != nil { return "", err } // Find symlinks that point to the cni multicall binary, and clone them in the stable CNI bin dir. - ents, err := os.ReadDir(filepath.Join(dir, "bin")) + // Non-symlink plugins in the stable CNI bin dir will not be overwritten, to allow users to replace our + // CNI plugins with their own versions if they want. Note that the cni multicall binary itself is always + // symlinked into the stable bin dir and should not be replaced. + ents, err := os.ReadDir(filepath.Join(tempDest, "bin")) if err != nil { return "", err } for _, ent := range ents { if info, err := ent.Info(); err == nil && info.Mode()&fs.ModeSymlink != 0 { - if target, err := os.Readlink(filepath.Join(dir, "bin", ent.Name())); err == nil && target == "cni" { - if err := os.Symlink(cniBin, filepath.Join(cniPath, ent.Name())); err != nil { + if target, err := os.Readlink(filepath.Join(tempDest, "bin", ent.Name())); err == nil && target == "cni" { + src := filepath.Join(cniPath, ent.Name()) + // Check if plugin already exists in stable CNI bin dir + if info, err := os.Lstat(src); err == nil { + if info.Mode()&fs.ModeSymlink != 0 { + // Exists and is a symlink, remove it so we can create a new symlink for the new bin. + os.Remove(src) + } else { + // Not a symlink, leave it alone + logrus.Debugf("Not replacing non-symlink CNI plugin %s with mode %O", src, info.Mode()) + continue + } + } + logrus.Debugf("Creating symlink %s -> %s", src, cniBin) + if err := os.Symlink(cniBin, src); err != nil { return "", err } } @@ -324,6 +360,12 @@ func extract(dataDir string) (string, error) { return "", err } + // Rename the new directory into place after updating symlinks, so that the k3s binary check at the start + // of this function only succeeds if everything else has been completed successfully. + if err := os.Rename(tempDest, dir); err != nil { + return "", err + } + return dir, nil } diff --git a/docs/adrs/remove-svclb-daemonset.md b/docs/adrs/remove-svclb-daemonset.md new file mode 100644 index 000000000000..f3e02458716e --- /dev/null +++ b/docs/adrs/remove-svclb-daemonset.md @@ -0,0 +1,96 @@ +# Remove svclb daemonset + +Date: 2024-09-26 + +## Status + +Not approved + +## Context + +There are three types of services in Kubernetes: +* ClusterIP +* NodePort +* LoadBalancer + +If we want to expose a service to external clients, i.e. clients outside of the Kubernetes cluster, we need to use NodePort or LoadBalancer types. The latter uses an externalIP, normally a publicIP, which can be easily reached from external clients.
To support LoadBalancer service types, an external controller (loadbalancer controller) is required. + +The loadbalancer controller takes care of three tasks: +1 - Watches the kube-api for services of type LoadBalancer +2 - Sets up the infrastructure to provide the connectivity (externalIP ==> service) +3 - Sets the externalIP + +K3s embeds a simple [loadbalancer controller](https://github.com/k3s-io/k3s/tree/master/pkg/cloudprovider) that we call svclb, which has been part of K3s since its inception. When a new service of type LoadBalancer comes up, svclb [creates a daemonset](https://github.com/k3s-io/k3s/blob/master/pkg/cloudprovider/loadbalancer.go#L35). That daemonset uses [hostPort](https://github.com/k3s-io/k3s/blob/master/pkg/cloudprovider/servicelb.go#L526-L531) to reserve the service port on all nodes. Subsequently, the serviceLB controller queries the daemonset pods [to learn the node IPs](https://github.com/k3s-io/k3s/blob/master/pkg/cloudprovider/servicelb.go#L291) and sets those node IPs as [the externalIPs for the service](https://github.com/k3s-io/k3s/blob/master/pkg/cloudprovider/servicelb.go#L299). + +When an external client wants to reach the service, it needs to point at any of the node IPs and use the service port. The flow of traffic is the following: +1 - Traffic reaches the node +2 - Because hostPort reserves the service port on the node, traffic is forwarded to the daemonset pod +3 - The daemonset pod, [using the klipper-lb image](https://github.com/k3s-io/klipper-lb), applies some iptables magic that replaces the destination IP with the clusterIP of the desired service +4 - Traffic gets routed to the service using regular Kubernetes networking + +However, after some investigation, it was found that traffic never reaches the daemonset pod. The reason is that when a service gets an externalIP, kube-proxy reacts by adding a new rule to the iptables chain `KUBE-SERVICES`. This rule also replaces the destination IP with the clusterIP of the desired service. Moreover, the `KUBE-SERVICES` chain comes before the hostPort logic, and hence this is the path the traffic takes. + +EXAMPLE: + +Imagine a two-node cluster. The traefik service uses type LoadBalancer for two ports: 80 and 443.
It gets 4 external IPs (2 IPv4 and 2 IPv6): +``` +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default kubernetes ClusterIP 10.43.0.1 443/TCP 56m +kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 56m +kube-system metrics-server ClusterIP 10.43.55.117 443/TCP 56m +kube-system traefik LoadBalancer 10.43.206.216 10.1.1.13,10.1.1.16,fd56:5da5:a285:eea0::6,fd56:5da5:a285:eea0::8 80:30235/TCP,443:32373/TCP 56m +``` + +In iptables, in the OUTPUT chain, we can observe that the `KUBE-SERVICES` chain comes before `CNI-HOSTPORT-DNAT`, which is the chain taking care of the hostPort functionality: +``` +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ +CNI-HOSTPORT-DNAT all -- 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL +``` + +In the KUBE-SERVICES chain, we can observe that there is one rule for each external-IP & port pair; these rules start with `KUBE-EXT-`: +``` +Chain KUBE-SERVICES (2 references) +target prot opt source destination +KUBE-SVC-Z4ANX4WAEWEBLCTM tcp -- 0.0.0.0/0 10.43.55.117 /* kube-system/metrics-server:https cluster IP */ tcp dpt:443 +KUBE-SVC-UQMCRMJZLI3FTLDP tcp -- 0.0.0.0/0 10.43.206.216 /* kube-system/traefik:web cluster IP */ tcp dpt:80 +KUBE-EXT-UQMCRMJZLI3FTLDP tcp -- 0.0.0.0/0 10.1.1.13 /* kube-system/traefik:web loadbalancer IP */ tcp dpt:80 +KUBE-EXT-UQMCRMJZLI3FTLDP tcp -- 0.0.0.0/0 10.1.1.16 /* kube-system/traefik:web loadbalancer IP */ tcp dpt:80 +KUBE-SVC-CVG3OEGEH7H5P3HQ tcp -- 0.0.0.0/0 10.43.206.216 /* kube-system/traefik:websecure cluster IP */ tcp dpt:443 +KUBE-EXT-CVG3OEGEH7H5P3HQ tcp -- 0.0.0.0/0 10.1.1.13 /* kube-system/traefik:websecure loadbalancer IP */ tcp dpt:443 +KUBE-EXT-CVG3OEGEH7H5P3HQ tcp -- 0.0.0.0/0 10.1.1.16 /* kube-system/traefik:websecure loadbalancer IP */ tcp dpt:443 +KUBE-SVC-NPX46M4PTMTKRN6Y tcp -- 0.0.0.0/0 10.43.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 +KUBE-SVC-JD5MR3NA4I4DYORP tcp -- 0.0.0.0/0 10.43.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 +KUBE-SVC-TCOU7JCQXEZGVUNU udp -- 0.0.0.0/0 10.43.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 +KUBE-SVC-ERIFXISQEP7F7OF4 tcp -- 0.0.0.0/0 10.43.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 +KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL +``` + +Those `KUBE-EXT` chains end up calling the rule starting with `KUBE-SVC-`, which replaces the destination IP with the IP of one of the pods implementing the service. For example: +``` +Chain KUBE-EXT-CVG3OEGEH7H5P3HQ (4 references) +target prot opt source destination +KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 /* masquerade traffic for kube-system/traefik:websecure external destinations */ +KUBE-SVC-CVG3OEGEH7H5P3HQ all -- 0.0.0.0/0 0.0.0.0/0 +``` + +As a consequence, the traffic never gets into the svclb daemonset pod. This can be demonstrated by running tcpdump on the svclb daemonset pod: no traffic appears. It can also be demonstrated by tracing the iptables flow, which shows traffic following the path described above. + +Therefore, if we replace the serviceLB controller's logic for finding the node IPs with something that does not require the svclb daemonset, we could get rid of that daemonset entirely, since traffic never reaches it (see the sketch below).
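+As a rough sketch of that replacement (illustrative only, not the actual serviceLB controller code; it assumes client-go with in-cluster credentials, and the `listNodeIPs` helper is hypothetical), the controller could list node addresses straight from the kube-api:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+// listNodeIPs returns the addresses of all nodes in the cluster, which could
+// then be set as the externalIPs of a LoadBalancer service without running a
+// daemonset just to discover them.
+func listNodeIPs(ctx context.Context, client kubernetes.Interface) ([]string, error) {
+	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	var ips []string
+	for _, node := range nodes.Items {
+		for _, addr := range node.Status.Addresses {
+			// Collect both external and internal node addresses.
+			if addr.Type == corev1.NodeExternalIP || addr.Type == corev1.NodeInternalIP {
+				ips = append(ips, addr.Address)
+			}
+		}
+	}
+	return ips, nil
+}
+
+func main() {
+	cfg, err := rest.InClusterConfig()
+	if err != nil {
+		panic(err)
+	}
+	ips, err := listNodeIPs(context.Background(), kubernetes.NewForConfigOrDie(cfg))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(ips)
+}
+```
+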
That replacement should be easy: a daemonset runs on all nodes anyway, so we could simply query the kube-api for the IPs of all nodes. + + +## Decision + +There is one use case where klipper-lb is still needed. When deploying in a public cloud and using the publicIP as the --node-external-ip, kube-proxy expects the publicIP to be the destination IP. However, public clouds normally perform DNAT, so kube-proxy's rule will never match because the incoming packet no longer carries the publicIP. In that case, the packet is still able to reach the service because the hostPort functionality of the svclb daemonset pushes the packet into svclb, and klipper-lb then routes it to the service. Conclusion: klipper-lb is needed. + +## Consequences + +### Positives +* Less resource consumption, as we won't need one daemonset per LoadBalancer service +* One fewer repo to maintain (klipper-lb) +* An easier-to-understand flow of traffic + +### Negatives +* Possible confusion for users that have been using this feature for a long time ("Where is the daemonset?") or users relying on that daemonset for their automation +* In today's solution, if two services of type LoadBalancer use the same port, it is rather easy to notice that things don't work: the second daemonset will not deploy, because the port is already taken by the first one. Kube-proxy does not check whether two services use the same port; it creates both rules without any error, and the service whose rules sit higher in the chain is the one reached when querying $nodeIP:$port. Perhaps we could add some logic in the controller that warns users about a duplicated ip&port pair. diff --git a/go.mod b/go.mod index 559588e5573b..ec3ee90ef8c1 100644 --- a/go.mod +++ b/go.mod @@ -6,10 +6,9 @@ replace ( github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.11.7 github.com/Mirantis/cri-dockerd => github.com/k3s-io/cri-dockerd v0.3.15-k3s1.31-3 //v1.31 github.com/cloudnativelabs/kube-router/v2 => github.com/k3s-io/kube-router/v2 v2.2.1 - github.com/containerd/containerd => github.com/k3s-io/containerd v1.7.21-k3s2 + github.com/containerd/containerd => github.com/k3s-io/containerd v1.7.22-k3s1 github.com/containerd/imgcrypt => github.com/containerd/imgcrypt v1.1.11 github.com/distribution/reference => github.com/distribution/reference v0.5.0 - github.com/docker/cli => github.com/docker/cli v27.1.2+incompatible github.com/docker/distribution => github.com/docker/distribution v2.8.3+incompatible github.com/docker/docker => github.com/docker/docker v25.0.6+incompatible github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.11.0 @@ -22,14 +21,14 @@ replace ( github.com/prometheus/common => github.com/prometheus/common v0.55.0 github.com/spegel-org/spegel => github.com/k3s-io/spegel v0.0.23-0.20240516234953-f3d2c4072314 github.com/ugorji/go => github.com/ugorji/go v1.2.11 - go.etcd.io/etcd/api/v3 => github.com/k3s-io/etcd/api/v3 v3.5.13-k3s1 - go.etcd.io/etcd/client/pkg/v3 => github.com/k3s-io/etcd/client/pkg/v3 v3.5.13-k3s1 - go.etcd.io/etcd/client/v2 => github.com/k3s-io/etcd/client/v2 v2.305.13-k3s1 - go.etcd.io/etcd/client/v3 => github.com/k3s-io/etcd/client/v3 v3.5.13-k3s1 - go.etcd.io/etcd/etcdutl/v3 => github.com/k3s-io/etcd/etcdutl/v3 v3.5.13-k3s1 - go.etcd.io/etcd/pkg/v3 => github.com/k3s-io/etcd/pkg/v3 v3.5.13-k3s1 - go.etcd.io/etcd/raft/v3 => github.com/k3s-io/etcd/raft/v3 v3.5.13-k3s1 - go.etcd.io/etcd/server/v3 =>
github.com/k3s-io/etcd/server/v3 v3.5.13-k3s1 + go.etcd.io/etcd/api/v3 => github.com/k3s-io/etcd/api/v3 v3.5.16-k3s1 + go.etcd.io/etcd/client/pkg/v3 => github.com/k3s-io/etcd/client/pkg/v3 v3.5.16-k3s1 + go.etcd.io/etcd/client/v2 => github.com/k3s-io/etcd/client/v2 v2.305.16-k3s1 + go.etcd.io/etcd/client/v3 => github.com/k3s-io/etcd/client/v3 v3.5.16-k3s1 + go.etcd.io/etcd/etcdutl/v3 => github.com/k3s-io/etcd/etcdutl/v3 v3.5.16-k3s1 + go.etcd.io/etcd/pkg/v3 => github.com/k3s-io/etcd/pkg/v3 v3.5.16-k3s1 + go.etcd.io/etcd/raft/v3 => github.com/k3s-io/etcd/raft/v3 v3.5.16-k3s1 + go.etcd.io/etcd/server/v3 => github.com/k3s-io/etcd/server/v3 v3.5.16-k3s1 go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful => go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.44.0 golang.org/x/crypto => golang.org/x/crypto v0.24.0 golang.org/x/net => golang.org/x/net v0.26.0 @@ -37,39 +36,39 @@ replace ( google.golang.org/genproto => google.golang.org/genproto v0.0.0-20230525234035-dd9d682886f9 google.golang.org/grpc => google.golang.org/grpc v1.65.0 gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.6.0 - k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.31.1-k3s3 - k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.31.1-k3s3 - k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.31.1-k3s3 - k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.31.1-k3s3 - k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.31.1-k3s3 - k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.31.1-k3s3 - k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.31.1-k3s3 - k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.31.1-k3s3 - k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.31.1-k3s3 - k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.31.1-k3s3 - k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.31.1-k3s3 - k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.31.1-k3s3 - k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.31.1-k3s3 - k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.31.1-k3s3 - k8s.io/dynamic-resource-allocation => github.com/k3s-io/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v1.31.1-k3s3 - k8s.io/endpointslice => github.com/k3s-io/kubernetes/staging/src/k8s.io/endpointslice v1.31.1-k3s3 + k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.31.2-k3s1 + k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.31.2-k3s1 + k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.31.2-k3s1 + k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.31.2-k3s1 + k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.31.2-k3s1 + k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.31.2-k3s1 + k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.31.2-k3s1 + k8s.io/cluster-bootstrap => 
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.31.2-k3s1 + k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.31.2-k3s1 + k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.31.2-k3s1 + k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.31.2-k3s1 + k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.31.2-k3s1 + k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.31.2-k3s1 + k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.31.2-k3s1 + k8s.io/dynamic-resource-allocation => github.com/k3s-io/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v1.31.2-k3s1 + k8s.io/endpointslice => github.com/k3s-io/kubernetes/staging/src/k8s.io/endpointslice v1.31.2-k3s1 k8s.io/klog => github.com/k3s-io/klog v1.0.0-k3s2 // k3s-release-1.x k8s.io/klog/v2 => github.com/k3s-io/klog/v2 v2.120.1-k3s1 // k3s-main - k8s.io/kms => github.com/k3s-io/kubernetes/staging/src/k8s.io/kms v1.31.1-k3s3 - k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.31.1-k3s3 - k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.31.1-k3s3 - k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.31.1-k3s3 - k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.31.1-k3s3 - k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.31.1-k3s3 - k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.31.1-k3s3 - k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.31.1-k3s3 - k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.31.1-k3s3 - k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.31.1-k3s3 - k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.31.1-k3s3 - k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.31.1-k3s3 - k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.31.1-k3s3 - k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.31.1-k3s3 - k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.31.1-k3s3 + k8s.io/kms => github.com/k3s-io/kubernetes/staging/src/k8s.io/kms v1.31.2-k3s1 + k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.31.2-k3s1 + k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.31.2-k3s1 + k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.31.2-k3s1 + k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.31.2-k3s1 + k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.31.2-k3s1 + k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.31.2-k3s1 + k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.31.2-k3s1 + k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.31.2-k3s1 + k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.31.2-k3s1 + k8s.io/node-api => 
github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.31.2-k3s1 + k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.31.2-k3s1 + k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.31.2-k3s1 + k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.31.2-k3s1 + k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.31.2-k3s1 sigs.k8s.io/cri-tools => github.com/k3s-io/cri-tools v1.31.0-k3s2 sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.6.0 ) @@ -93,23 +92,21 @@ require ( github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/go-logr/logr v1.4.2 github.com/go-logr/stdr v1.2.3-0.20220714215716-96bad1d688c5 - github.com/go-sql-driver/mysql v1.7.1 github.com/go-test/deep v1.0.7 github.com/golang/mock v1.6.0 github.com/google/cadvisor v0.49.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.1 + github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/joho/godotenv v1.5.1 github.com/json-iterator/go v1.1.12 - github.com/k3s-io/helm-controller v0.16.4 - github.com/k3s-io/kine v0.13.0 - github.com/klauspost/compress v1.17.9 - github.com/lib/pq v1.10.2 + github.com/k3s-io/helm-controller v0.16.5 + github.com/k3s-io/kine v0.13.2 + github.com/klauspost/compress v1.17.10 github.com/libp2p/go-libp2p v0.33.2 - github.com/mattn/go-sqlite3 v1.14.19 github.com/minio/minio-go/v7 v7.0.70 github.com/mwitkow/go-http-dialer v0.0.0-20161116154839-378f744fb2b8 github.com/natefinch/lumberjack v2.0.0+incompatible @@ -125,7 +122,7 @@ require ( github.com/rancher/lasso v0.0.0-20240724174736-24ab3dbf26f0 github.com/rancher/permissions v0.0.0-20240523180510-4001d3d637f7 github.com/rancher/remotedialer v0.4.1 - github.com/rancher/wharfie v0.6.4 + github.com/rancher/wharfie v0.6.7 github.com/rancher/wrangler/v3 v3.0.0-rc2 github.com/robfig/cron/v3 v3.0.1 github.com/rootless-containers/rootlesskit v1.0.1 @@ -136,32 +133,33 @@ require ( github.com/urfave/cli v1.22.15 github.com/vishvananda/netlink v1.2.1-beta.2 github.com/yl2chen/cidranger v1.0.2 - go.etcd.io/etcd/api/v3 v3.5.15 - go.etcd.io/etcd/client/v3 v3.5.14 + go.etcd.io/etcd/api/v3 v3.5.16 + go.etcd.io/etcd/client/pkg/v3 v3.5.16 + go.etcd.io/etcd/client/v3 v3.5.16 go.etcd.io/etcd/etcdutl/v3 v3.5.13 - go.etcd.io/etcd/server/v3 v3.5.13 - golang.org/x/crypto v0.25.0 + go.etcd.io/etcd/server/v3 v3.5.16 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.27.0 golang.org/x/net v0.28.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 - google.golang.org/grpc v1.66.0 + golang.org/x/sys v0.25.0 + google.golang.org/grpc v1.67.0 gopkg.in/yaml.v2 v2.4.0 - inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252 - k8s.io/api v0.31.1 - k8s.io/apimachinery v0.31.1 - k8s.io/apiserver v0.31.1 - k8s.io/cli-runtime v0.31.1-rc.1 + k8s.io/api v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/apiserver v0.31.2 + k8s.io/cli-runtime v0.31.2 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/cloud-provider v0.31.1 + k8s.io/cloud-provider v0.31.2 k8s.io/cluster-bootstrap v0.0.0 - k8s.io/component-base v0.31.1 - k8s.io/component-helpers v0.31.1 + k8s.io/component-base v0.31.2 + k8s.io/component-helpers v0.31.2 k8s.io/cri-api v0.32.0-alpha.0 - k8s.io/cri-client v0.31.1 + k8s.io/cri-client v0.31.2 k8s.io/klog/v2 v2.130.1 
k8s.io/kube-proxy v0.0.0 - k8s.io/kubectl v0.31.1-rc.1 - k8s.io/kubernetes v1.31.1 + k8s.io/kubectl v0.31.2 + k8s.io/kubernetes v1.31.2 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/cri-tools v0.0.0-00010101000000-000000000000 sigs.k8s.io/yaml v1.4.0 @@ -182,6 +180,7 @@ require ( require ( dario.cat/mergo v1.0.1 // indirect + filippo.io/edwards25519 v1.1.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -258,6 +257,7 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/flock v0.8.1 // indirect @@ -270,7 +270,7 @@ require ( github.com/google/cel-go v0.20.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.14.0 // indirect + github.com/google/go-containerregistry v0.20.2 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect @@ -300,9 +300,9 @@ require ( github.com/ipld/go-ipld-prime v0.20.0 // indirect github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgx/v5 v5.5.4 // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.7.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect @@ -332,6 +332,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-sqlite3 v1.14.23 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect @@ -423,10 +424,9 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.etcd.io/bbolt v1.3.11 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect - go.etcd.io/etcd/client/v2 v2.305.13 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.13 // indirect - go.etcd.io/etcd/raft/v3 v3.5.13 // indirect + go.etcd.io/etcd/client/v2 v2.305.16 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.16 // indirect + go.etcd.io/etcd/raft/v3 v3.5.16 // indirect go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 // indirect @@ -445,28 +445,27 @@ require ( go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // 
indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.24.0 // indirect golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 // indirect gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.1 // indirect - k8s.io/code-generator v0.31.1 // indirect + k8s.io/apiextensions-apiserver v0.31.2 // indirect + k8s.io/code-generator v0.31.2 // indirect k8s.io/controller-manager v0.25.4 // indirect k8s.io/csi-translation-lib v0.0.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect @@ -474,13 +473,13 @@ require ( k8s.io/gengo v0.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/kms v0.0.0 // indirect - k8s.io/kube-aggregator v0.31.1 // indirect + k8s.io/kube-aggregator v0.31.2 // indirect k8s.io/kube-controller-manager v0.0.0 // indirect k8s.io/kube-openapi v0.0.0-20240730131305-7a9a4e85957e // indirect k8s.io/kube-scheduler v0.0.0 // indirect - k8s.io/kubelet v0.31.1 // indirect + k8s.io/kubelet v0.31.2 // indirect k8s.io/metrics v0.0.0 // indirect - k8s.io/mount-utils v0.31.1 // indirect + k8s.io/mount-utils v0.31.2 // indirect k8s.io/pod-security-admission v0.0.0 // indirect lukechampine.com/blake3 v1.2.1 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect diff --git a/go.sum b/go.sum index 30950b72cd65..fd8a347d59d5 100644 --- a/go.sum +++ b/go.sum @@ -222,6 +222,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= @@ -283,6 +285,7 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -493,6 +496,9 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUn github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v23.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v27.1.2+incompatible h1:nYviRv5Y+YAKx3dFrTvS1ErkyVVunKOhoweCTE1BsnI= github.com/docker/cli v27.1.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= @@ -623,8 +629,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -720,8 +726,9 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= +github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= +github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-github v17.0.0+incompatible/go.mod 
h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -864,6 +871,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c h1:gYfYE403/nlrGNYj6BEOs9ucLCAGB9gstlSk92DttTg= +github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI= github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk= github.com/intel/goresctrl v0.7.0 h1:x6RclP6LiJc24t9mf47BRbjf06B8oVisZMBv31x3rKc= github.com/intel/goresctrl v0.7.0/go.mod h1:T3ZZnuHSNouwELB5wvOoUJaB7l/4Rm23rJy/wuWJlr0= @@ -896,12 +905,12 @@ github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6 github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= -github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -933,90 +942,90 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k3s-io/containerd v1.7.21-k3s2 h1:QnveYvdMinAgN2vDlkulVMrpnde0irZv2q6dJsDQmBw= -github.com/k3s-io/containerd v1.7.21-k3s2/go.mod h1:T9perze1nIMl5JzddImIgsCEDaM0i8nAfnm+U48DmJw= +github.com/k3s-io/containerd 
v1.7.22-k3s1 h1:+StsyV/pl4NL5gDA5dzcPi4anuhCI4ONuzCwjBwjrUE= +github.com/k3s-io/containerd v1.7.22-k3s1/go.mod h1:T9perze1nIMl5JzddImIgsCEDaM0i8nAfnm+U48DmJw= github.com/k3s-io/cri-dockerd v0.3.15-k3s1.31-3 h1:TH3zSbIM9zSZMeWKcWjQqeja3FsmJYwLUHglD7nuUEc= github.com/k3s-io/cri-dockerd v0.3.15-k3s1.31-3/go.mod h1:ny6wyM7fqfew5FABQ+MtKaU07rhsHhlXr/jtFsA2m8Y= github.com/k3s-io/cri-tools v1.31.0-k3s2 h1:nekOdJe5Hecm+C5eswg688uXTI0enUZOJYadmyU9pYw= github.com/k3s-io/cri-tools v1.31.0-k3s2/go.mod h1:PvPf/fN5FiNdK1v43jCydRNRw6631qGTSEOhv/OsjYU= -github.com/k3s-io/etcd/api/v3 v3.5.13-k3s1 h1:aq6fxlEKdwCooLE3HOR6227U51DEvOw3DEbriJxD2QM= -github.com/k3s-io/etcd/api/v3 v3.5.13-k3s1/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c= -github.com/k3s-io/etcd/client/pkg/v3 v3.5.13-k3s1 h1:t2I25UtBvohVAhlyXpYjd/Lznm+ybxNhvs3cnEGsF4Y= -github.com/k3s-io/etcd/client/pkg/v3 v3.5.13-k3s1/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8= -github.com/k3s-io/etcd/client/v2 v2.305.13-k3s1 h1:lvIdlAI6xRIHSUJC43sJx9lmxehq2quGb+8z5TJldGg= -github.com/k3s-io/etcd/client/v2 v2.305.13-k3s1/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= -github.com/k3s-io/etcd/client/v3 v3.5.13-k3s1 h1:/D6KAEGVzwivnjxZ5CzVIykVloLoKB/TBeKw2tKKVQ0= -github.com/k3s-io/etcd/client/v3 v3.5.13-k3s1/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI= -github.com/k3s-io/etcd/etcdutl/v3 v3.5.13-k3s1 h1:fIt+PVHCeINM5fl9OfMI+o9BJKf951pRiVcCytFW97c= -github.com/k3s-io/etcd/etcdutl/v3 v3.5.13-k3s1/go.mod h1:2vhvTIQobP+Cb04qzlcbKGvX6J5oq/N1kquk1yCDIQY= -github.com/k3s-io/etcd/pkg/v3 v3.5.13-k3s1 h1:uLU/SnBuhtSkdBk830x0pseHSsQQvh99C3deG6nc9d0= -github.com/k3s-io/etcd/pkg/v3 v3.5.13-k3s1/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= -github.com/k3s-io/etcd/raft/v3 v3.5.13-k3s1 h1:yexUwAPPdmYfIMWOj6sSyJ2nEe8QOrFzNuvYGRAsm5E= -github.com/k3s-io/etcd/raft/v3 v3.5.13-k3s1/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw= -github.com/k3s-io/etcd/server/v3 v3.5.13-k3s1 h1:Pqcxkg7V60c26ZpHoekP9QoUdLuduxFn827A/5CIwm4= -github.com/k3s-io/etcd/server/v3 v3.5.13-k3s1/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ= -github.com/k3s-io/helm-controller v0.16.4 h1:l1noZJacLT2C9JUgfwO9SV8XtfAv1J9avVXyzv8tYHo= -github.com/k3s-io/helm-controller v0.16.4/go.mod h1:AcSxEhOIUgeVvBTnJOAwcezBZXtYew/RhKwO5xp3RlM= -github.com/k3s-io/kine v0.13.0 h1:DVh9VDZYVlyJWKoPoIgGc1LAsoSwkZyazhWzQW15L2k= -github.com/k3s-io/kine v0.13.0/go.mod h1:L4x3qotFebVh1ZVzYwFVL5PPfqw2sRJTjDTIeViO70Y= +github.com/k3s-io/etcd/api/v3 v3.5.16-k3s1 h1:RNExemPFr4S+VqJ2jXVf0Y9iXaps0pTeklSN735z0Mw= +github.com/k3s-io/etcd/api/v3 v3.5.16-k3s1/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +github.com/k3s-io/etcd/client/pkg/v3 v3.5.16-k3s1 h1:wEmVFnZ+h3v5ECRmX6jf4SeIykDQei+DRnBczM23YQA= +github.com/k3s-io/etcd/client/pkg/v3 v3.5.16-k3s1/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +github.com/k3s-io/etcd/client/v2 v2.305.16-k3s1 h1:f7qWAqVhxrMdBt0coehUYfP0Cix7clL2ko/XCqvWols= +github.com/k3s-io/etcd/client/v2 v2.305.16-k3s1/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= +github.com/k3s-io/etcd/client/v3 v3.5.16-k3s1 h1:ON2Cd0Fx+qQ53GS6qK6Mr9fh7MFCShZfw+rsrLZ6j5M= +github.com/k3s-io/etcd/client/v3 v3.5.16-k3s1/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= +github.com/k3s-io/etcd/etcdutl/v3 v3.5.16-k3s1 h1:qUuDUhfhOZ3D6/XsW04jV514+DhV7R669+/+3n9i7VY= +github.com/k3s-io/etcd/etcdutl/v3 v3.5.16-k3s1/go.mod h1:X22QojXcHZNS3TPAitpcYW7rwTvnmchFwAKkSSz0Ncw= +github.com/k3s-io/etcd/pkg/v3 v3.5.16-k3s1 
h1:4nDx3la68jehJfqWPs1Yx1clPW7938pKQXrVxp2OgyA= +github.com/k3s-io/etcd/pkg/v3 v3.5.16-k3s1/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= +github.com/k3s-io/etcd/raft/v3 v3.5.16-k3s1 h1:nD/YzAeIbEcSkXAQFRwAs/2zc2vXAkKmQnDKf6UDCxY= +github.com/k3s-io/etcd/raft/v3 v3.5.16-k3s1/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= +github.com/k3s-io/etcd/server/v3 v3.5.16-k3s1 h1:9c0DChFw6WRz6r+eCuVLBltZcRwT6h1l79biTPuAGR0= +github.com/k3s-io/etcd/server/v3 v3.5.16-k3s1/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= +github.com/k3s-io/helm-controller v0.16.5 h1:SsUHfksQXNwePkswv4a970EGD2h0Exsf6t3IdXhpXRo= +github.com/k3s-io/helm-controller v0.16.5/go.mod h1:AcSxEhOIUgeVvBTnJOAwcezBZXtYew/RhKwO5xp3RlM= +github.com/k3s-io/kine v0.13.2 h1:l++g2KY/3UaPJiGpgYuGoqaaYKeMpVj9fP/yfnSxHxo= +github.com/k3s-io/kine v0.13.2/go.mod h1:Zi9F142tmeXVqhPjL6KHVnwOBs8wc/V5r3avKSpIHn0= github.com/k3s-io/klog/v2 v2.120.1-k3s1 h1:7twAHPFpZA21KdMnMNnj68STQMPldAxF2Zsaol57dxw= github.com/k3s-io/klog/v2 v2.120.1-k3s1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= github.com/k3s-io/kube-router/v2 v2.2.1 h1:LrU6l4khFt67+QCIvgok9B/C9JY/U2/TaF9TCVUw0vw= github.com/k3s-io/kube-router/v2 v2.2.1/go.mod h1:OWqBKftzZRXF79mDv1MLiYbvD7RP/kzrk5X5NupAawM= -github.com/k3s-io/kubernetes v1.31.1-k3s3 h1:ZI9L5470m0OnYR/S45ItBK1eOMMnJV7TirWoyRXhLMk= -github.com/k3s-io/kubernetes v1.31.1-k3s3/go.mod h1:/YGPL//Fb9mdv5vukvAQ7Xon+Bqwry52bmjTdORAw+Q= -github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.31.1-k3s3 h1:TQSxiv2OhgRczJFUFm0PXLlD4dyI2f+czmLfLnU3CAM= -github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.31.1-k3s3/go.mod h1:KaRi43US9TCyITVXRX8uu9F8GDZSFJYuOtgC4rjoR7g= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.31.1-k3s3 h1:EPlRDPSlWt/gBUzL5RPRRIuFCAKio9UuxwPNg5jqI44= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.31.1-k3s3/go.mod h1:kzodfhDvQWeYegyO3XMoHI9B1fB8S9F20GkJrMGyizk= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.31.1-k3s3 h1:l38/LexHRajuguKIFEeRP03tYLfCEYnuuoYZC7UeZTM= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.31.1-k3s3/go.mod h1:5F0wbie5xX1jDEg5sk5dr+KF8rwFkYtZFHDhSF/UsG4= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.31.1-k3s3 h1:17/ehFImJ6srv0rXPGJVVu/F4Han6UOftNaB8wsIT00= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.31.1-k3s3/go.mod h1:HJ1b6VgXrf+cWYPUzAO6sIKfdcHFZsASDzAp2Z99hQk= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.31.1-k3s3 h1:Q0rQqUEZaOkThQX3M8nQyviak1+qnFVbKMZPLbowzFE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.31.1-k3s3/go.mod h1:ajLlT5ogqDck94zgV1JL+dRaHBXR27MFxRqHg5ojeQA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.31.1-k3s3 h1:UXXbB4xE2hxIlrYt6ciU3IlSpEMv0izV8qpVwMTOMLs= -github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.31.1-k3s3/go.mod h1:acnXvvIK5YiMMAflXv5ug7ClSCjxYChU5y63sTDuv3I= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.31.1-k3s3 h1:lKmfpFyerEMPMvTu1YfP7y0AM5mczND1qi0ej7ZA1KE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.31.1-k3s3/go.mod h1:aq39X+7JBHuYgLje0hmtOFnNwDFGxC3kMnxy6TK0IfE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.31.1-k3s3 h1:zXgIDWlpxJrywlkDa/BrKkVhRdpEgATZQw4WGkDoceE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.31.1-k3s3/go.mod h1:sC54xeBDUh723Sh3/rBJpYmjuu2TFYRg8jbiGa+2gR4= -github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.31.1-k3s3 
h1:3WmrW88CB5S9ZzW3hPWf6T4COBuYMfLz4/RLG/kRdBc= -github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.31.1-k3s3/go.mod h1:kpHioo65EeiEH6lBGgJq/xueL4Kr7PUMp7J4J5TTg4c= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.31.1-k3s3 h1:WofC9Kx9nz6QS+4R0RFmJXDfh4z6/IhJva6pC/kdeyQ= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.31.1-k3s3/go.mod h1:PFOdhUAVD6O6z1yuKxWS3EKXSKdZhKjOlCrcuJRxOBA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.31.1-k3s3 h1:rNeCLEKT0woFNvdmFPaP04f/9HIyrZCu99t7rcrgv3s= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.31.1-k3s3/go.mod h1:DbKVutwtVincRfT23dk3SQ8uIyMSkFhsaNxqifftcDk= -github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.31.1-k3s3 h1:phjGDUMtOcBrGWW0WN8zC/B9EHfafr+FZCzpx8RFHEQ= -github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.31.1-k3s3/go.mod h1:zXRPI4iMORj8r8MtMpObg4dfVgb8ENDSnWcw5r5obp0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.31.1-k3s3 h1:264xhEjjdMHU4cvtI5AKXsTd7yQPnaz8hv3uABNaEQU= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.31.1-k3s3/go.mod h1:e2pTb6psrP2AtdW24SxJaesf2402rQ0YjNa7qYssoi0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.31.1-k3s3 h1:OQbIWCKJg44WLzri0MJJM5IbjDF/6xxAsSW+SCvlSMA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.31.1-k3s3/go.mod h1:Ygc1DPcdWYjsDbAxP+rIzSbU2D83HFsJ3Cp0bs3brio= -github.com/k3s-io/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v1.31.1-k3s3 h1:3hv/TBFmZilg1lUT7Ixaj+XnuLJ4l+EEmpkAJwu6LcI= -github.com/k3s-io/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v1.31.1-k3s3/go.mod h1:iyXR8oPR54q4bj4LWeUHHrULA/Nm0LjfiR3JHe08sak= -github.com/k3s-io/kubernetes/staging/src/k8s.io/endpointslice v1.31.1-k3s3 h1:3MiBA36oCFgFShfbZcvWpQqJevOntYeWKGH2aULHydo= -github.com/k3s-io/kubernetes/staging/src/k8s.io/endpointslice v1.31.1-k3s3/go.mod h1:VKJKC/px/hHUfsxDZmE+kezqfte7vi3ctPekcirZzMM= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kms v1.31.1-k3s3 h1:I8y5CZoIeXg2BsH1k2OIqS+VU+jSpyx29OnYWPaQe3Y= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kms v1.31.1-k3s3/go.mod h1:gClzb5q8LLAagWlaL9S/rt8IcU3iY6gRARKN09DY4o8= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.31.1-k3s3 h1:/xR9OR86+iB4CK0YcFKqJ7mNFAhw4+a5Iz91lZNX2kY= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.31.1-k3s3/go.mod h1:xc/cVRFh9PdcOsDAnHdB0fuQWih/w8hxbF8Ca4FXHSU= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.31.1-k3s3 h1:8PhQeiS0IelPHPwmJoMQWBsCOK5wtcoZTJPui7Wojz0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.31.1-k3s3/go.mod h1:l7Pv6S9sCmzj5ZLTQIf/twX4WrJ6D7WOPbDUfkllT0c= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.31.1-k3s3 h1:dNX5ccNaE4/GZGnZVZI3W5tLOX/oiotS38pwbqgKhTA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.31.1-k3s3/go.mod h1:oJOlcxyiohRkqraS+VHuxGd0oMp3N60mbMTaP3YTW0w= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.31.1-k3s3 h1:GUzPqwirzsvb5IGtMg/0e2eqw0M8tqoqsWvRSxtY0ik= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.31.1-k3s3/go.mod h1:z259VFq5HmPqCcBj78z2uPawedjLkjRagQ98aPQKG8c= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.31.1-k3s3 h1:PuRt73t3aNs0qnvmA9K4X9YVCx4ZS4UkeUQyu2CMwYk= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.31.1-k3s3/go.mod h1:upu9UjdY08ZgnQwjd8rlxSN5in0Yl2JrtHilExOCsfI= 
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.31.1-k3s3 h1:myVvgreZwN4LhIpi5uxXKQQ+P7QGtcng3k4/wQg2V5k= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.31.1-k3s3/go.mod h1:PeBIZnl5Zg5qaT6JFfsDBaw0IlAL4F3mEVy9VovTW6k= -github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.31.1-k3s3 h1:FoCs0NCkHBKUgr2mIVsX0MB6EGIIGZbCpmKEFNFfqgw= -github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.31.1-k3s3/go.mod h1:CXuAS4zkSBWhGd++sFZlq3a7qfcMbGGwJvRn4W0/378= -github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.31.1-k3s3 h1:9EZH/VYYTSQ06/qMHExPJn9oFdcqzZ+mma7Yx2mY+1I= -github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.31.1-k3s3/go.mod h1:p5r0u2M9KzooTgHDz4zRsUt02y4Yx7/5uPwgr0nSGqg= -github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.31.1-k3s3 h1:XJbSUNXUSGUgSfokPZJRaWCAk3+YLQdK4oX4boUXRvA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.31.1-k3s3/go.mod h1:nOJes2FVv6qZXUU3CGubLnloPJVV4rZ+jm0bLRCKKOM= +github.com/k3s-io/kubernetes v1.31.2-k3s1 h1:6yy+3AB/eqDpVqQrDrCuF+A+XTYV5IwTIdS9BRnBHfs= +github.com/k3s-io/kubernetes v1.31.2-k3s1/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs= +github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.31.2-k3s1 h1:QQX2i43ogQpjgyY/d3vrU0tA2Sp7melbo1iwEPooljI= +github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.31.2-k3s1/go.mod h1:KaRi43US9TCyITVXRX8uu9F8GDZSFJYuOtgC4rjoR7g= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.31.2-k3s1 h1:xaosSjlEecL960F400qEGdIDPSYYcqebrchOJvyhLCA= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.31.2-k3s1/go.mod h1:kzodfhDvQWeYegyO3XMoHI9B1fB8S9F20GkJrMGyizk= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.31.2-k3s1 h1:9XfhWc/Se7/WOTz10J0QODxfKffCsgSdQ1hmLw8qHbQ= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.31.2-k3s1/go.mod h1:5F0wbie5xX1jDEg5sk5dr+KF8rwFkYtZFHDhSF/UsG4= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.31.2-k3s1 h1:CxAfyyQrefLVshbtNdy8L39BZWDshfOoiWHwjkEAnlk= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.31.2-k3s1/go.mod h1:mJ5S6E5i9O+hBQXJ8T8dEZn36vN6k29PpaFUsrpgKwg= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.31.2-k3s1 h1:imPVpIBZFjho0Bp1AEeyRe7y+mog/YFiI/D2cbfXYOk= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.31.2-k3s1/go.mod h1:ajLlT5ogqDck94zgV1JL+dRaHBXR27MFxRqHg5ojeQA= +github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.31.2-k3s1 h1:yDkN04T7KoIzyhZoN5Ch7xZa/AMxdye4lO4Q1oo7mUM= +github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.31.2-k3s1/go.mod h1:acnXvvIK5YiMMAflXv5ug7ClSCjxYChU5y63sTDuv3I= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.31.2-k3s1 h1:XJKw80i7VS7oJdaGGSW9OU9SvmpM8+nyYnyJvrhVO7Q= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.31.2-k3s1/go.mod h1:aq39X+7JBHuYgLje0hmtOFnNwDFGxC3kMnxy6TK0IfE= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.31.2-k3s1 h1:4DhADYQLIUlDgA501OrIkMrHbrWOUUT6YQIbXHwU1ns= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.31.2-k3s1/go.mod h1:sC54xeBDUh723Sh3/rBJpYmjuu2TFYRg8jbiGa+2gR4= +github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.31.2-k3s1 h1:1IAw7utxAxT7KacWqLQ7ricdxn3C+ft4Jgw1b9gT/SQ= +github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.31.2-k3s1/go.mod h1:kpHioo65EeiEH6lBGgJq/xueL4Kr7PUMp7J4J5TTg4c= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base 
v1.31.2-k3s1 h1:8O6ifjjCOgaMrXOvqw4kiNvRsWPNUkOQ12YpIEdd4Bs= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.31.2-k3s1/go.mod h1:PFOdhUAVD6O6z1yuKxWS3EKXSKdZhKjOlCrcuJRxOBA= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.31.2-k3s1 h1:Nsb2dBuBaNrLE7sV5kX+9Q1bzFIXq1IWM2YirAmr1Ic= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.31.2-k3s1/go.mod h1:DbKVutwtVincRfT23dk3SQ8uIyMSkFhsaNxqifftcDk= +github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.31.2-k3s1 h1:/JJmHhMdDWoOKYKN3ht6lnMsHn7zdncf0BoJqOeaqSM= +github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.31.2-k3s1/go.mod h1:zXRPI4iMORj8r8MtMpObg4dfVgb8ENDSnWcw5r5obp0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.31.2-k3s1 h1:PFOthOIaSrD/sgxYkHBd7rkY5jkRqO04v75057KNE1Q= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.31.2-k3s1/go.mod h1:e2pTb6psrP2AtdW24SxJaesf2402rQ0YjNa7qYssoi0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.31.2-k3s1 h1:SLATSMxHeyyQdy0+Mf+AITt5s+tX8ixe1a+tU4E8HMk= +github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.31.2-k3s1/go.mod h1:Ygc1DPcdWYjsDbAxP+rIzSbU2D83HFsJ3Cp0bs3brio= +github.com/k3s-io/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v1.31.2-k3s1 h1:3MTf9cFKHfsUCxL9viXqmDTpJIAd3MJjNjdBIvRpbEY= +github.com/k3s-io/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v1.31.2-k3s1/go.mod h1:iyXR8oPR54q4bj4LWeUHHrULA/Nm0LjfiR3JHe08sak= +github.com/k3s-io/kubernetes/staging/src/k8s.io/endpointslice v1.31.2-k3s1 h1:F6bdHN5iMtOdxSrLvwFm/EQz3p6U8S2O89qA25+EGkI= +github.com/k3s-io/kubernetes/staging/src/k8s.io/endpointslice v1.31.2-k3s1/go.mod h1:VKJKC/px/hHUfsxDZmE+kezqfte7vi3ctPekcirZzMM= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kms v1.31.2-k3s1 h1:A1Nivfx6TkhOsJy+UfdNiz7XZFKkCJQakbQntWRvaPo= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kms v1.31.2-k3s1/go.mod h1:gClzb5q8LLAagWlaL9S/rt8IcU3iY6gRARKN09DY4o8= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.31.2-k3s1 h1:aoR6N1TZ5gHcvcKGJd3EoycefWiFQxkYE6z4s7h1PeU= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.31.2-k3s1/go.mod h1:xc/cVRFh9PdcOsDAnHdB0fuQWih/w8hxbF8Ca4FXHSU= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.31.2-k3s1 h1:eEvwssCPwE+pLKiQjQHmk5PgdgpxuX9toAiAlqj3GAs= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.31.2-k3s1/go.mod h1:l7Pv6S9sCmzj5ZLTQIf/twX4WrJ6D7WOPbDUfkllT0c= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.31.2-k3s1 h1:zbvKs0ZX+qWmaypQw+0UMlSns5E+x0GF97PiukZhoFc= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.31.2-k3s1/go.mod h1:oJOlcxyiohRkqraS+VHuxGd0oMp3N60mbMTaP3YTW0w= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.31.2-k3s1 h1:emEWvPmVxHnZWmWkUPt/On2XF6qZV2lq6rF0aixvZEA= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.31.2-k3s1/go.mod h1:z259VFq5HmPqCcBj78z2uPawedjLkjRagQ98aPQKG8c= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.31.2-k3s1 h1:NBgxcyI8SHUPdYfNyT8haApxf5rkyb9vCxCfxP8yxr0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.31.2-k3s1/go.mod h1:upu9UjdY08ZgnQwjd8rlxSN5in0Yl2JrtHilExOCsfI= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.31.2-k3s1 h1:ZvCeWX7LT/E8f8ZIaJNNNrfNYKCGzqb9xX0zqoh7DD0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.31.2-k3s1/go.mod h1:PeBIZnl5Zg5qaT6JFfsDBaw0IlAL4F3mEVy9VovTW6k= 
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.31.2-k3s1 h1:TV0xgtJntHPOPYT/KGL4Yntv+vkkSoo1DS9tUUaAlfs= +github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.31.2-k3s1/go.mod h1:CXuAS4zkSBWhGd++sFZlq3a7qfcMbGGwJvRn4W0/378= +github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.31.2-k3s1 h1:Om9yYGdpAUwE74O7T5hMsWXCGl9sj9pE4synLXPrBes= +github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.31.2-k3s1/go.mod h1:p5r0u2M9KzooTgHDz4zRsUt02y4Yx7/5uPwgr0nSGqg= +github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.31.2-k3s1 h1:JPJvGUf0w7SvdoC06qiB795NfCAtoKWjQxxMqSCQBUs= +github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.31.2-k3s1/go.mod h1:nOJes2FVv6qZXUU3CGubLnloPJVV4rZ+jm0bLRCKKOM= github.com/k3s-io/runc v1.1.14-k3s1 h1:PcwbBuIfjI9A0T1fq7XIdIxqYHWarDlRln7QsppQQmQ= github.com/k3s-io/runc v1.1.14-k3s1/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA= github.com/k3s-io/spegel v0.0.23-0.20240516234953-f3d2c4072314 h1:TrZb/yM0OtBuifPXlKaOfcxpJqzakA8+KsoO4c69ZLM= @@ -1034,8 +1043,8 @@ github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -1067,8 +1076,6 @@ github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libopenstorage/openstorage v1.0.0 h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -1132,8 +1139,8 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= -github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= 
+github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= @@ -1443,8 +1450,8 @@ github.com/rancher/permissions v0.0.0-20240523180510-4001d3d637f7 h1:0Kg2SGoMeU1 github.com/rancher/permissions v0.0.0-20240523180510-4001d3d637f7/go.mod h1:fsbs0YOsGn1ofPD5p+BuI4qDhbMbSJtTegKt6Ucna+c= github.com/rancher/remotedialer v0.4.1 h1:jwOf2kPRjBBpSFofv1OuZHWaYHeC9Eb6/XgDvbkoTgc= github.com/rancher/remotedialer v0.4.1/go.mod h1:Ys004RpJuTLSm+k4aYUCoFiOOad37ubYev3TkOFg/5w= -github.com/rancher/wharfie v0.6.4 h1:JwYB+q661n8ut/ysgsjKe0P0z6bHCCFoC+29995ME90= -github.com/rancher/wharfie v0.6.4/go.mod h1:kWv97z0sMAbnVNT/oe+JFZJVKn4xkas7ZdFf6UifWis= +github.com/rancher/wharfie v0.6.7 h1:BhbBVJSLoDQMkZb+zVTLEKckUbq4sc3ZmEYqGakggSY= +github.com/rancher/wharfie v0.6.7/go.mod h1:ew49A9PzRsTngdzXIkgakfhMq3mHMA650HS1OVQpaNA= github.com/rancher/wrangler/v3 v3.0.0-rc2 h1:XGSPPp6GXELqlLvwJp5MsdqyCPu6SCA4UKJ7rQJzE40= github.com/rancher/wrangler/v3 v3.0.0-rc2/go.mod h1:f54hh7gFkwwbjsieT2b63FowzTU8FvrBonPe//0CIXo= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -1934,8 +1941,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2138,8 +2145,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -2163,8 +2170,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2233,10 +2240,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252 h1:gmJCKidOfjKDUHF1jjke+I+2iQIyE3HNNxu2OKO/FUI= -inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252/go.mod h1:zq+R+tLcdHugi7Jt+FtIQY6m6wtX34lr2CdQVH2fhW0= -k8s.io/cri-client v0.31.1 h1:w5D7BAhiaSVVDZqHs7YUZPpuUCybx8tCxfdBuDBw7zo= -k8s.io/cri-client v0.31.1/go.mod h1:voVfZexZQwvlf/JD8w30sGN0k22LRcHRfCj7+m4kAXE= +k8s.io/cri-client v0.31.2 h1:GLuzaFre8WueBohexPSUQEOMha3tBuacjb3PT10X8bI= +k8s.io/cri-client v0.31.2/go.mod h1:vUPilfcW9LSyp1Kbc1nuMu5IkyeF1HDqik6FeSAtUhk= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20240228010128-51d4e06bde70 h1:D9H6wq7PAmub2g4XUrekNWMFVI0JIz7s0F64HBPsPOw= k8s.io/gengo v0.0.0-20240228010128-51d4e06bde70/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= diff --git a/install.sh b/install.sh index c09b1b623acc..43ef0ce86907 100755 --- a/install.sh +++ b/install.sh @@ -280,11 +280,11 @@ can_skip_download_binary() { fi } -can_skip_download_selinux() { - if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then - return 1 - fi -} +can_skip_download_selinux() { + if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then + return 1 + fi +} # --- verify an executable k3s binary is installed --- verify_k3s_is_executable() { @@ -389,7 +389,7 @@ get_release_version() { get_k3s_selinux_version() { available_version="k3s-selinux-1.2-2.${rpm_target}.noarch.rpm" info "Finding available k3s-selinux versions" - + # run verify_downloader in case it binary installation was skipped verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files' @@ -556,7 +556,7 @@ setup_binary() { # --- setup selinux policy --- setup_selinux() { - case ${INSTALL_K3S_CHANNEL} in + case ${INSTALL_K3S_CHANNEL} in *testing) rpm_channel=testing ;; @@ -583,7 +583,8 @@ setup_selinux() { rpm_site_infix=slemicro package_installer=zypper fi - elif [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ] || [ "${VARIANT_ID:-}" = "iot" ]; then + elif [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ] || [ "${VARIANT_ID:-}" = "iot" ] || \ + { { [ "${ID:-}" = fedora ] || [ "${ID_LIKE:-}" = fedora ]; } && [ -n "${OSTREE_VERSION:-}" ]; }; then rpm_target=coreos rpm_site_infix=coreos package_installer=rpm-ostree @@ -618,7 +619,7 @@ setup_selinux() { info "Skipping installation of SELinux RPM" return fi - + get_k3s_selinux_version install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix} @@ -633,7 +634,8 @@ setup_selinux() { $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}" fi elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then - if [ -x /usr/sbin/transactional-update ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ] || [ "${VARIANT_ID:-}" = iot ]; then + if [ -x /usr/sbin/transactional-update ] || [ "${ID_LIKE:-}" = coreos ] || \ + { { [ "${ID:-}" = fedora ] || [ "${ID_LIKE:-}" = fedora ]; } && [ -n "${OSTREE_VERSION:-}" ]; }; then warn "Please reboot your machine to activate the changes and avoid data loss." 
else $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}" @@ -687,7 +689,7 @@ EOF if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then rpm_installer=dnf fi - if rpm -q --quiet k3s-selinux; then + if rpm -q --quiet k3s-selinux; then # remove k3s-selinux module before upgrade to allow container-selinux to upgrade safely if check_available_upgrades container-selinux ${3} && check_available_upgrades k3s-selinux ${3}; then MODULE_PRIORITY=$($SUDO semodule --list=full | grep k3s | cut -f1 -d" ") @@ -1154,4 +1156,3 @@ eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@") create_service_file service_enable_and_start } - diff --git a/install.sh.sha256sum b/install.sh.sha256sum index 22a4b3ea97dd..67266f2976ee 100644 --- a/install.sh.sha256sum +++ b/install.sh.sha256sum @@ -1 +1 @@ -fb963877e5c94be16668fb58f45a83ddc338331d8934d46a480259610322d5b9 install.sh +41860b3152407623d980eff26c622706ac97f15809f1064227bfabe8a9f00b16 install.sh diff --git a/manifests/local-storage.yaml b/manifests/local-storage.yaml index fb72ffc07704..2ca24a369f34 100644 --- a/manifests/local-storage.yaml +++ b/manifests/local-storage.yaml @@ -67,7 +67,7 @@ spec: effect: "NoSchedule" containers: - name: local-path-provisioner - image: "%{SYSTEM_DEFAULT_REGISTRY}%rancher/local-path-provisioner:v0.0.28" + image: "%{SYSTEM_DEFAULT_REGISTRY}%rancher/local-path-provisioner:v0.0.30" imagePullPolicy: IfNotPresent command: - local-path-provisioner diff --git a/pkg/agent/containerd/config_linux.go b/pkg/agent/containerd/config_linux.go index 1da05f300df6..5bd7df655a62 100644 --- a/pkg/agent/containerd/config_linux.go +++ b/pkg/agent/containerd/config_linux.go @@ -24,7 +24,7 @@ import ( const ( socketPrefix = "unix://" - runtimesPath = "/usr/local/nvidia/toolkit:/opt/kwasm/bin:/usr/sbin:/usr/local/sbin:/usr/bin:/usr/local/bin" + runtimesPath = "/usr/local/nvidia/toolkit:/opt/kwasm/bin" ) func getContainerdArgs(cfg *config.Node) []string { @@ -55,10 +55,10 @@ func SetupContainerdConfig(cfg *config.Node) error { cfg.AgentConfig.Systemd = !isRunningInUserNS && controllers["cpuset"] && os.Getenv("INVOCATION_ID") != "" } - // set the path to include the runtimes and then remove the aditional path entries + // set the path to include the default runtimes and remove the additional path entries // that we added after finding the runtimes originalPath := os.Getenv("PATH") - os.Setenv("PATH", runtimesPath) + os.Setenv("PATH", runtimesPath+string(os.PathListSeparator)+originalPath) extraRuntimes := findContainerRuntimes() os.Setenv("PATH", originalPath) diff --git a/pkg/agent/containerd/runtimes.go b/pkg/agent/containerd/runtimes.go index 3e5ec311a031..ca0a225c5b50 100644 --- a/pkg/agent/containerd/runtimes.go +++ b/pkg/agent/containerd/runtimes.go @@ -71,6 +71,10 @@ func findNvidiaContainerRuntimes(foundRuntimes runtimeConfigs) { RuntimeType: "io.containerd.runc.v2", BinaryName: "nvidia-container-runtime-experimental", }, + "nvidia-cdi": { + RuntimeType: "io.containerd.runc.v2", + BinaryName: "nvidia-container-runtime.cdi", + }, } searchForRuntimes(potentialRuntimes, foundRuntimes) diff --git a/pkg/agent/loadbalancer/loadbalancer.go b/pkg/agent/loadbalancer/loadbalancer.go index 567d825a2bb7..c75ea5fec4f2 100644 --- a/pkg/agent/loadbalancer/loadbalancer.go +++ b/pkg/agent/loadbalancer/loadbalancer.go @@ -10,9 +10,9 @@ import ( "sync" "time" + "github.com/inetaf/tcpproxy" "github.com/k3s-io/k3s/pkg/version" "github.com/sirupsen/logrus" - "inet.af/tcpproxy" ) // server tracks the connections to a server, so that they
can be closed when the server is removed. diff --git a/pkg/cli/server/server.go b/pkg/cli/server/server.go index 698d04ec49e3..379a29fda752 100644 --- a/pkg/cli/server/server.go +++ b/pkg/cli/server/server.go @@ -37,10 +37,6 @@ import ( kubeapiserverflag "k8s.io/component-base/cli/flag" "k8s.io/kubernetes/pkg/controlplane/apiserver/options" utilsnet "k8s.io/utils/net" - - _ "github.com/go-sql-driver/mysql" // ensure we have mysql - _ "github.com/lib/pq" // ensure we have postgres - _ "github.com/mattn/go-sqlite3" // ensure we have sqlite ) func Run(app *cli.Context) error { diff --git a/pkg/deploy/zz_generated_bindata.go b/pkg/deploy/zz_generated_bindata.go index 8f1fe3a4d70f..25e4a68cc024 100644 --- a/pkg/deploy/zz_generated_bindata.go +++ b/pkg/deploy/zz_generated_bindata.go @@ -132,7 +132,7 @@ func corednsYaml() (*asset, error) { return a, nil } -var _localStorageYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\xdf\x6f\xdb\xb6\x13\x7f\xd7\x5f\x71\x5f\x7d\x97\x97\xa1\x94\x93\x0d\x58\x0a\xbe\x79\xb1\xd3\x06\x70\x6c\xc3\x76\x3b\x14\x45\x61\xd0\xd4\xd9\x66\x43\x91\x04\x49\xb9\xf5\xb2\xfc\xef\x03\x49\xd9\x91\x93\x34\x71\xb0\x4d\x2f\x82\x8e\x77\x9f\x3b\xde\xe7\x7e\x88\x19\xf1\x11\xad\x13\x5a\x51\xd8\x9c\x65\x37\x42\x95\x14\xa6\x68\x37\x82\x63\x97\x73\x5d\x2b\x9f\x55\xe8\x59\xc9\x3c\xa3\x19\x80\x62\x15\x52\x90\x9a\x33\x49\x0c\xf3\x6b\x62\xac\xde\x88\x60\x8f\x96\xb8\x64\x47\x58\x63\x98\xd4\x9d\x61\x1c\x29\xdc\xd4\x0b\x24\x6e\xeb\x3c\x56\x19\x21\x24\x6b\x7b\xb6\x0b\xc6\x0b\x56\xfb\xb5\xb6\xe2\x4f\xe6\x85\x56\xc5\xcd\x5b\x57\x08\xdd\xd9\xc7\x74\x21\x6b\xe7\xd1\x4e\xb4\xc4\xe3\x03\xb2\x41\xdb\xd6\x12\x1d\xcd\x08\x30\x23\xde\x59\x5d\x1b\x47\xe1\x73\x9e\x7f\xc9\x00\x2c\x3a\x5d\x5b\x8e\x51\xa2\x74\x89\x2e\x7f\x03\xb9\x09\x61\x39\x8f\xca\x6f\xb4\xac\x2b\xe4\x92\x89\x2a\x9e\x70\xad\x96\x62\x55\x31\x93\xf4\x74\xe9\x3a\x52\xaf\x22\xd4\x06\xed\x22\xc2\xac\xd0\x87\x43\x29\x5c\x7c\x7f\x63\x9e\xaf\xf3\x2f\x2f\xbb\x47\x55\x1a\x2d\x94\x7f\x32\x84\xbd\xbf\x43\x5f\x3f\x1f\x05\xbc\xc1\x80\x7a\x60\xc8\x2d\x32\x8f\x11\xf4\xe9\xf8\x9c\xd7\x96\xad\xb0\xa1\xe1\x31\x68\x73\xce\x25\x73\x0e\xdd\x71\x19\xf8\x47\xa4\xff\x2e\x54\x29\xd4\xea\x78\xee\x17\x42\x95\x59\x28\x80\x09\x2e\x83\xf2\xee\x7a\xcf\x38\xce\x00\x1e\x17\xdb\x31\x25\xe6\xea\xc5\x57\xe4\x3e\x56\xd9\x93\x2d\xf4\x5f\x35\x0e\x33\xc6\xdd\xa7\xab\x87\x46\xea\x6d\x85\xaf\xe8\xd9\x1f\xbb\x72\x06\x39\x8d\xb4\x27\xdd\xf7\x22\x70\xbe\x1d\x88\x4a\x78\x0a\xa7\x19\x80\xf3\x96\x79\x5c\x6d\x83\x16\x80\xdf\x1a\xa4\x30\xd1\x52\x0a\xb5\xfa\x60\x4a\xe6\x31\xca\x6d\x5b\x92\x54\x01\x2a\xf6\xfd\x83\x62\x1b\x26\x24\x5b\x48\xa4\x70\x16\xe0\x50\x22\xf7\xda\x26\x9d\x2a\x54\xcd\x80\x2d\x50\xba\x9d\x11\x33\xe6\x99\x6b\x78\xac\x8c\xdc\xbb\x68\xdf\x3f\x3c\xf2\x00\xe9\x25\x2c\x80\xdd\xed\xc3\x63\xac\xd0\x56\xf8\xed\x45\x28\xf6\x61\x4c\x66\x9e\x92\x44\xc2\xcc\x20\xdc\x0a\x2f\x38\x93\x79\xa3\xef\x0e\xb8\x1f\xbe\x8e\xf8\x98\x4a\x2d\xd1\xc6\xc2\x6c\x45\x0c\x40\xe0\x06\xb7\x14\xf2\x8b\xc6\x5f\xb7\x2c\xb5\x72\x23\x25\xb7\x79\x4b\x0b\x40\x9b\x60\xad\x2d\x85\xbc\xff\x5d\x38\xef\xf2\x27\x40\x62\xe4\xa1\x78\x8b\x40\xba\x55\xe8\x31\xf6\x1e\xd7\xca\x5b\x2d\x89\x91\x4c\xe1\x2b\x70\x01\x70\xb9\x44\xee\x29\xe4\x43\x3d\xe5\x6b\x2c\x6b\x89\xaf\x71\x5c\xb1\xd0\x72\xff\x96\xc7\x70\x0d\x26\x14\xda\x7d\x06\xc9\x4b\x7d\x90\x1e\x51\xb1\x55\x20\xf8\xe4\x76\xfa\x69\x3a\xeb\x5f\xcf\x7b\xfd\xcb\xee\x87\xc1\x6c\x3e\xe9\xbf\xbb\x9a\xce\x26\x9f\xee\x4e\x2c\x53\x7c\x8d\xb6\xf3\x34\x12\xdd\x9c\x16\xa7\xc5\x2f\x6f\xf3\x43\xc8\x71\x2d\xe5\x58\x4b\xc1\xb7\x14\xae\x96\x43\xed\xc7\x16\x1d\xee\x29\x0f\x11\x57\x15\
x53\xe5\x3d\xe1\xe4\xa5\x50\x09\x38\xcf\xac\x6f\x7d\x13\x92\x36\x54\x4b\xd4\x41\xcf\x3b\x49\xda\xbc\x8a\xaf\x4e\xab\xbd\x46\xda\x2f\xd7\xa1\xfa\x5c\xdb\x77\x4a\x56\xb2\x20\x49\xa9\x95\xfb\x2a\xe8\x8f\x99\x5f\xd3\x03\x07\x7b\x0d\x54\x9b\xc7\x60\xe3\x51\x6f\x3e\xec\x5e\xf7\xa7\xe3\xee\x45\xbf\x05\xb6\x61\xb2\xc6\x4b\xab\x2b\x7a\xc0\xee\x52\xa0\x2c\x9b\xe1\xfd\x48\x9e\x7c\xef\xba\xbc\xd8\xcf\xb0\xac\x7d\xab\x57\x5c\x28\xc9\xaf\x99\x39\xf4\xf6\xa8\x64\x9a\xfc\x3e\x9c\xc3\x87\xeb\xf2\x7e\x22\x4f\x93\x3c\x4e\x8e\x67\x67\x72\x58\x50\x4a\x69\xdf\xee\xfa\x12\x97\xac\x96\xfe\x63\x8c\x75\x16\xc7\x6b\x1e\x2d\x52\x69\xb5\x57\xf0\x83\x5e\x12\x8e\x34\xc6\x24\x1e\x53\xc8\xbd\xad\x31\xcf\xda\x75\x0a\x4d\x1d\x07\x83\x56\x20\x29\x35\xcd\xba\xbd\xd6\x25\x52\xf8\x83\x09\x7f\xa9\xed\xa5\xb0\xce\x5f\x68\xe5\xea\x0a\x6d\x66\xd3\x7f\xd1\xae\xa6\x7b\x28\xd1\x63\x4c\x4c\xb3\x43\x77\x19\xcd\x1e\xfc\x63\x3e\xbb\x9a\xf6\xf5\xfb\x83\xad\xb4\x33\x6c\x95\x32\x85\xbf\x48\x4c\xc8\x6d\x43\x5d\x1c\x31\xa1\x40\xae\x99\xc9\xe9\xe7\x46\x7a\xbb\x27\x36\x9e\xe7\x34\xdf\x75\xf6\xb8\x3b\x7b\x3f\xbf\x1c\x4d\xe6\xc3\xd1\x70\x3e\xb8\x9a\xce\xfa\xbd\xf9\x70\xd4\xeb\x4f\xf3\x37\xf7\x36\x21\x3a\x97\xd3\xcf\xf9\xc9\xed\xce\x6e\x30\xba\xe8\x0e\xe6\xd3\xd9\x68\xd2\x7d\xd7\x8f\x28\x77\x27\xf1\x4f\x28\x3c\x77\xcd\x3b\x7d\xdf\xc5\xfd\xe6\xc3\xdf\x47\x13\xec\xff\xff\xd7\x59\x08\xd5\x71\xeb\xc4\x25\x7a\x20\x58\xa7\xd5\x75\x53\x0a\x0b\xa4\x82\xd3\xf3\xf3\x73\x20\x06\xf2\x9f\x6e\x3f\x8e\x06\xf3\xde\xd5\xe4\x2e\x31\xcf\xd7\x95\x2e\xe1\xfc\xf4\xb4\x7d\xd4\x29\x8a\x3c\xae\x41\x66\x4b\xfd\x4d\x1d\xe1\xc8\x56\x40\xec\xf2\x21\xfc\x1a\xa5\x41\x3b\xd6\x65\xb1\x65\x95\xdc\xc3\x3c\x20\x31\x88\x12\xcf\x63\x5d\x3e\xb9\x71\x13\xb5\x09\x8d\x98\x46\xa9\xbd\x56\x7f\x3c\xa2\x1f\x18\xc1\xeb\xc6\x72\x25\xac\xd5\x16\x4b\x22\xc5\xc2\x32\xbb\x25\x8b\xda\x6d\x17\xfa\x3b\x3d\x2b\x7e\xfd\xad\x38\x3b\x76\x2e\xff\x1d\x00\x00\xff\xff\xf6\x4c\xc2\x69\x1a\x0d\x00\x00") +var _localStorageYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x56\x5f\x6f\xdb\x36\x10\x7f\xd7\xa7\xb8\x69\xcb\xcb\x50\xca\x0e\x0a\x2c\x03\xdf\xbc\xd8\x69\x03\x38\xb6\x61\xbb\x1d\x8a\xa2\x30\x68\xe9\x6c\xb3\xa1\x48\x82\xa4\xdc\x6a\x59\xbe\xfb\x40\x52\x76\xe4\x24\x4d\x6c\x6c\xd3\x8b\xa0\xe3\xdd\xef\x8e\xf7\xbb\x3f\x62\x9a\x7f\x44\x63\xb9\x92\x14\xb6\xe7\xc9\x2d\x97\x05\x85\x19\x9a\x2d\xcf\xb1\x97\xe7\xaa\x92\x2e\x29\xd1\xb1\x82\x39\x46\x13\x00\xc9\x4a\xa4\x20\x54\xce\x04\xd1\xcc\x6d\x88\x36\x6a\xcb\xbd\x3d\x1a\x62\xa3\x1d\x61\x8d\x61\x54\xb7\x9a\xe5\x48\xe1\xb6\x5a\x22\xb1\xb5\x75\x58\x26\x84\x90\xa4\xed\xd9\x2c\x59\x9e\xb1\xca\x6d\x94\xe1\x7f\x31\xc7\x95\xcc\x6e\x7f\xb7\x19\x57\x9d\x7d\x4c\x97\xa2\xb2\x0e\xcd\x54\x09\x3c\x3e\x20\xe3\xb5\x4d\x25\xd0\xd2\x84\x00\xd3\xfc\x9d\x51\x95\xb6\x14\x3e\xa7\xe9\x97\x04\xc0\xa0\x55\x95\xc9\x31\x48\xa4\x2a\xd0\xa6\x6f\x20\xd5\x3e\x2c\xeb\x50\xba\xad\x12\x55\x89\xb9\x60\xbc\x0c\x27\xb9\x92\x2b\xbe\x2e\x99\x8e\x7a\xaa\xb0\x1d\xa1\xd6\x01\x6a\x8b\x66\x19\x60\xd6\xe8\xfc\xa1\xe0\x36\xbc\xbf\x31\x97\x6f\xd2\x2f\xaf\xbb\x47\x59\x68\xc5\xa5\x7b\x36\x84\xbd\xbf\x43\x5f\xbf\x1e\x05\xbc\x45\x8f\x7a\x60\x98\x1b\x64\x0e\x03\xe8\xf3\xf1\x59\xa7\x0c\x5b\x63\x43\xc3\x53\xd0\xe6\x3c\x17\xcc\x5a\xb4\xc7\x65\xe0\x5f\x91\xfe\x07\x97\x05\x97\xeb\xe3\xb9\x5f\x72\x59\x24\xbe\x00\xa6\xb8\xf2\xca\xbb\xeb\xbd\xe0\x38\x01\x78\x5a\x6c\xc7\x94\x98\xad\x96\x5f\x31\x77\xa1\xca\x9e\x6d\xa1\xff\xab\x71\x98\xd6\xf6\x21\x5d\x7d\xd4\x42\xd5\x25\x9e\xd0\xb3\x3f\x76\x65\x35\xe6\x34\xd0\x1e\x75\xdf\x73\xcf\x79\x3d\xe4\x25\x77\x14\xba\x09\x80\x75\x86\x39\x5c\xd7\x5e\x0b\xc0\xd5\x1a\x29\x4c\x95\x10\x5c\xae\x3f\xe8\x82\x39\x0c\x72\xd3\x96\x44\x55\x80\x92\x7d\xff\x20\xd9\x96\x71\xc1\x96\x02\x29\x9c\x7b\x38\x14\x98\x3b\x65\xa2\x4e\xe9\xab\x66\xc8\x96\x28\xec\xce\x88\x69\xfd\xc2\x35\x1c\x96\x5a\xec\x5d\xb4\xef\xef\x1f\x71\x80\xf4\x1a\x16\xc0\xee\xf6\xfe\xd1\x86\x2b\xc3\x5d\x7d\xe9\x8b\x7d\x14\x92\x99\xc6\x24\x11\x3f\x33\x48\x6e\xb8\xe3\x39\x13\x69\xa3\x6f\x0f\xb8\x1f\x9d\x46\x7c\x48\xa5\x12\x68\x42\x61\xb6\x22\x06\x20\x70\x8b\x35\x85\xf4\xb2\xf1\xd7\x2b\x0a\x25\xed\x58\x8a\x3a\x6d\x69\x01\x28\xed\xad\x95\xa1\x90\x0e\xbe\x73\xeb\x6c\xfa\x0c\x48\x88\xdc\x17\x6f\xe6\x49\x37\x12\x1d\x86\xde\xcb\x95\x74\x46\x09\xa2\x05\x93\x78\x02\x2e\x00\xae\x56\x98\x3b\x0a\xe9\x48\xcd\xf2\x0d\x16\x95\xc0\x53\x1c\x97\xcc\xb7\xdc\x7f\xe5\xd1\x5f\x83\x71\x89\x66\x9f\x41\xf2\x5a\x1f\xc4\x87\x97\x6c\xed\x09\x3e\xbb\x9b\x7d\x9a\xcd\x07\x37\x8b\xfe\xe0\xaa\xf7\x61\x38\x5f\x4c\x07\xef\xae\x67\xf3\xe9\xa7\xfb\x33\xc3\x64\xbe\x41\xd3\x79\x1e\x89\x6e\xbb\x59\x37\x7b\xdb\x4d\x0f\x21\x27\x95\x10\x13\x25\x78\x5e\x53\xb8\x5e\x8d\x94\x9b\x18\xb4\xb8\xa7\xdc\x47\x5c\x96\x4c\x16\x0f\x84\x93\xd7\x42\x25\x60\x1d\x33\xae\xf5\x4d\x48\xdc\x50\x2d\x51\x07\x5d\xde\x89\xd2\xe6\x95\x7d\xb5\x4a\xee\x35\xe2\x7e\xb9\xf1\xd5\x67\xdb\xbe\x63\xb2\xa2\x05\x89\x4a\xad\xdc\x97\x5e\x7f\xc2\xdc\x86\x1e\x38\xd8\x6b\xa0\xdc\x3e\x05\x9b\x8c\xfb\x8b\x51\xef\x66\x30\x9b\xf4\x2e\x07\x2d\xb0\x2d\x13\x15\x5e\x19\x55\xd2\x03\x76\x57\x1c\x45\xd1\x0c\xef\x27\xf2\xe8\x7b\xd7\xe5\xd9\x7e\x86\x25\xed\x5b\x9d\x70\xa1\x28\xbf\x61\xfa\xd0\xdb\x93\x92\x69\xf2\xfb\x78\x0e\x1f\xae\xcb\x87\x89\x3c\x8b\xf2\x30\x39\x5e\x9c\xc9\x7e\x41\x49\xa9\x5c\xbb\xeb\x0b\x5c\xb1\x4a\xb8\x8f\x21\xd6\x79\x18\xaf\x69\xb0\x88\xa5\xd5\x5e\xc1\x8f\x7a\x89\x5b\xd2\x18\x93\x70\x4c\x21\x75\xa6\xc2\x34\x69\xd7\x29\x34\x75\xec\x0d\x5a\x81\xc4\xd4\x34\xeb\xf6\x46\x15\x48\xe1\x4f\xc6\xdd\x95\x32\x57\xdc\x58\x77\xa9\xa4\xad\x4a\x34\x89\x89\xff\x45\xbb\x9a\xee\xa3\x40\x87\x21\x31\xcd\x0e\xdd\x65\x34\x79\xf4\x8f\xf
9\xe2\x6a\xda\xd7\xef\x0f\xb6\xd2\xce\xb0\x55\xca\x14\xfe\x26\x21\x21\x77\x0d\x75\x61\xc4\xf8\x02\xb9\x61\x3a\xa5\x9f\x1b\xe9\xdd\x9e\xd8\x70\x9e\xd2\x74\xd7\xd9\x93\xde\xfc\xfd\xe2\x6a\x3c\x5d\x8c\xc6\xa3\xc5\xf0\x7a\x36\x1f\xf4\x17\xa3\x71\x7f\x30\x4b\xdf\x3c\xd8\xf8\xe8\x6c\x4a\x3f\xa7\x67\x77\x3b\xbb\xe1\xf8\xb2\x37\x5c\xcc\xe6\xe3\x69\xef\xdd\x20\xa0\xdc\x9f\x85\x3f\x21\xff\xdc\x37\xef\xf8\x7d\x1f\xf6\x9b\xf3\x7f\x1f\x4d\xb0\x3f\xff\xd4\x59\x72\xd9\xb1\x9b\xc8\x25\x3a\x20\x58\xc5\xd5\x75\x5b\x70\x03\xa4\x84\xee\xc5\xc5\x05\x10\x0d\xe9\x2f\x77\x1f\xc7\xc3\x45\xff\x7a\x7a\x1f\x99\xcf\x37\xa5\x2a\xe0\xa2\xdb\x6d\x1f\x75\xb2\x2c\x0d\x6b\x90\x99\x42\x7d\x93\x47\x38\x32\x25\x10\xb3\x7a\x0c\xbf\x41\xa1\xd1\x4c\x54\x91\xd5\xac\x14\x7b\x98\x47\x24\x7a\x51\xe4\x79\xa2\x8a\x67\x37\x6e\xa4\x36\xa2\x11\xdd\x28\xb5\xd7\xea\x8f\x47\xf4\x23\x23\x38\x6d\x2c\x97\xdc\x18\x65\xb0\x20\x82\x2f\x0d\x33\x35\x59\x56\xb6\x5e\xaa\xef\xf4\x3c\x7b\xfb\x5b\x76\x7e\xec\x5c\xfe\x27\x00\x00\xff\xff\x8c\xfd\xcb\xef\x1a\x0d\x00\x00") func localStorageYamlBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/etcd/etcd.go b/pkg/etcd/etcd.go index 4125680e66ef..dfcce5433658 100644 --- a/pkg/etcd/etcd.go +++ b/pkg/etcd/etcd.go @@ -41,8 +41,15 @@ import ( "github.com/sirupsen/logrus" "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/client/pkg/v3/logutil" clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3/credentials" snapshotv3 "go.etcd.io/etcd/etcdutl/v3/snapshot" + "go.etcd.io/etcd/server/v3/etcdserver" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/keepalive" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -55,7 +62,7 @@ import ( ) const ( - testTimeout = time.Second * 30 + statusTimeout = time.Second * 30 manageTickerTime = time.Second * 15 learnerMaxStallTime = time.Minute * 5 memberRemovalTimeout = time.Minute * 1 @@ -206,35 +213,40 @@ func (e *ETCD) Test(ctx context.Context) error { return errors.New("etcd datastore is not started") } - ctx, cancel := context.WithTimeout(ctx, testTimeout) - defer cancel() - - endpoints := getEndpoints(e.config) - status, err := e.client.Status(ctx, endpoints[0]) + status, err := e.status(ctx) if err != nil { - return err + return errors.Wrap(err, "failed to get etcd status") + } else if status.IsLearner { + return errors.New("this server has not yet been promoted from learner to voting member") + } else if status.Leader == 0 { + return etcdserver.ErrNoLeader } - if status.IsLearner { - return errors.New("this server has not yet been promoted from learner to voting member") + logrus.Infof("Connected to etcd v%s - datastore using %d of %d bytes", status.Version, status.DbSizeInUse, status.DbSize) + if len(status.Errors) > 0 { + logrus.Warnf("Errors present on etcd cluster: %s", strings.Join(status.Errors, ",")) } + // defrag this node to reclaim freed space from compacted revisions if err := e.defragment(ctx); err != nil { return errors.Wrap(err, "failed to defragment etcd database") } - if err := e.clearAlarms(ctx); err != nil { - return errors.Wrap(err, "failed to report and disarm etcd alarms") + // clear alarms on this node + if err := e.clearAlarms(ctx, status.Header.MemberId); err != nil { + return errors.Wrap(err, "failed to disarm etcd alarms") } - // refresh status to see if any errors remain after clearing alarms - status, err = e.client.Status(ctx, endpoints[0]) + // 
refresh status - note that errors may remain on other nodes, but this + // should not prevent us from continuing with startup. + status, err = e.status(ctx) if err != nil { - return err + return errors.Wrap(err, "failed to get etcd status") } + logrus.Infof("Datastore using %d of %d bytes after defragment", status.DbSizeInUse, status.DbSize) if len(status.Errors) > 0 { - return fmt.Errorf("etcd cluster errors: %s", strings.Join(status.Errors, ", ")) + logrus.Warnf("Errors present on etcd cluster after defragment: %s", strings.Join(status.Errors, ",")) } members, err := e.client.MemberList(ctx) @@ -242,6 +254,7 @@ func (e *ETCD) Test(ctx context.Context) error { return err } + // Ensure that there is a cluster member with our peerURL and name var memberNameUrls []string for _, member := range members.Members { for _, peerURL := range member.PeerURLs { @@ -253,6 +266,8 @@ func (e *ETCD) Test(ctx context.Context) error { memberNameUrls = append(memberNameUrls, member.Name+"="+member.PeerURLs[0]) } } + + // no matching PeerURL on any Member, return an error that indicates what was expected vs what we found. return &membershipError{members: memberNameUrls, self: e.name + "=" + e.peerURL()} } @@ -523,7 +538,7 @@ func (e *ETCD) startClient(ctx context.Context) error { e.config.Datastore.BackendTLSConfig.CertFile = e.config.Runtime.ClientETCDCert e.config.Datastore.BackendTLSConfig.KeyFile = e.config.Runtime.ClientETCDKey - client, err := getClient(ctx, e.config, endpoints...) + client, conn, err := getClient(ctx, e.config, endpoints...) if err != nil { return err } @@ -531,9 +546,8 @@ func (e *ETCD) startClient(ctx context.Context) error { go func() { <-ctx.Done() - client := e.client e.client = nil - client.Close() + conn.Close() }() return nil @@ -554,11 +568,11 @@ func (e *ETCD) join(ctx context.Context, clientAccessInfo *clientaccess.Info) er return err } - client, err := getClient(clientCtx, e.config, clientURLs...) + client, conn, err := getClient(clientCtx, e.config, clientURLs...) if err != nil { return err } - defer client.Close() + defer conn.Close() for _, member := range memberList.Members { for _, peer := range member.PeerURLs { @@ -725,13 +739,53 @@ func (e *ETCD) infoHandler() http.Handler { // If the runtime config does not list any endpoints, the default endpoint is used. // The returned client should be closed when no longer needed, in order to avoid leaking GRPC // client goroutines. -func getClient(ctx context.Context, control *config.Control, endpoints ...string) (*clientv3.Client, error) { +func getClient(ctx context.Context, control *config.Control, endpoints ...string) (*clientv3.Client, *grpc.ClientConn, error) { + logger, err := logutil.CreateDefaultZapLogger(zapcore.DebugLevel) + if err != nil { + return nil, nil, err + } + cfg, err := getClientConfig(ctx, control, endpoints...) if err != nil { - return nil, err + return nil, nil, err + } + + // Set up dialer and resolver options. + // This is normally handled by clientv3.New() but that wraps all the GRPC + // service interfaces with retry handlers and uses deprecated grpc.DialContext() which + // tries to establish a connection even when one isn't wanted.
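+	// Keepalive below mirrors what clientv3.New() would configure: periodic HTTP/2
+	// pings that detect unresponsive endpoints instead of letting calls hang on a
+	// dead connection.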
+ if cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: cfg.DialKeepAliveTime, + Timeout: cfg.DialKeepAliveTimeout, + PermitWithoutStream: cfg.PermitWithoutStream, + } + cfg.DialOptions = append(cfg.DialOptions, grpc.WithKeepaliveParams(params)) + } + + if cfg.TLS != nil { + creds := credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) + } else { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } - return clientv3.New(*cfg) + cfg.DialOptions = append(cfg.DialOptions, grpc.WithResolvers(NewSimpleResolver(cfg.Endpoints[0]))) + + target := fmt.Sprintf("%s://%p/%s", scheme, cfg, authority(cfg.Endpoints[0])) + conn, err := grpc.NewClient(target, cfg.DialOptions...) + if err != nil { + return nil, nil, err + } + + // Create a new client and wire up the GRPC service interfaces. + // Ref: https://github.com/etcd-io/etcd/blob/v3.5.16/client/v3/client.go#L87 + client := clientv3.NewCtxClient(ctx, clientv3.WithZapLogger(logger.Named(version.Program+"-etcd-client"))) + client.Cluster = clientv3.NewClusterFromClusterClient(etcdserverpb.NewClusterClient(conn), client) + client.KV = clientv3.NewKVFromKVClient(etcdserverpb.NewKVClient(conn), client) + client.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(etcdserverpb.NewMaintenanceClient(conn), client) + + return client, conn, nil } // getClientConfig generates an etcd client config connected to the specified endpoints. @@ -837,7 +891,7 @@ func (e *ETCD) migrateFromSQLite(ctx context.Context) error { defer cancel() _, err = endpoint2.Listen(ctx, endpoint2.Config{ - Endpoint: endpoint2.SQLiteBackend, + Endpoint: "sqlite://", }) if err != nil { return err @@ -851,11 +905,11 @@ func (e *ETCD) migrateFromSQLite(ctx context.Context) error { } defer sqliteClient.Close() - etcdClient, err := getClient(ctx, e.config) + etcdClient, conn, err := getClient(ctx, e.config) if err != nil { return err } - defer etcdClient.Close() + defer conn.Close() values, err := sqliteClient.List(ctx, "/registry/", 0) if err != nil { @@ -984,7 +1038,7 @@ func (e *ETCD) StartEmbeddedTemporary(ctx context.Context) error { return errors.New("etcd datastore already started") } - client, err := getClient(ctx, e.config) + client, conn, err := getClient(ctx, e.config) if err != nil { return err } @@ -992,9 +1046,8 @@ func (e *ETCD) StartEmbeddedTemporary(ctx context.Context) error { go func() { <-ctx.Done() - client := e.client e.client = nil - client.Close() + conn.Close() }() if err := cp.Copy(etcdDataDir, tmpDataDir, cp.Options{PreserveOwner: true}); err != nil { @@ -1251,8 +1304,6 @@ func (e *ETCD) trackLearnerProgress(ctx context.Context, progress *learnerProgre } func (e *ETCD) getETCDStatus(ctx context.Context, url string) (*clientv3.StatusResponse, error) { - ctx, cancel := context.WithTimeout(ctx, defaultDialTimeout) - defer cancel() resp, err := e.client.Status(ctx, url) if err != nil { return resp, errors.Wrap(err, "failed to check etcd member status") @@ -1363,12 +1414,10 @@ func (e *ETCD) setLearnerProgress(ctx context.Context, status *learnerProgress) return err } -// clearAlarms checks for any alarms on the local etcd member. If found, they are -// reported and the alarm state is cleared. 
-func (e *ETCD) clearAlarms(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, testTimeout) - defer cancel() - +// clearAlarms checks for any NOSPACE alarms on the local etcd member. +// If found, they are reported and the alarm state is cleared. +// Other alarm types are not handled. +func (e *ETCD) clearAlarms(ctx context.Context, memberID uint64) error { if e.client == nil { return errors.New("etcd client was nil") } @@ -1379,22 +1428,37 @@ func (e *ETCD) clearAlarms(ctx context.Context) error { } for _, alarm := range alarmList.Alarms { - logrus.Warnf("Alarm on etcd member %d: %s", alarm.MemberID, alarm.Alarm) - } - - if len(alarmList.Alarms) > 0 { - if _, err := e.client.AlarmDisarm(ctx, &clientv3.AlarmMember{}); err != nil { - return fmt.Errorf("etcd alarm disarm failed: %v", err) + if alarm.MemberID != memberID { + // ignore alarms on other cluster members, they should manage their own problems + continue + } + if alarm.Alarm == etcdserverpb.AlarmType_NOSPACE { + if _, err := e.client.AlarmDisarm(ctx, &clientv3.AlarmMember{MemberID: alarm.MemberID, Alarm: alarm.Alarm}); err != nil { + return fmt.Errorf("%s disarm failed: %v", alarm.Alarm, err) + } + logrus.Infof("%s disarmed successfully", alarm.Alarm) + } else { + return fmt.Errorf("%s alarm must be disarmed manually", alarm.Alarm) } - logrus.Infof("Alarms disarmed on etcd server") } return nil } -func (e *ETCD) defragment(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, testTimeout) +// status returns status using the first etcd endpoint. +func (e *ETCD) status(ctx context.Context) (*clientv3.StatusResponse, error) { + if e.client == nil { + return nil, errors.New("etcd client was nil") + } + + ctx, cancel := context.WithTimeout(ctx, statusTimeout) defer cancel() + endpoints := getEndpoints(e.config) + return e.client.Status(ctx, endpoints[0]) +} + +// defragment defragments the etcd datastore using the first etcd endpoint +func (e *ETCD) defragment(ctx context.Context) error { if e.client == nil { return errors.New("etcd client was nil") } @@ -1550,11 +1614,11 @@ func backupDirWithRetention(dir string, maxBackupRetention int) (string, error) // GetAPIServerURLsFromETCD will try to fetch the version.Program/apiaddresses key from etcd // and unmarshal it to a list of apiserver endpoints. 
func GetAPIServerURLsFromETCD(ctx context.Context, cfg *config.Control) ([]string, error) { - cl, err := getClient(ctx, cfg) + cl, conn, err := getClient(ctx, cfg) if err != nil { return nil, err } - defer cl.Close() + defer conn.Close() etcdResp, err := cl.KV.Get(ctx, AddressKey) if err != nil { @@ -1576,9 +1640,6 @@ func GetAPIServerURLsFromETCD(ctx context.Context, cfg *config.Control) ([]strin // GetMembersClientURLs will list through the member lists in etcd and return // back a combined list of client urls for each member in the cluster func (e *ETCD) GetMembersClientURLs(ctx context.Context) ([]string, error) { - ctx, cancel := context.WithTimeout(ctx, testTimeout) - defer cancel() - members, err := e.client.MemberList(ctx) if err != nil { return nil, err @@ -1593,24 +1654,6 @@ func (e *ETCD) GetMembersClientURLs(ctx context.Context) ([]string, error) { return clientURLs, nil } -// GetMembersNames will list through the member lists in etcd and return -// back a combined list of member names -func (e *ETCD) GetMembersNames(ctx context.Context) ([]string, error) { - ctx, cancel := context.WithTimeout(ctx, testTimeout) - defer cancel() - - members, err := e.client.MemberList(ctx) - if err != nil { - return nil, err - } - - var memberNames []string - for _, member := range members.Members { - memberNames = append(memberNames, member.Name) - } - return memberNames, nil -} - // RemoveSelf will remove the member if it exists in the cluster. This should // only be called on a node that may have previously run etcd, but will not // currently run etcd, to ensure that it is not a member of the cluster. diff --git a/pkg/etcd/etcd_test.go b/pkg/etcd/etcd_test.go index 5a519bdcffe4..f875a24ad1b7 100644 --- a/pkg/etcd/etcd_test.go +++ b/pkg/etcd/etcd_test.go @@ -6,6 +6,7 @@ import ( "net/http" "os" "path/filepath" + "sync" "testing" "time" @@ -15,11 +16,23 @@ import ( testutil "github.com/k3s-io/k3s/tests" "github.com/robfig/cron/v3" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/api/v3/etcdserverpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/status" utilnet "k8s.io/apimachinery/pkg/util/net" ) +func init() { + logrus.SetLevel(logrus.DebugLevel) +} + func mustGetAddress() string { ipAddr, err := utilnet.ChooseHostInterface() if err != nil { @@ -76,7 +89,7 @@ func Test_UnitETCD_IsInitialized(t *testing.T) { wantErr bool }{ { - name: "Directory exists", + name: "directory exists", args: args{ ctx: context.TODO(), config: generateTestConfig(), @@ -95,7 +108,7 @@ func Test_UnitETCD_IsInitialized(t *testing.T) { want: true, }, { - name: "Directory does not exist", + name: "directory does not exist", args: args{ ctx: context.TODO(), config: generateTestConfig(), @@ -117,9 +130,6 @@ func Test_UnitETCD_IsInitialized(t *testing.T) { }, } - // enable logging - logrus.SetLevel(logrus.DebugLevel) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := NewETCD() @@ -159,7 +169,7 @@ func Test_UnitETCD_Register(t *testing.T) { wantErr bool }{ { - name: "Call Register with standard config", + name: "standard config", args: args{ ctx: context.TODO(), config: generateTestConfig(), @@ -174,7 +184,7 @@ func Test_UnitETCD_Register(t *testing.T) { }, }, { - name: "Call Register with a tombstone file created", + name: 
"with a tombstone file created", args: args{ ctx: context.TODO(), config: generateTestConfig(), @@ -249,7 +259,7 @@ func Test_UnitETCD_Start(t *testing.T) { wantErr bool }{ { - name: "Start etcd without clientAccessInfo and without snapshots", + name: "nil clientAccessInfo and nil cron", fields: fields{ config: generateTestConfig(), address: mustGetAddress(), @@ -266,17 +276,18 @@ func Test_UnitETCD_Start(t *testing.T) { }, teardown: func(e *ETCD, ctxInfo *contextInfo) error { // RemoveSelf will fail with a specific error, but it still does cleanup for testing purposes - if err := e.RemoveSelf(ctxInfo.ctx); err != nil && err.Error() != etcdserver.ErrNotEnoughStartedMembers.Error() { - return err - } + err := e.RemoveSelf(ctxInfo.ctx) ctxInfo.cancel() - time.Sleep(10 * time.Second) + time.Sleep(5 * time.Second) testutil.CleanupDataDir(e.config) + if err != nil && err.Error() != etcdserver.ErrNotEnoughStartedMembers.Error() { + return err + } return nil }, }, { - name: "Start etcd without clientAccessInfo on", + name: "nil clientAccessInfo", fields: fields{ config: generateTestConfig(), address: mustGetAddress(), @@ -293,17 +304,18 @@ func Test_UnitETCD_Start(t *testing.T) { }, teardown: func(e *ETCD, ctxInfo *contextInfo) error { // RemoveSelf will fail with a specific error, but it still does cleanup for testing purposes - if err := e.RemoveSelf(ctxInfo.ctx); err != nil && err.Error() != etcdserver.ErrNotEnoughStartedMembers.Error() { - return err - } + err := e.RemoveSelf(ctxInfo.ctx) ctxInfo.cancel() time.Sleep(5 * time.Second) testutil.CleanupDataDir(e.config) + if err != nil && err.Error() != etcdserver.ErrNotEnoughStartedMembers.Error() { + return err + } return nil }, }, { - name: "Start etcd with an existing cluster", + name: "existing cluster", fields: fields{ config: generateTestConfig(), address: mustGetAddress(), @@ -322,13 +334,14 @@ func Test_UnitETCD_Start(t *testing.T) { }, teardown: func(e *ETCD, ctxInfo *contextInfo) error { // RemoveSelf will fail with a specific error, but it still does cleanup for testing purposes - if err := e.RemoveSelf(ctxInfo.ctx); err != nil && err.Error() != etcdserver.ErrNotEnoughStartedMembers.Error() { - return err - } + err := e.RemoveSelf(ctxInfo.ctx) ctxInfo.cancel() time.Sleep(5 * time.Second) testutil.CleanupDataDir(e.config) os.Remove(walDir(e.config)) + if err != nil && err.Error() != etcdserver.ErrNotEnoughStartedMembers.Error() { + return err + } return nil }, }, @@ -353,8 +366,478 @@ func Test_UnitETCD_Start(t *testing.T) { } if err := tt.teardown(e, &tt.fields.context); err != nil { t.Errorf("Teardown for ETCD.Start() failed = %v", err) + } + }) + } +} + +func Test_UnitETCD_Test(t *testing.T) { + type contextInfo struct { + ctx context.Context + cancel context.CancelFunc + } + type fields struct { + context contextInfo + client *clientv3.Client + config *config.Control + name string + address string + } + type args struct { + clientAccessInfo *clientaccess.Info + } + tests := []struct { + name string + fields fields + setup func(e *ETCD, ctxInfo *contextInfo) error + teardown func(e *ETCD, ctxInfo *contextInfo) error + wantErr bool + }{ + { + name: "no server running", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + 
ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: true, + }, + { + name: "unreachable server", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + e.config.Runtime.EtcdConfig.Endpoints = []string{"https://192.0.2.0:2379"} // RFC5737 + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: true, + }, + { + name: "learner server", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + if err := startMock(ctxInfo.ctx, e, true, false, false, time.Second); err != nil { + return err + } + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: true, + }, + { + name: "corrupt server", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + if err := startMock(ctxInfo.ctx, e, false, true, false, time.Second); err != nil { + return err + } + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: true, + }, + { + name: "leaderless server", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + if err := startMock(ctxInfo.ctx, e, false, false, true, time.Second); err != nil { + return err + } + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: true, + }, + { + name: "normal server", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + if err := startMock(ctxInfo.ctx, e, false, false, false, time.Second); err != nil { + return err + } + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: false, + }, + { + name: "alarm on other server", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + extraAlarm := 
&etcdserverpb.AlarmMember{MemberID: 2, Alarm: etcdserverpb.AlarmType_NOSPACE} + if err := startMock(ctxInfo.ctx, e, false, false, false, time.Second, extraAlarm); err != nil { + return err + } + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: false, + }, + { + name: "slow defrag", + fields: fields{ + config: generateTestConfig(), + address: mustGetAddress(), + name: "default", + }, + setup: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.ctx, ctxInfo.cancel = context.WithCancel(context.Background()) + testutil.GenerateRuntime(e.config) + if err := startMock(ctxInfo.ctx, e, false, false, false, 40*time.Second); err != nil { + return err + } + return e.startClient(ctxInfo.ctx) + }, + teardown: func(e *ETCD, ctxInfo *contextInfo) error { + ctxInfo.cancel() + time.Sleep(1 * time.Second) + testutil.CleanupDataDir(e.config) + return nil + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &ETCD{ + client: tt.fields.client, + config: tt.fields.config, + name: tt.fields.name, + address: tt.fields.address, + } + + if err := tt.setup(e, &tt.fields.context); err != nil { + t.Errorf("Setup for ETCD.Test() failed = %v", err) return } + start := time.Now() + err := e.Test(tt.fields.context.ctx) + duration := time.Now().Sub(start) + t.Logf("ETCD.Test() completed in %v with err=%v", duration, err) + if (err != nil) != tt.wantErr { + t.Errorf("ETCD.Test() error = %v, wantErr %v", err, tt.wantErr) + } + if err := tt.teardown(e, &tt.fields.context); err != nil { + t.Errorf("Teardown for ETCD.Test() failed = %v", err) + } }) } } + +// startMock starts up a mock etcd grpc service with canned responses +// that can be used to test specific scenarios. +func startMock(ctx context.Context, e *ETCD, isLearner, isCorrupt, noLeader bool, defragDelay time.Duration, extraAlarms ...*etcdserverpb.AlarmMember) error { + address := authority(getEndpoints(e.config)[0]) + // listen on endpoint and close listener on context cancel + listener, err := net.Listen("tcp", address) + if err != nil { + return err + } + + // set up tls if enabled + gopts := []grpc.ServerOption{} + if e.config.Datastore.ServerTLSConfig.CertFile != "" && e.config.Datastore.ServerTLSConfig.KeyFile != "" { + creds, err := credentials.NewServerTLSFromFile(e.config.Datastore.ServerTLSConfig.CertFile, e.config.Datastore.ServerTLSConfig.KeyFile) + if err != nil { + return err + } + gopts = append(gopts, grpc.Creds(creds)) + } + server := grpc.NewServer(gopts...) 
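+	// assemble the canned-response mock; the call counters let alarms() report
+	// NOSPACE only on the first check, so Test() can observe the alarm being
+	// cleared after defragmentation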
+ + mock := &mockEtcd{ + e: e, + mu: &sync.RWMutex{}, + isLearner: isLearner, + isCorrupt: isCorrupt, + noLeader: noLeader, + defragDelay: defragDelay, + extraAlarms: extraAlarms, + } + + // register grpc services + etcdserverpb.RegisterKVServer(server, mock) + etcdserverpb.RegisterClusterServer(server, mock) + etcdserverpb.RegisterMaintenanceServer(server, mock) + + hsrv := health.NewServer() + hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthpb.RegisterHealthServer(server, hsrv) + + reflection.Register(server) + + // shutdown on context cancel + go func() { + <-ctx.Done() + server.GracefulStop() + listener.Close() + }() + + // start serving + go func() { + logrus.Infof("Mock etcd server starting on %s", listener.Addr()) + logrus.Infof("Mock etcd server exited: %v", server.Serve(listener)) + }() + + return nil +} + +type mockEtcd struct { + e *ETCD + mu *sync.RWMutex + calls map[string]int + isLearner bool + isCorrupt bool + noLeader bool + defragDelay time.Duration + extraAlarms []*etcdserverpb.AlarmMember +} + +// increment call counter for this function +func (m *mockEtcd) inc(call string) { + m.mu.Lock() + defer m.mu.Unlock() + if m.calls == nil { + m.calls = map[string]int{} + } + m.calls[call] = m.calls[call] + 1 +} + +// get call counter for this function +func (m *mockEtcd) get(call string) int { + m.mu.RLock() + defer m.mu.RUnlock() + return m.calls[call] +} + +// get alarm list +func (m *mockEtcd) alarms() []*etcdserverpb.AlarmMember { + alarms := m.extraAlarms + if m.get("alarm") < 2 { + // on the first check, return NOSPACE so that we can clear it after defragging + alarms = append(alarms, &etcdserverpb.AlarmMember{ + Alarm: etcdserverpb.AlarmType_NOSPACE, + MemberID: 1, + }) + } + if m.isCorrupt { + // return CORRUPT if so requested + alarms = append(alarms, &etcdserverpb.AlarmMember{ + Alarm: etcdserverpb.AlarmType_CORRUPT, + MemberID: 1, + }) + } + return alarms +} + +// KV mocks +func (m *mockEtcd) Range(context.Context, *etcdserverpb.RangeRequest) (*etcdserverpb.RangeResponse, error) { + m.inc("range") + return nil, unsupported("range") +} +func (m *mockEtcd) Put(context.Context, *etcdserverpb.PutRequest) (*etcdserverpb.PutResponse, error) { + m.inc("put") + return nil, unsupported("put") +} +func (m *mockEtcd) DeleteRange(context.Context, *etcdserverpb.DeleteRangeRequest) (*etcdserverpb.DeleteRangeResponse, error) { + m.inc("deleterange") + return nil, unsupported("deleterange") +} +func (m *mockEtcd) Txn(context.Context, *etcdserverpb.TxnRequest) (*etcdserverpb.TxnResponse, error) { + m.inc("txn") + return nil, unsupported("txn") +} +func (m *mockEtcd) Compact(context.Context, *etcdserverpb.CompactionRequest) (*etcdserverpb.CompactionResponse, error) { + m.inc("compact") + return nil, unsupported("compact") +} + +// Maintenance mocks +func (m *mockEtcd) Alarm(ctx context.Context, r *etcdserverpb.AlarmRequest) (*etcdserverpb.AlarmResponse, error) { + m.inc("alarm") + res := &etcdserverpb.AlarmResponse{ + Header: &etcdserverpb.ResponseHeader{ + MemberId: 1, + }, + } + if r.Action == etcdserverpb.AlarmRequest_GET { + res.Alarms = m.alarms() + } + return res, nil +} +func (m *mockEtcd) Status(context.Context, *etcdserverpb.StatusRequest) (*etcdserverpb.StatusResponse, error) { + m.inc("status") + res := &etcdserverpb.StatusResponse{ + Header: &etcdserverpb.ResponseHeader{ + MemberId: 1, + }, + Leader: 1, + Version: "v3.5.0-mock0", + DbSize: 1024, + DbSizeInUse: 512, + IsLearner: m.isLearner, + } + if m.noLeader { + res.Leader = 0 + res.Errors = 
append(res.Errors, etcdserver.ErrNoLeader.Error()) + } + for _, a := range m.alarms() { + res.Errors = append(res.Errors, a.String()) + } + return res, nil +} +func (m *mockEtcd) Defragment(ctx context.Context, r *etcdserverpb.DefragmentRequest) (*etcdserverpb.DefragmentResponse, error) { + m.inc("defragment") + // delay defrag response by configured time, or until the request is cancelled + select { + case <-ctx.Done(): + case <-time.After(m.defragDelay): + } + return &etcdserverpb.DefragmentResponse{ + Header: &etcdserverpb.ResponseHeader{ + MemberId: 1, + }, + }, nil +} +func (m *mockEtcd) Hash(context.Context, *etcdserverpb.HashRequest) (*etcdserverpb.HashResponse, error) { + m.inc("hash") + return nil, unsupported("hash") +} +func (m *mockEtcd) HashKV(context.Context, *etcdserverpb.HashKVRequest) (*etcdserverpb.HashKVResponse, error) { + m.inc("hashkv") + return nil, unsupported("hashkv") +} +func (m *mockEtcd) Snapshot(*etcdserverpb.SnapshotRequest, etcdserverpb.Maintenance_SnapshotServer) error { + m.inc("snapshot") + return unsupported("snapshot") +} +func (m *mockEtcd) MoveLeader(context.Context, *etcdserverpb.MoveLeaderRequest) (*etcdserverpb.MoveLeaderResponse, error) { + m.inc("moveleader") + return nil, unsupported("moveleader") +} +func (m *mockEtcd) Downgrade(context.Context, *etcdserverpb.DowngradeRequest) (*etcdserverpb.DowngradeResponse, error) { + m.inc("downgrade") + return nil, unsupported("downgrade") +} + +// Cluster mocks +func (m *mockEtcd) MemberAdd(context.Context, *etcdserverpb.MemberAddRequest) (*etcdserverpb.MemberAddResponse, error) { + m.inc("memberadd") + return nil, unsupported("memberadd") +} +func (m *mockEtcd) MemberRemove(context.Context, *etcdserverpb.MemberRemoveRequest) (*etcdserverpb.MemberRemoveResponse, error) { + m.inc("memberremove") + return nil, etcdserver.ErrNotEnoughStartedMembers +} +func (m *mockEtcd) MemberUpdate(context.Context, *etcdserverpb.MemberUpdateRequest) (*etcdserverpb.MemberUpdateResponse, error) { + m.inc("memberupdate") + return nil, unsupported("memberupdate") +} +func (m *mockEtcd) MemberList(context.Context, *etcdserverpb.MemberListRequest) (*etcdserverpb.MemberListResponse, error) { + m.inc("memberlist") + scheme := "http" + if m.e.config.Datastore.ServerTLSConfig.CertFile != "" { + scheme = "https" + } + + return &etcdserverpb.MemberListResponse{ + Header: &etcdserverpb.ResponseHeader{ + MemberId: 1, + }, + Members: []*etcdserverpb.Member{ + { + ID: 1, + Name: m.e.name, + IsLearner: m.isLearner, + ClientURLs: []string{scheme + "://127.0.0.1:2379"}, + PeerURLs: []string{scheme + "://" + m.e.address + ":2380"}, + }, + }, + }, nil +} + +func (m *mockEtcd) MemberPromote(context.Context, *etcdserverpb.MemberPromoteRequest) (*etcdserverpb.MemberPromoteResponse, error) { + m.inc("memberpromote") + return nil, unsupported("memberpromote") +} + +func unsupported(field string) error { + return status.New(codes.Unimplemented, field+" is not implemented").Err() +} diff --git a/pkg/etcd/resolver.go b/pkg/etcd/resolver.go new file mode 100644 index 000000000000..b95242cbfa91 --- /dev/null +++ b/pkg/etcd/resolver.go @@ -0,0 +1,80 @@ +package etcd + +import ( + "net/url" + "path" + "strings" + + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" +) + +const scheme = "etcd-endpoint" + +type EtcdSimpleResolver struct { + *manual.Resolver + endpoint string +} + +// Cribbed from https://github.com/etcd-io/etcd/blob/v3.5.16/client/v3/internal/resolver/resolver.go +// but only supports a single fixed endpoint. 
We use this instead of the internal etcd client resolver +// because the agent loadbalancer handles failover and we don't want etcd or grpc's special behavior. +func NewSimpleResolver(endpoint string) *EtcdSimpleResolver { + r := manual.NewBuilderWithScheme(scheme) + return &EtcdSimpleResolver{Resolver: r, endpoint: endpoint} +} + +func (r *EtcdSimpleResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + res, err := r.Resolver.Build(target, cc, opts) + if err != nil { + return nil, err + } + + if r.CC != nil { + addr, serverName := interpret(r.endpoint) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: addr, ServerName: serverName}}, + }) + } + + return res, nil +} + +func interpret(ep string) (string, string) { + if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") { + if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") { + _, absolutePath, _ := strings.Cut(ep, "://") + return "unix://" + absolutePath, path.Base(absolutePath) + } + if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") { + _, localPath, _ := strings.Cut(ep, "://") + return "unix:" + localPath, path.Base(localPath) + } + _, localPath, _ := strings.Cut(ep, ":") + return "unix:" + localPath, path.Base(localPath) + } + if strings.Contains(ep, "://") { + url, err := url.Parse(ep) + if err != nil { + return ep, ep + } + if url.Scheme == "http" || url.Scheme == "https" { + return url.Host, url.Host + } + return ep, url.Host + } + return ep, ep +} + +func authority(ep string) string { + if _, authority, ok := strings.Cut(ep, "://"); ok { + return authority + } + if suff, ok := strings.CutPrefix(ep, "unix:"); ok { + return suff + } + if suff, ok := strings.CutPrefix(ep, "unixs:"); ok { + return suff + } + return ep +} diff --git a/pkg/etcd/s3/s3.go b/pkg/etcd/s3/s3.go index c1158a0bc3b2..39ffecd2bc2e 100644 --- a/pkg/etcd/s3/s3.go +++ b/pkg/etcd/s3/s3.go @@ -416,7 +416,7 @@ func (c *Client) SnapshotRetention(ctx context.Context, retention int, prefix st logrus.Infof("Removing S3 snapshot: s3://%s/%s", c.etcdS3.Bucket, df.Key) key := path.Base(df.Key) - if err := c.DeleteSnapshot(ctx, key); err != nil { + if err := c.DeleteSnapshot(ctx, key); err != nil && !snapshot.IsNotExist(err) { return deleted, err } deleted = append(deleted, key) @@ -431,14 +431,27 @@ func (c *Client) DeleteSnapshot(ctx context.Context, key string) error { defer cancel() key = path.Join(c.etcdS3.Folder, key) - err := c.mc.RemoveObject(ctx, c.etcdS3.Bucket, key, minio.RemoveObjectOptions{}) - if err == nil || snapshot.IsNotExist(err) { - metadataKey := path.Join(path.Dir(key), snapshot.MetadataDir, path.Base(key)) - if merr := c.mc.RemoveObject(ctx, c.etcdS3.Bucket, metadataKey, minio.RemoveObjectOptions{}); merr != nil && !snapshot.IsNotExist(merr) { - err = merr + _, err := c.mc.StatObject(ctx, c.etcdS3.Bucket, key, minio.StatObjectOptions{}) + if err == nil { + if err := c.mc.RemoveObject(ctx, c.etcdS3.Bucket, key, minio.RemoveObjectOptions{}); err != nil { + return err } } + // check for and try to delete the metadata regardless of whether or not the + // snapshot existed, just to ensure that things are cleaned up in the case of + // ephemeral errors. Metadata delete errors are only exposed if the object + // exists and fails to delete. 
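A quick aside on pkg/etcd/resolver.go above before the rest of the S3 changes: the endpoint munging in interpret is easiest to follow with concrete values. A hedged table-test sketch — the expected outputs are derived by reading interpret, not taken from the patch:

func Test_UnitInterpret(t *testing.T) {
	cases := []struct {
		ep, wantAddr, wantServerName string
	}{
		// http(s) URLs use host:port for both the address and the server name
		{"https://10.0.0.1:2379", "10.0.0.1:2379", "10.0.0.1:2379"},
		// absolute unix socket URLs are normalized to unix://<path>, named by the socket file
		{"unix:///var/run/k3s.sock", "unix:///var/run/k3s.sock", "k3s.sock"},
		// bare host:port endpoints pass through unchanged
		{"10.0.0.1:2379", "10.0.0.1:2379", "10.0.0.1:2379"},
	}
	for _, c := range cases {
		addr, serverName := interpret(c.ep)
		if addr != c.wantAddr || serverName != c.wantServerName {
			t.Errorf("interpret(%q) = %q, %q; want %q, %q", c.ep, addr, serverName, c.wantAddr, c.wantServerName)
		}
	}
}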
+ metadataKey := path.Join(path.Dir(key), snapshot.MetadataDir, path.Base(key)) + _, merr := c.mc.StatObject(ctx, c.etcdS3.Bucket, metadataKey, minio.StatObjectOptions{}) + if merr == nil { + if err := c.mc.RemoveObject(ctx, c.etcdS3.Bucket, metadataKey, minio.RemoveObjectOptions{}); err != nil { + return err + } + } + + // return the error from the snapshot StatObject call, so that callers can determine + // if the object was actually deleted or not by checking for a NotFound error. return err } diff --git a/pkg/etcd/snapshot.go b/pkg/etcd/snapshot.go index f2999f093080..90919c2403af 100644 --- a/pkg/etcd/snapshot.go +++ b/pkg/etcd/snapshot.go @@ -11,6 +11,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "sort" "strconv" "strings" @@ -26,7 +27,7 @@ import ( "github.com/pkg/errors" "github.com/robfig/cron/v3" "github.com/sirupsen/logrus" - snapshotv3 "go.etcd.io/etcd/etcdutl/v3/snapshot" + snapshotv3 "go.etcd.io/etcd/client/v3/snapshot" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -41,6 +42,7 @@ import ( const ( errorTTL = 24 * time.Hour + s3ReconcileTTL = time.Minute snapshotListPageSize = 20 ) @@ -241,7 +243,7 @@ func (e *ETCD) Snapshot(ctx context.Context) (*managed.SnapshotResult, error) { var sf *snapshot.File - if err := snapshotv3.NewV3(e.client.GetLogger()).Save(ctx, *cfg, snapshotPath); err != nil { + if err := snapshotv3.Save(ctx, e.client.GetLogger(), *cfg, snapshotPath); err != nil { sf = &snapshot.File{ Name: snapshotName, Location: "", @@ -363,7 +365,7 @@ func (e *ETCD) Snapshot(ctx context.Context) (*managed.SnapshotResult, error) { } } - return res, e.ReconcileSnapshotData(ctx) + return res, e.reconcileSnapshotData(ctx, res) } // listLocalSnapshots provides a list of the currently stored @@ -464,7 +466,7 @@ func (e *ETCD) PruneSnapshots(ctx context.Context) (*managed.SnapshotResult, err res.Deleted = append(res.Deleted, deleted...) } } - return res, e.ReconcileSnapshotData(ctx) + return res, e.reconcileSnapshotData(ctx, res) } // ListSnapshots returns a list of snapshots. Local snapshots are always listed, @@ -555,7 +557,7 @@ func (e *ETCD) DeleteSnapshots(ctx context.Context, snapshots []string) (*manage } } - return res, e.ReconcileSnapshotData(ctx) + return res, e.reconcileSnapshotData(ctx, res) } func (e *ETCD) deleteSnapshot(snapshotPath string) error { @@ -647,9 +649,17 @@ func (e *ETCD) emitEvent(esf *k3s.ETCDSnapshotFile) { } // ReconcileSnapshotData reconciles snapshot data in the ETCDSnapshotFile resources. -// It will reconcile snapshot data from disk locally always, and if S3 is enabled, will attempt to list S3 snapshots -// and reconcile snapshots from S3. +// It will reconcile snapshot data from disk locally always, and if S3 is enabled, will attempt to +// list S3 snapshots and reconcile snapshots from S3. func (e *ETCD) ReconcileSnapshotData(ctx context.Context) error { + return e.reconcileSnapshotData(ctx, nil) +} + +// reconcileSnapshotData reconciles snapshot data in the ETCDSnapshotFile resources. +// It will reconcile snapshot data from disk locally always, and if S3 is enabled, will attempt to +// list S3 snapshots and reconcile snapshots from S3. Any snapshots listed in the Deleted field of +// the provided SnapshotResult are deleted, even if they are within a retention window. +func (e *ETCD) reconcileSnapshotData(ctx context.Context, res *managed.SnapshotResult) error { // make sure the core.Factory is initialized. There can // be a race between this and the core startup code.
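// The loop below simply polls until the core factory has been set on the runtime, so that the ETCDSnapshotFile resources can be listed and updated safely.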
for e.config.Runtime.Core == nil { @@ -726,6 +736,7 @@ func (e *ETCD) ReconcileSnapshotData(ctx context.Context) error { snapshots := e.config.Runtime.K3s.K3s().V1().ETCDSnapshotFile() snapshotPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (k8sruntime.Object, error) { return snapshots.List(opts) })) snapshotPager.PageSize = snapshotListPageSize + now := time.Now().Round(time.Second) // List all snapshots matching the selector // If a snapshot from Kubernetes was found on disk/s3, it is in sync and we can remove it from the map to sync. @@ -742,10 +753,20 @@ func (e *ETCD) ReconcileSnapshotData(ctx context.Context) error { // exists in both and names match, don't need to sync delete(snapshotFiles, sfKey) } else { - // doesn't exist on disk - if it's an error that hasn't expired yet, leave it, otherwise remove it - if esf.Status.Error != nil && esf.Status.Error.Time != nil { + // doesn't exist on disk/s3 + if res != nil && slices.Contains(res.Deleted, esf.Spec.SnapshotName) { + // snapshot has been intentionally deleted, skip checking for expiration + } else if esf.Status.Error != nil && esf.Status.Error.Time != nil { expires := esf.Status.Error.Time.Add(errorTTL) - if time.Now().Before(expires) { + if now.Before(expires) { + // it's an error that hasn't expired yet, leave it + return nil + } + } else if esf.Spec.S3 != nil { + expires := esf.ObjectMeta.CreationTimestamp.Add(s3ReconcileTTL) + if now.Before(expires) { + // it's an s3 snapshot that's only just been created, leave it to prevent a race condition + // when multiple nodes are uploading snapshots at the same time. return nil } } @@ -754,6 +775,7 @@ func (e *ETCD) ReconcileSnapshotData(ctx context.Context) error { } else { logrus.Debugf("Key %s not found in snapshotFile list", sfKey) } + // otherwise remove it logrus.Infof("Deleting ETCDSnapshotFile for %s", esf.Spec.SnapshotName) if err := snapshots.Delete(esf.Name, &metav1.DeleteOptions{}); err != nil { logrus.Errorf("Failed to delete ETCDSnapshotFile: %v", err) @@ -817,18 +839,17 @@ func (e *ETCD) ReconcileSnapshotData(ctx context.Context) error { } // Update our Node object to note the timestamp of the snapshot storages that have been reconciled - now := time.Now().Round(time.Second).Format(time.RFC3339) patch := []map[string]string{ { "op": "add", - "value": now, + "value": now.Format(time.RFC3339), "path": "/metadata/annotations/" + strings.ReplaceAll(annotationLocalReconciled, "/", "~1"), }, } if e.config.EtcdS3 != nil { patch = append(patch, map[string]string{ "op": "add", - "value": now, + "value": now.Format(time.RFC3339), "path": "/metadata/annotations/" + strings.ReplaceAll(annotationS3Reconciled, "/", "~1"), }) } diff --git a/pkg/flock/flock_unix.go b/pkg/flock/flock_unix.go index 49b5465bced5..eb4253aa2621 100644 --- a/pkg/flock/flock_unix.go +++ b/pkg/flock/flock_unix.go @@ -19,9 +19,6 @@ limitations under the License. 
package flock import ( - "os/exec" - "strings" - "golang.org/x/sys/unix" ) @@ -49,14 +46,3 @@ func AcquireShared(path string) (int, error) { func Release(lock int) error { return unix.Flock(lock, unix.LOCK_UN) } - -// CheckLock checks whether any process is using the lock -func CheckLock(path string) bool { - lockByte, _ := exec.Command("lsof", "-w", "-F", "ln", path).Output() - locks := string(lockByte) - if locks == "" { - return false - } - readWriteLock := strings.Split(locks, "\n")[2] - return readWriteLock == "lR" || readWriteLock == "lW" -} diff --git a/pkg/flock/flock_unix_test.go b/pkg/flock/flock_unix_test.go index a47015fa17a1..7eb8794aea85 100644 --- a/pkg/flock/flock_unix_test.go +++ b/pkg/flock/flock_unix_test.go @@ -19,9 +19,22 @@ limitations under the License. package flock import ( + "os/exec" + "strings" "testing" ) +// checkLock checks whether any process is using the lock +func checkLock(path string) bool { + lockByte, _ := exec.Command("lsof", "-w", "-F", "lfn", path).Output() + locks := string(lockByte) + if locks == "" { + return false + } + readWriteLock := strings.Split(locks, "\n")[2] + return readWriteLock == "lR" || readWriteLock == "lW" +} + func Test_UnitFlock(t *testing.T) { tests := []struct { name string @@ -45,7 +58,7 @@ func Test_UnitFlock(t *testing.T) { return } - if got := CheckLock(tt.path); got != tt.wantCheck { + if got := checkLock(tt.path); got != tt.wantCheck { t.Errorf("CheckLock() = %+v\nWant = %+v", got, tt.wantCheck) } @@ -53,7 +66,7 @@ func Test_UnitFlock(t *testing.T) { t.Errorf("Release() error = %v, wantErr %v", err, tt.wantErr) } - if got := CheckLock(tt.path); got == tt.wantCheck { + if got := checkLock(tt.path); got == tt.wantCheck { t.Errorf("CheckLock() = %+v\nWant = %+v", got, !tt.wantCheck) } }) diff --git a/pkg/rootless/rootless.go b/pkg/rootless/rootless.go index f584e7b4a7cd..558c923c7576 100644 --- a/pkg/rootless/rootless.go +++ b/pkg/rootless/rootless.go @@ -32,6 +32,7 @@ var ( enableIPv6Env = "K3S_ROOTLESS_ENABLE_IPV6" portDriverEnv = "K3S_ROOTLESS_PORT_DRIVER" disableLoopbackEnv = "K3S_ROOTLESS_DISABLE_HOST_LOOPBACK" + copyUpDirsEnv = "K3S_ROOTLESS_COPYUPDIRS" ) func Rootless(stateDir string, enableIPv6 bool) error { @@ -218,6 +219,9 @@ func createChildOpt(driver portDriver) (*child.Opt, error) { opt.NetworkDriver = slirp4netns.NewChildDriver() opt.PortDriver = driver.NewChildDriver() opt.CopyUpDirs = []string{"/etc", "/var/run", "/run", "/var/lib"} + if copyUpDirs := os.Getenv(copyUpDirsEnv); copyUpDirs != "" { + opt.CopyUpDirs = append(opt.CopyUpDirs, strings.Split(copyUpDirs, ",")...) + } opt.CopyUpDriver = tmpfssymlink.NewChildDriver() opt.MountProcfs = true opt.Reaper = true diff --git a/pkg/server/cert.go b/pkg/server/cert.go index bde425196ea3..ea03a305dfd1 100644 --- a/pkg/server/cert.go +++ b/pkg/server/cert.go @@ -75,6 +75,10 @@ func caCertReplace(server *config.Control, buf io.ReadCloser, force bool) error return err } + if err := defaultBootstrap(server, tmpServer); err != nil { + return errors.Wrap(err, "failed to set default bootstrap values") + } + if err := validateBootstrap(server, tmpServer); err != nil { if !force { return errors.Wrap(err, "failed to validate new CA certificates and keys") @@ -85,27 +89,16 @@ func caCertReplace(server *config.Control, buf io.ReadCloser, force bool) error return cluster.Save(context.TODO(), tmpServer, true) } -// validateBootstrap checks the new certs and keys to ensure that the cluster would function properly were they to be used. 
-// - The new leaf CA certificates must be verifiable using the same root and intermediate certs as the current leaf CA certificates. -// - The new service account signing key bundle must include the currently active signing key. -func validateBootstrap(oldServer, newServer *config.Control) error { +// defaultBootstrap provides default values from the existing bootstrap fields +// if the value is not tagged for rotation, or the current value is empty. +func defaultBootstrap(oldServer, newServer *config.Control) error { errs := []error{} - // Use reflection to iterate over all of the bootstrap fields, checking files at each of the new paths. oldMeta := reflect.ValueOf(&oldServer.Runtime.ControlRuntimeBootstrap).Elem() newMeta := reflect.ValueOf(&newServer.Runtime.ControlRuntimeBootstrap).Elem() - fields := []reflect.StructField{} + // use the existing file if the new file does not exist or is empty for _, field := range reflect.VisibleFields(oldMeta.Type()) { - // Only handle bootstrap fields tagged for rotation - if field.Tag.Get("rotate") != "true" { - continue - } - fields = append(fields, field) - } - - // first pass: use the existing file if the new file does not exist or is empty - for _, field := range fields { newVal := newMeta.FieldByName(field.Name) info, err := os.Stat(newVal.String()) if err != nil && !errors.Is(err, fs.ErrNotExist) { @@ -113,20 +106,34 @@ func validateBootstrap(oldServer, newServer *config.Control) error { continue } - if info == nil || info.Size() == 0 { + if field.Tag.Get("rotate") != "true" || info == nil || info.Size() == 0 { if newVal.CanSet() { oldVal := oldMeta.FieldByName(field.Name) - logrus.Infof("certificate: %s not provided; using current value %s", field.Name, oldVal) + logrus.Infof("Using current data for %s: %s", field.Name, oldVal) newVal.Set(oldVal) } else { errs = append(errs, fmt.Errorf("cannot use current data for %s; field is not settable", field.Name)) } } - } + return merr.NewErrors(errs...) +} - // second pass: validate file contents - for _, field := range fields { +// validateBootstrap checks the new certs and keys to ensure that the cluster would function properly were they to be used. +// - The new leaf CA certificates must be verifiable using the same root and intermediate certs as the current leaf CA certificates. +// - The new service account signing key bundle must include the currently active signing key. +func validateBootstrap(oldServer, newServer *config.Control) error { + errs := []error{} + + // Use reflection to iterate over all of the bootstrap fields, checking files at each of the new paths. + oldMeta := reflect.ValueOf(&oldServer.Runtime.ControlRuntimeBootstrap).Elem() + newMeta := reflect.ValueOf(&newServer.Runtime.ControlRuntimeBootstrap).Elem() + + for _, field := range reflect.VisibleFields(oldMeta.Type()) { + // Only handle bootstrap fields tagged for rotation + if field.Tag.Get("rotate") != "true" { + continue + } oldVal := oldMeta.FieldByName(field.Name) newVal := newMeta.FieldByName(field.Name) @@ -150,10 +157,7 @@ func validateBootstrap(oldServer, newServer *config.Control) error { } } - if len(errs) > 0 { - return merr.NewErrors(errs...) - } - return nil + return merr.NewErrors(errs...) 
} func validateCA(oldCAPath, newCAPath string) error { diff --git a/scripts/airgap/image-list.txt b/scripts/airgap/image-list.txt index 4e8f4d41d206..3f700553e029 100644 --- a/scripts/airgap/image-list.txt +++ b/scripts/airgap/image-list.txt @@ -1,6 +1,6 @@ -docker.io/rancher/klipper-helm:v0.9.2-build20240828 +docker.io/rancher/klipper-helm:v0.9.3-build20241008 docker.io/rancher/klipper-lb:v0.4.9 -docker.io/rancher/local-path-provisioner:v0.0.28 +docker.io/rancher/local-path-provisioner:v0.0.30 docker.io/rancher/mirrored-coredns-coredns:1.11.3 docker.io/rancher/mirrored-library-busybox:1.36.1 docker.io/rancher/mirrored-library-traefik:2.11.10 diff --git a/scripts/build-tests-sonobuoy b/scripts/build-tests-sonobuoy index a016e6a4e4ce..3cae271b4ba6 100755 --- a/scripts/build-tests-sonobuoy +++ b/scripts/build-tests-sonobuoy @@ -14,7 +14,7 @@ PKG_TO_TEST=$(find ./pkg/ -type f -name "*_int_test.go" | sed -r 's|/[^/]+$||' | for i in $PKG_TO_TEST; do name=$(echo "${i##*/}") echo $name - go test -c -v -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'" -o dist/artifacts/k3s-integration-$name.test $i -run Integration + go test -c -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'" -o dist/artifacts/k3s-integration-$name.test $i -run Integration -ginkgo.v -test.v done # Integration tests under /tests @@ -22,7 +22,7 @@ PKG_TO_TEST=$(find ./tests/integration -type f -name "*_int_test.go" | sed -r 's for i in $PKG_TO_TEST; do name=$(echo "${i##*/}") echo $name - go test -c -v -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'" -o dist/artifacts/k3s-integration-$name.test $i -run Integration + go test -c -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'" -o dist/artifacts/k3s-integration-$name.test $i -run Integration -ginkgo.v -test.v done docker build -f ./tests/integration/Dockerfile.test -t $REPO . 
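The `-ldflags "-X ..."` argument used in this script overwrites a package-level string variable at link time; the hook it targets lives in tests/integration/integration.go and looks approximately like the sketch below (the variable's default and the helper's body are paraphrased, not copied from the patch):

package integration

// existingServer defaults to "False"; building the test binaries with
// -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'"
// rewrites it in the compiled binary, so the tests target an
// already-running server instead of starting their own.
var existingServer = "False"

func IsExistingServer() bool {
	return existingServer == "True"
}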
docker save $REPO -o ./dist/artifacts/$REPO.tar diff --git a/scripts/image_scan.sh b/scripts/image_scan.sh index d057d09a31dd..09169519f07f 100755 --- a/scripts/image_scan.sh +++ b/scripts/image_scan.sh @@ -31,7 +31,11 @@ TRIVY_TEMPLATE='{{- $critical := 0 }}{{- $high := 0 }} {{- end -}} {{ end }} Vulnerabilities - Critical: {{ $critical }}, High: {{ $high }}{{ println }}' +VEX_REPORT="rancher.openvex.json" -trivy --quiet image --severity ${SEVERITIES} --no-progress --ignore-unfixed --format template --template "${TRIVY_TEMPLATE}" ${IMAGE} +# Download Rancher's VEX Hub standalone report +curl -fsS -o ${VEX_REPORT} https://raw.githubusercontent.com/rancher/vexhub/refs/heads/main/reports/rancher.openvex.json + +trivy --quiet image --severity ${SEVERITIES} --vex ${VEX_REPORT} --no-progress --ignore-unfixed --format template --template "${TRIVY_TEMPLATE}" ${IMAGE} exit 0 diff --git a/tests/e2e/amd64_resource_files/loadbalancer-extTrafficPol.yaml b/tests/e2e/amd64_resource_files/loadbalancer-extTrafficPol.yaml new file mode 100644 index 000000000000..50b4b0012b29 --- /dev/null +++ b/tests/e2e/amd64_resource_files/loadbalancer-extTrafficPol.yaml @@ -0,0 +1,62 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-config +data: + default.conf: | + server { + listen 80; + location /ip { + return 200 "$remote_addr\n"; + } + # Default location block to serve the default "Welcome to nginx" page + location / { + root /usr/share/nginx/html; + index index.html; + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-loadbalancer-ext +spec: + selector: + matchLabels: + k8s-app: nginx-app-loadbalancer-ext + replicas: 1 + template: + metadata: + labels: + k8s-app: nginx-app-loadbalancer-ext + spec: + containers: + - name: nginx + image: ranchertest/mytestcontainer + ports: + - containerPort: 80 + volumeMounts: + - name: nginx-config-volume + mountPath: /etc/nginx/conf.d + volumes: + - name: nginx-config-volume + configMap: + name: nginx-config +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-loadbalancer-svc-ext + labels: + k8s-app: nginx-app-loadbalancer-ext +spec: + type: LoadBalancer + externalTrafficPolicy: Local + ports: + - port: 82 + targetPort: 80 + protocol: TCP + name: http + selector: + k8s-app: nginx-app-loadbalancer-ext diff --git a/tests/e2e/amd64_resource_files/loadbalancer-intTrafficPol.yaml b/tests/e2e/amd64_resource_files/loadbalancer-intTrafficPol.yaml new file mode 100644 index 000000000000..5cc9e96f5e35 --- /dev/null +++ b/tests/e2e/amd64_resource_files/loadbalancer-intTrafficPol.yaml @@ -0,0 +1,63 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-config +data: + default.conf: | + server { + listen 80; + location /ip { + return 200 "$remote_addr\n"; + } + # Default location block to serve the default "Welcome to nginx" page + location / { + root /usr/share/nginx/html; + index index.html; + } + } + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-loadbalancer-int +spec: + selector: + matchLabels: + k8s-app: nginx-app-loadbalancer-int + replicas: 1 + template: + metadata: + labels: + k8s-app: nginx-app-loadbalancer-int + spec: + containers: + - name: nginx + image: ranchertest/mytestcontainer + ports: + - containerPort: 80 + volumeMounts: + - name: nginx-config-volume + mountPath: /etc/nginx/conf.d + volumes: + - name: nginx-config-volume + configMap: + name: nginx-config +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-loadbalancer-svc-int + labels: + k8s-app: nginx-app-loadbalancer-int +spec: + 
type: LoadBalancer + internalTrafficPolicy: Local + ports: + - port: 83 + targetPort: 80 + protocol: TCP + name: http + selector: + k8s-app: nginx-app-loadbalancer-int diff --git a/tests/e2e/amd64_resource_files/loadbalancer.yaml b/tests/e2e/amd64_resource_files/loadbalancer.yaml index 0897d215aea2..3a5dfac418fb 100644 --- a/tests/e2e/amd64_resource_files/loadbalancer.yaml +++ b/tests/e2e/amd64_resource_files/loadbalancer.yaml @@ -1,4 +1,22 @@ --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-config +data: + default.conf: | + server { + listen 80; + location /ip { + return 200 "$remote_addr\n"; + } + # Default location block to serve the default "Welcome to nginx" page + location / { + root /usr/share/nginx/html; + index index.html; + } + } +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -18,6 +36,13 @@ spec: image: ranchertest/mytestcontainer ports: - containerPort: 80 + volumeMounts: + - name: nginx-config-volume + mountPath: /etc/nginx/conf.d + volumes: + - name: nginx-config-volume + configMap: + name: nginx-config --- apiVersion: v1 kind: Service diff --git a/tests/e2e/scripts/latest_commit.sh b/tests/e2e/scripts/latest_commit.sh index 548d96557a19..787bc11df1a0 100755 --- a/tests/e2e/scripts/latest_commit.sh +++ b/tests/e2e/scripts/latest_commit.sh @@ -1,8 +1,11 @@ #!/bin/bash -# Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build + +branch=$1 +output_file=$2 +# Grabs the last 10 commit SHAs from the given branch, then purges any commits that do not have a passing CI build iterations=0 -curl -s -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/k3s-io/k3s/commits?per_page=5&sha=$1" | jq -r '.[] | .sha' &> $2 -# The VMs take time on startup to hit googleapis.com, wait loop until we can + +# The VMs take time on startup to reach AWS; wait in a loop until we can while ! curl -s --fail https://k3s-ci-builds.s3.amazonaws.com > /dev/null; do ((iterations++)) if [ "$iterations" -ge 30 ]; then @@ -12,15 +15,34 @@ while ! curl -s --fail https://k3s-ci-builds.s3.amazonaws.com > /dev/null; do sleep 1 done -iterations=0 -curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-$(head -n 1 $2).sha256sum -while [ $? 
-ne 0 ]; do - ((iterations++)) - if [ "$iterations" -ge 6 ]; then - echo "No valid commits found" - exit 1 +if [ -n "$GH_TOKEN" ]; then + response=$(curl -s -H "Authorization: token $GH_TOKEN" -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/k3s-io/k3s/commits?per_page=10&sha=$branch") +else + response=$(curl -s -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/k3s-io/k3s/commits?per_page=10&sha=$branch") +fi +type=$(echo "$response" | jq -r type) + +# Verify that the response is an array with the k3s commits +if [[ $type == "object" ]]; then + message=$(echo "$response" | jq -r .message) + if [[ $message == "API rate limit exceeded for "* ]]; then + echo "GitHub API rate limit exceeded" + exit 1 + fi + echo "GitHub API returned an unexpected response: ${message}" + exit 1 +elif [[ $type == "array" ]]; then + commits_str=$(echo "$response" | jq -j -r '.[] | .sha, " "') +fi + +read -a commits <<< "$commits_str" + +for commit in "${commits[@]}"; do + if curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-$commit.sha256sum > /dev/null; then + echo "$commit" > "$output_file" + exit 0 fi - sed -i 1d "$2" - sleep 1 - curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-$(head -n 1 $2).sha256sum done + +echo "Failed to find a valid commit, checked: " "${commits[@]}" +exit 1 \ No newline at end of file diff --git a/tests/e2e/scripts/run_tests.sh b/tests/e2e/scripts/run_tests.sh index d26b732599aa..a30b6b478eed 100755 --- a/tests/e2e/scripts/run_tests.sh +++ b/tests/e2e/scripts/run_tests.sh @@ -6,6 +6,26 @@ agentcount=${3:-1} db=${4:-"etcd"} hardened=${5:-""} +# cleanup destroys any stale libvirt networks and domains, plus leftover Vagrant environments from previous runs +cleanup() { + for net in $(virsh net-list --all | tail -n +2 | tr -s ' ' | cut -d ' ' -f2 | grep -v default); do + virsh net-destroy "$net" + virsh net-undefine "$net" + done + + for domain in $(virsh list --all | tail -n +2 | tr -s ' ' | cut -d ' ' -f3); do + virsh destroy "$domain" + virsh undefine "$domain" --remove-all-storage + done + + for vm in $(vagrant global-status |tr -s ' '|tail +3 |grep "/" |cut -d ' ' -f5); do + cd $vm + vagrant destroy -f + cd ..
+ done + # Prune Vagrant global status + vagrant global-status --prune +} + E2E_EXTERNAL_DB=$db && export E2E_EXTERNAL_DB E2E_REGISTRY=true && export E2E_REGISTRY @@ -18,7 +38,10 @@ OS=$(echo "$nodeOS"|cut -d'/' -f2) echo "$OS" vagrant global-status | awk '/running/'|cut -c1-7| xargs -r -d '\n' -n 1 -- vagrant destroy -f -E2E_RELEASE_CHANNEL="commit" && export E2E_RELEASE_CHANNEL + +# To reduce GH API requests, we grab the latest commit on the host and pass it to the tests +./scripts/latest_commit.sh master latest_commit.txt +E2E_RELEASE_VERSION=$(cat latest_commit.txt) && export E2E_RELEASE_VERSION echo 'RUNNING DUALSTACK TEST' E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v dualstack/dualstack_test.go -nodeOS="$nodeOS" -serverCount=1 -agentCount=1 -timeout=30m -json -ci |tee k3s_"$OS".log @@ -44,6 +67,8 @@ echo 'RUNNING SNAPSHOT AND RESTORE TEST' echo 'RUNNING ROTATE CUSTOM CA TEST' /usr/local/go/bin/go test -v rotateca/rotateca_test.go -nodeOS="$nodeOS" -serverCount=1 -agentCount=1 -timeout=30m -json -ci | tee -a k3s_"$OS".log +# For the upgrade test we use the release channel install as the starting point +unset E2E_RELEASE_VERSION E2E_RELEASE_CHANNEL="latest" && export E2E_RELEASE_CHANNEL echo 'RUNNING CLUSTER UPGRADE TEST' E2E_REGISTRY=true /usr/local/go/bin/go test -v upgradecluster/upgradecluster_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -agentCount=$((agentcount)) -timeout=1h -json -ci | tee -a k3s_"$OS".log diff --git a/tests/e2e/svcpoliciesandfirewall/Vagrantfile b/tests/e2e/svcpoliciesandfirewall/Vagrantfile new file mode 100644 index 000000000000..a09e42484023 --- /dev/null +++ b/tests/e2e/svcpoliciesandfirewall/Vagrantfile @@ -0,0 +1,75 @@ +ENV['VAGRANT_NO_PARALLEL'] = 'no' +NODE_ROLES = (ENV['E2E_NODE_ROLES'] || + ["server-0", "agent-0" ]) +NODE_BOXES = (ENV['E2E_NODE_BOXES'] || + ['bento/ubuntu-24.04', 'bento/ubuntu-24.04']) +GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master") +RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "") +GOCOVER = (ENV['E2E_GOCOVER'] || "") +NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i +NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i +NETWORK4_PREFIX = "10.10.10" +install_type = "" + +def provision(vm, role, role_num, node_num) + vm.box = NODE_BOXES[node_num] + vm.hostname = role + node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}" + vm.network "private_network", :ip => node_ip4, :netmask => "255.255.255.0" + + scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" + vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? 
"./vagrantdefaults.rb" : "../vagrantdefaults.rb" + load vagrant_defaults + + defaultOSConfigure(vm) + addCoverageDir(vm, role, GOCOVER) + install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) + + if role.include?("server") && role_num == 0 + vm.provision :k3s, run: 'once' do |k3s| + k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + k3s.args = "server " + k3s.config = <<~YAML + node-ip: #{node_ip4} + token: vagrant + YAML + k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type] + end + end + if role.include?("agent") + vm.provision :k3s, run: 'once' do |k3s| + k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + k3s.args = "agent " + k3s.config = <<~YAML + server: https://#{NETWORK4_PREFIX}.100:6443 + token: vagrant + node-ip: #{node_ip4} + YAML + k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type] + end + end +end + +Vagrant.configure("2") do |config| + config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt", "vagrant-scp"] + config.vm.provider "libvirt" do |v| + v.cpus = NODE_CPUS + v.memory = NODE_MEMORY + # We replicate the default prefix, but add a timestamp to enable parallel runs and cleanup of old VMs + v.default_prefix = File.basename(Dir.getwd) + "_" + Time.now.to_i.to_s + "_" + end + + if NODE_ROLES.kind_of?(String) + NODE_ROLES = NODE_ROLES.split(" ", -1) + end + if NODE_BOXES.kind_of?(String) + NODE_BOXES = NODE_BOXES.split(" ", -1) + end + + NODE_ROLES.each_with_index do |role, i| + role_num = role.split("-", -1).pop.to_i + config.vm.define role do |node| + provision(node.vm, role, role_num, i) + end + end +end diff --git a/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go b/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go new file mode 100644 index 000000000000..53128947a234 --- /dev/null +++ b/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go @@ -0,0 +1,358 @@ +// This test verifies: +// * externaltrafficpolicy for both local and cluster values +// * internaltrafficpolicy for both local and cluster values +// * services firewall based on loadBalancerSourceRanges field + +package svcpoliciesandfirewall + +import ( + "flag" + "fmt" + "os" + "strings" + "testing" + "text/template" + + "github.com/k3s-io/k3s/tests/e2e" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// Valid nodeOS: bento/ubuntu-24.04, opensuse/Leap-15.6.x86_64 +var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system") +var serverCount = flag.Int("serverCount", 1, "number of server nodes") +var agentCount = flag.Int("agentCount", 1, "number of agent nodes") +var ci = flag.Bool("ci", false, "running on CI") +var local = flag.Bool("local", false, "deploy a locally built K3s binary") + +func Test_E2EPoliciesAndFirewall(t *testing.T) { + flag.Parse() + RegisterFailHandler(Fail) + suiteConfig, reporterConfig := GinkgoConfiguration() + RunSpecs(t, "Services Traffic Policies and Firewall config Suite", suiteConfig, reporterConfig) +} + +var ( + kubeConfigFile string + serverNodeNames []string + agentNodeNames []string + nodes []e2e.Node +) + +var _ = ReportAfterEach(e2e.GenReport) + +var _ = Describe("Verify Services Traffic policies and firewall config", Ordered, func() { + + It("Starts up with no issues", func() { + var err error + if *local { + serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + } else { + serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + } + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + fmt.Println("CLUSTER CONFIG") + fmt.Println("OS:", *nodeOS) + fmt.Println("Server Nodes:", serverNodeNames) + fmt.Println("Agent Nodes:", agentNodeNames) + kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + var err error + nodes, err = e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "300s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "300s", "5s").Should(Succeed()) + _, err := e2e.ParsePods(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + // Verifies that the service with external traffic policy=local is deployed + // Verifies that the external-ip is only set to the node IP where the server runs + // It also verifies that the service with external traffic policy=cluster has both node IPs as externalIP + It("Verify external traffic policy=local gets set up correctly", func() { + _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred(), "loadbalancer not deployed") + _, err = e2e.DeployWorkload("loadbalancer-extTrafficPol.yaml", kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred(), "loadbalancer-extTrafficPol not deployed") + + // Check where the server pod is running + var serverNodeName string + Eventually(func() (string, error) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred(), "failed to parse pods") + for _, pod := range pods { + if strings.Contains(pod.Name, "test-loadbalancer-ext") { + serverNodeName = pod.Node + break + } + } + return serverNodeName, nil + }, "25s", "5s").ShouldNot(BeEmpty(), "server pod not found") + + var serverNodeIP 
string + for _, node := range nodes { + if node.Name == serverNodeName { + serverNodeIP = node.InternalIP + } + } + + // Verify there is only one external IP and that it matches the node IP + lbSvc := "nginx-loadbalancer-svc" + lbSvcExt := "nginx-loadbalancer-svc-ext" + Eventually(func() ([]string, error) { + return e2e.FetchExternalIPs(kubeConfigFile, lbSvc) + }, "25s", "5s").Should(HaveLen(2), "external IP count not equal to 2") + + Eventually(func(g Gomega) { + externalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvcExt) + g.Expect(externalIPs).To(HaveLen(1), "more than 1 externalIP found") + g.Expect(externalIPs[0]).To(Equal(serverNodeIP), "external IP does not match serverNodeIP") + }, "25s", "5s").Should(Succeed()) + }) + + // Verifies that the service is reachable from the outside and the source IP is not MASQed + // It also verifies that the service with external traffic policy=cluster can be accessed and the source IP is MASQed + It("Verify connectivity in external traffic policy=local", func() { + lbSvc := "nginx-loadbalancer-svc" + lbSvcExternalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvc) + lbSvcExt := "nginx-loadbalancer-svc-ext" + lbSvcExtExternalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvcExt) + + // Verify connectivity to the external IP of the lbsvc service and the IP should be the flannel interface IP because of MASQ + for _, externalIP := range lbSvcExternalIPs { + Eventually(func() (string, error) { + cmd := "curl -s " + externalIP + ":81/ip" + return e2e.RunCommand(cmd) + }, "25s", "5s").Should(ContainSubstring("10.42")) + } + + // Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP + Eventually(func() (string, error) { + cmd := "curl -s " + lbSvcExtExternalIPs[0] + ":82/ip" + return e2e.RunCommand(cmd) + }, "25s", "5s").ShouldNot(ContainSubstring("10.42")) + + + // Verify connectivity to the other node IPs does not work because of external traffic policy=local + for _, externalIP := range lbSvcExternalIPs { + if externalIP == lbSvcExtExternalIPs[0] { + // We already tested this IP and it should work + continue + } + Eventually(func() error { + cmd := "curl -s --max-time 5 " + externalIP + ":82/ip" + _, err := e2e.RunCommand(cmd) + return err + }, "40s", "5s").Should(MatchError(ContainSubstring("exit status"))) + } + }) + + // Verifies that the service with internal traffic policy=local is deployed + It("Verify internal traffic policy=local gets set up correctly", func() { + _, err := e2e.DeployWorkload("loadbalancer-intTrafficPol.yaml", kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred(), "loadbalancer-intTrafficPol not deployed") + _, err = e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred(), "pod client not deployed") + + // Check that service exists + Eventually(func() (string, error) { + clusterIP, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-loadbalancer-svc-int", false) + return clusterIP, nil + }, "25s", "5s").Should(ContainSubstring("10.43")) + + // Check that client pods are running + Eventually(func() string { + pods, err := e2e.ParsePods(kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "client-deployment") { + return pod.Status + } + } + return "" + }, "50s", "5s").Should(Equal("Running")) + }) + + // Verifies that only the client pod running on the same node as the server pod can access the service + // It also verifies that the service with internal traffic policy=cluster can be 
accessed by both client pods + It("Verify connectivity in internal traffic policy=local", func() { + var clientPod1, clientPod1Node, clientPod1IP, clientPod2, clientPod2Node, clientPod2IP, serverNodeName string + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred(), "failed to parse pods") + for _, pod := range pods { + if strings.Contains(pod.Name, "test-loadbalancer-int") { + serverNodeName = pod.Node + } + if strings.Contains(pod.Name, "client-deployment") { + if clientPod1 == "" { + clientPod1 = pod.Name + clientPod1Node = pod.Node + clientPod1IP = pod.IP + } else { + clientPod2 = pod.Name + clientPod2Node = pod.Node + clientPod2IP = pod.IP + } + } + } + // As we need those variables for the connectivity test, let's check that they are not empty + g.Expect(serverNodeName).ShouldNot(BeEmpty(), "server pod for internalTrafficPolicy=local not found") + g.Expect(clientPod1).ShouldNot(BeEmpty(), "client pod1 not found") + g.Expect(clientPod2).ShouldNot(BeEmpty(), "client pod2 not found") + g.Expect(clientPod1Node).ShouldNot(BeEmpty(), "client pod1 node not found") + g.Expect(clientPod2Node).ShouldNot(BeEmpty(), "client pod2 node not found") + g.Expect(clientPod1IP).ShouldNot(BeEmpty(), "client pod1 IP not found") + g.Expect(clientPod2IP).ShouldNot(BeEmpty(), "client pod2 IP not found") + }, "25s", "5s").Should(Succeed(), "All pod names and IPs should be non-empty") + + // Check that clientPod1Node and clientPod2Node are not equal + Expect(clientPod1Node).ShouldNot(Equal(clientPod2Node)) + + var workingCmd, nonWorkingCmd string + if serverNodeName == clientPod1Node { + workingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" + nonWorkingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" + } + if serverNodeName == clientPod2Node { + workingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" + nonWorkingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" + } + + Eventually(func() (string, error) { + return e2e.RunCommand(workingCmd) + }, "25s", "5s").Should(SatisfyAny( + ContainSubstring(clientPod1IP), + ContainSubstring(clientPod2IP), + )) + + // Check that the non-working command fails because of internal traffic policy=local + Eventually(func() bool { + _, err := e2e.RunCommand(nonWorkingCmd) + if err != nil && strings.Contains(err.Error(), "exit status") { + // Treat exit status as a successful condition + return true + } + return false + }, "40s", "5s").Should(BeTrue()) + + // Curl a service with internal traffic policy=cluster. 
It should work on both pods + for _, pod := range []string{clientPod1, clientPod2} { + cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec " + pod + " -- curl -s --max-time 5 nginx-loadbalancer-svc:81/ip" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "20s", "5s").Should(SatisfyAny( + ContainSubstring(clientPod1IP), + ContainSubstring(clientPod2IP), + )) + } + }) + + // Set up the service manifest with loadBalancerSourceRanges + It("Applies service manifest with loadBalancerSourceRanges", func() { + // Define the service manifest with a placeholder for the IP + serviceManifest := ` +apiVersion: v1 +kind: Service +metadata: + name: nginx-loadbalancer-svc-ext-firewall +spec: + type: LoadBalancer + loadBalancerSourceRanges: + - {{.NodeIP}}/32 + ports: + - port: 82 + targetPort: 80 + protocol: TCP + name: http + selector: + k8s-app: nginx-app-loadbalancer-ext +` + // Remove the service nginx-loadbalancer-svc-ext + _, err := e2e.RunCommand("kubectl --kubeconfig=" + kubeConfigFile + " delete svc nginx-loadbalancer-svc-ext") + Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext") + + // Parse and execute the template with the node IP + tmpl, err := template.New("service").Parse(serviceManifest) + Expect(err).NotTo(HaveOccurred()) + + var filledManifest strings.Builder + err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodes[0].InternalIP}) + Expect(err).NotTo(HaveOccurred()) + + // Write the filled manifest to a temporary file + tmpFile, err := os.CreateTemp("", "service-*.yaml") + Expect(err).NotTo(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.WriteString(filledManifest.String()) + Expect(err).NotTo(HaveOccurred()) + tmpFile.Close() + + // Apply the manifest using kubectl + applyCmd := fmt.Sprintf("kubectl --kubeconfig=%s apply -f %s", kubeConfigFile, tmpFile.Name()) + out, err := e2e.RunCommand(applyCmd) + Expect(err).NotTo(HaveOccurred(), out) + + Eventually(func() (string, error) { + clusterIP, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-loadbalancer-svc-ext-firewall", false) + return clusterIP, nil + }, "25s", "5s").Should(ContainSubstring("10.43")) + }) + + // Verify that only the allowed node can curl. That node should be able to curl both externalIPs (i.e. 
node.InternalIP) + It("Verify firewall is working", func() { + for _, node := range nodes { + // Verify connectivity from nodes[0] works because we passed its IP to the loadBalancerSourceRanges + Eventually(func() (string, error) { + cmd := "curl -s --max-time 5 " + node.InternalIP + ":82" + return e2e.RunCmdOnNode(cmd, nodes[0].Name) + }, "40s", "5s").Should(ContainSubstring("Welcome to nginx")) + + // Verify connectivity from nodes[1] fails because we did not pass its IP to the loadBalancerSourceRanges + Eventually(func() error { + cmd := "curl -s --max-time 5 " + node.InternalIP + ":82" + _, err := e2e.RunCmdOnNode(cmd, nodes[1].Name) + return err + }, "40s", "5s").Should(MatchError(ContainSubstring("exit status"))) + } + }) +}) + +var failed bool +var _ = AfterEach(func() { + failed = failed || CurrentSpecReport().Failed() +}) + +var _ = AfterSuite(func() { + if !failed { + Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + } + if !failed || *ci { + Expect(e2e.DestroyCluster()).To(Succeed()) + Expect(os.Remove(kubeConfigFile)).To(Succeed()) + } +}) diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index 8a550da19e95..357e41992978 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -46,6 +46,11 @@ type NodeError struct { Err error } +type SvcExternalIP struct { + IP string `json:"ip"` + IPMode string `json:"ipMode"` +} + type ObjIP struct { Name string IPv4 string @@ -262,6 +267,29 @@ func FetchClusterIP(kubeconfig string, servicename string, dualStack bool) (stri return RunCommand(cmd) } +// FetchExternalIPs fetches the external IPs of a service +func FetchExternalIPs(kubeconfig string, servicename string) ([]string, error) { + var externalIPs []string + cmd := "kubectl get svc " + servicename + " -o jsonpath='{.status.loadBalancer.ingress}' --kubeconfig=" + kubeconfig + output, err := RunCommand(cmd) + if err != nil { + return externalIPs, err + } + + // output is the raw ingress list from the service status, e.g. [{"ip":"10.10.10.100","ipMode":"VIP"}] + var svcExternalIPs []SvcExternalIP + err = json.Unmarshal([]byte(output), &svcExternalIPs) + if err != nil { + return externalIPs, fmt.Errorf("error unmarshalling JSON: %v", err) + } + + // Iterate over the ingress entries and append each IP to the externalIPs slice + for _, ipEntry := range svcExternalIPs { + externalIPs = append(externalIPs, ipEntry.IP) + } + + return externalIPs, nil +} + func FetchIngressIP(kubeconfig string) ([]string, error) { cmd := "kubectl get ing ingress -o jsonpath='{.status.loadBalancer.ingress[*].ip}' --kubeconfig=" + kubeconfig res, err := RunCommand(cmd) diff --git a/tests/e2e/vagrantdefaults.rb b/tests/e2e/vagrantdefaults.rb index 5e57e022bc1b..3d9fc65954d9 100644 --- a/tests/e2e/vagrantdefaults.rb +++ b/tests/e2e/vagrantdefaults.rb @@ -14,11 +14,18 @@ def defaultOSConfigure(vm) end end +# getInstallType is used to control which version of k3s to install +# To install a specific version, set release_version to the version number +# To install a specific commit, set release_version to the commit SHA +# To install the latest commit from a branch, leave release_version empty, +# set release_channel to "commit", and set branch to the branch name def getInstallType(vm, release_version, branch, release_channel='') if release_version == "skip" install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true" - elsif !release_version.empty? + elsif !release_version.empty? && release_version.start_with?("v1") return "INSTALL_K3S_VERSION=#{release_version}" + elsif !release_version.empty? + return "INSTALL_K3S_COMMIT=#{release_version}" elsif !release_channel.empty? 
&& release_channel != "commit" return "INSTALL_K3S_CHANNEL=#{release_channel}" else @@ -26,7 +33,7 @@ def getInstallType(vm, release_version, branch, release_channel='') scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build # MicroOS requires it not be in a /tmp/ or other root system folder - vm.provision "Get latest commit", type: "shell", path: scripts_location +"/latest_commit.sh", args: [branch, "/tmp/k3s_commits"] + vm.provision "Get latest commit", type: "shell", path: scripts_location +"/latest_commit.sh", env: {GH_TOKEN:ENV['GH_TOKEN']}, args: [branch, "/tmp/k3s_commits"] return "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /tmp/k3s_commits)" end end diff --git a/tests/integration/README.md b/tests/integration/README.md index d5c805166319..04e2313c3ac0 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -22,7 +22,7 @@ See the [local storage test](../tests/integration/localstorage/localstorage_int_ Integration tests can be run with no k3s cluster present, each test will spin up and kill the appropriate k3s server it needs. Note: Integration tests must be run as root, prefix the commands below with `sudo -E env "PATH=$PATH"` if a sudo user. ```bash -go test ./tests/integration/... -run Integration +go test ./tests/integration/... -run Integration -ginkgo.v -test.v ``` Additionally, to generate JUnit reporting for the tests, the Ginkgo CLI is used @@ -32,7 +32,7 @@ ginkgo --junit-report=result.xml ./tests/integration/... Integration tests can be run on an existing single-node cluster via compile time flag, tests will skip if the server is not configured correctly. ```bash -go test -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'" ./tests/integration/... -run Integration +go test -ldflags "-X 'github.com/k3s-io/k3s/tests/integration.existingServer=True'" ./tests/integration/... -run Integration -ginkgo.v -test.v ``` Integration tests can also be run via a [Sonobuoy](https://sonobuoy.io/docs/v0.53.2/) plugin on an existing single-node cluster. diff --git a/tests/integration/cacertrotation/cacertrotation_int_test.go b/tests/integration/cacertrotation/cacertrotation_int_test.go new file mode 100644 index 000000000000..31df7369a772 --- /dev/null +++ b/tests/integration/cacertrotation/cacertrotation_int_test.go @@ -0,0 +1,108 @@ +package ca_cert_rotation_test + +import ( + "fmt" + "strings" + "testing" + + testutil "github.com/k3s-io/k3s/tests/integration" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +const tmpdDataDir = "/tmp/cacertrotationtest" + +var server, server2 *testutil.K3sServer +var serverArgs = []string{"--cluster-init", "-t", "test", "-d", tmpdDataDir} +var certHash, caCertHash string +var testLock int + +var _ = BeforeSuite(func() { + if !testutil.IsExistingServer() { + var err error + testLock, err = testutil.K3sTestLock() + Expect(err).ToNot(HaveOccurred()) + server, err = testutil.K3sStartServer(serverArgs...) 
diff --git a/tests/integration/cacertrotation/cacertrotation_int_test.go b/tests/integration/cacertrotation/cacertrotation_int_test.go
new file mode 100644
index 000000000000..31df7369a772
--- /dev/null
+++ b/tests/integration/cacertrotation/cacertrotation_int_test.go
@@ -0,0 +1,108 @@
+package ca_cert_rotation_test
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	testutil "github.com/k3s-io/k3s/tests/integration"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+const tmpdDataDir = "/tmp/cacertrotationtest"
+
+var server, server2 *testutil.K3sServer
+var serverArgs = []string{"--cluster-init", "-t", "test", "-d", tmpdDataDir}
+var certHash, caCertHash string
+var testLock int
+
+var _ = BeforeSuite(func() {
+	if !testutil.IsExistingServer() {
+		var err error
+		testLock, err = testutil.K3sTestLock()
+		Expect(err).ToNot(HaveOccurred())
+		server, err = testutil.K3sStartServer(serverArgs...)
+		Expect(err).ToNot(HaveOccurred())
+	}
+})
+
+var _ = Describe("ca certificate rotation", Ordered, func() {
+	BeforeEach(func() {
+		if testutil.IsExistingServer() && !testutil.ServerArgsPresent(serverArgs) {
+			Skip("Test needs k3s server with: " + strings.Join(serverArgs, " "))
+		}
+	})
+	When("a new server is created", func() {
+		It("starts up with no problems", func() {
+			Eventually(func() error {
+				return testutil.K3sDefaultDeployments()
+			}, "180s", "5s").Should(Succeed())
+		})
+		It("get certificate hash", func() {
+			// get md5sum of the CA certs before rotation
+			var err error
+			caCertHash, err = testutil.RunCommand("md5sum " + tmpdDataDir + "/server/tls/client-ca.crt | cut -f 1 -d' '")
+			Expect(err).ToNot(HaveOccurred())
+			certHash, err = testutil.RunCommand("md5sum " + tmpdDataDir + "/server/tls/serving-kube-apiserver.crt | cut -f 1 -d' '")
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("generates updated ca-certificates", func() {
+			cmd := fmt.Sprintf("DATA_DIR=%s ../../../contrib/util/rotate-default-ca-certs.sh", tmpdDataDir)
+			By("running command: " + cmd)
+			res, err := testutil.RunCommand(cmd)
+			By("checking command results: " + res)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("certificate rotate-ca", func() {
+			res, err := testutil.K3sCmd("certificate", "rotate-ca", "-d", tmpdDataDir, "--path", tmpdDataDir+"/server/rotate-ca")
+			By("checking command results: " + res)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("stop k3s", func() {
+			Expect(testutil.K3sKillServer(server)).To(Succeed())
+		})
+		It("start k3s server", func() {
+			var err error
+			server2, err = testutil.K3sStartServer(serverArgs...)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("starts up with no problems", func() {
+			Eventually(func() error {
+				return testutil.K3sDefaultDeployments()
+			}, "360s", "5s").Should(Succeed())
+		})
+		It("get certificate hash after rotation", func() {
+			// get md5sum of the CA certs after rotation and compare against the original hashes
+			var err error
+			caCertHashAfter, err := testutil.RunCommand("md5sum " + tmpdDataDir + "/server/tls/client-ca.crt | cut -f 1 -d' '")
+			Expect(err).ToNot(HaveOccurred())
+			certHashAfter, err := testutil.RunCommand("md5sum " + tmpdDataDir + "/server/tls/serving-kube-apiserver.crt | cut -f 1 -d' '")
+			Expect(err).ToNot(HaveOccurred())
+			Expect(certHash).To(Not(Equal(certHashAfter)))
+			Expect(caCertHash).To(Not(Equal(caCertHashAfter)))
+		})
+	})
+})
+
+var failed bool
+var _ = AfterEach(func() {
+	failed = failed || CurrentSpecReport().Failed()
+})
+
+var _ = AfterSuite(func() {
+	if !testutil.IsExistingServer() {
+		if failed {
+			testutil.K3sSaveLog(server, false)
+		}
+		Expect(testutil.K3sKillServer(server)).To(Succeed())
+		Expect(testutil.K3sCleanup(-1, "")).To(Succeed())
+		Expect(testutil.K3sKillServer(server2)).To(Succeed())
+		Expect(testutil.K3sCleanup(testLock, tmpdDataDir)).To(Succeed())
+	}
+})
+
+func Test_IntegrationCertRotation(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "CA Cert rotation Suite")
+}
diff --git a/tests/integration/integration.go b/tests/integration/integration.go
index 6f6745dbcb65..42b775a49216 100644
--- a/tests/integration/integration.go
+++ b/tests/integration/integration.go
@@ -389,9 +389,10 @@ func RunCommand(cmd string) (string, error) {
 	c := exec.Command("bash", "-c", cmd)
 	var out bytes.Buffer
 	c.Stdout = &out
+	c.Stderr = &out
 	err := c.Run()
 	if err != nil {
-		return "", fmt.Errorf("%s", err)
+		return out.String(), fmt.Errorf("%s", err)
 	}
 	return out.String(), nil
 }
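The `RunCommand` change above points both `c.Stdout` and `c.Stderr` at the same buffer and returns the collected output even when the command fails, so failing tests can log what the command actually printed. A standalone sketch of the resulting behavior:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// runCommand mirrors the patched helper: stdout and stderr share one
// buffer, and the captured output is returned alongside any error.
func runCommand(cmd string) (string, error) {
	c := exec.Command("bash", "-c", cmd)
	var out bytes.Buffer
	c.Stdout = &out
	c.Stderr = &out
	if err := c.Run(); err != nil {
		return out.String(), fmt.Errorf("%s", err)
	}
	return out.String(), nil
}

func main() {
	// The stderr text is part of the returned output, and the
	// non-zero exit code still surfaces as an error.
	out, err := runCommand("echo oops >&2; exit 3")
	fmt.Printf("output=%q err=%v\n", out, err) // output="oops\n" err=exit status 3
}
```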
diff --git a/tests/unit.go b/tests/unit.go
index ee76af03851a..5bc40cea3b55 100644
--- a/tests/unit.go
+++ b/tests/unit.go
@@ -54,6 +54,10 @@ func GenerateRuntime(cnf *config.Control) error {
 	deps.CreateRuntimeCertFiles(cnf)
 
+	cnf.Datastore.ServerTLSConfig.CAFile = cnf.Runtime.ETCDServerCA
+	cnf.Datastore.ServerTLSConfig.CertFile = cnf.Runtime.ServerETCDCert
+	cnf.Datastore.ServerTLSConfig.KeyFile = cnf.Runtime.ServerETCDKey
+
 	return deps.GenServerDeps(cnf)
 }
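The three assignments added to `GenerateRuntime` point the datastore's server TLS config at the runtime-generated etcd certificate files before `GenServerDeps` runs. A toy sketch of that wiring with stand-in types (the real structs live in k3s's config packages, and the paths shown are illustrative assumptions):

```go
package main

import "fmt"

// Stand-in types; the real definitions live in k3s's config packages.
type TLSConfig struct{ CAFile, CertFile, KeyFile string }

type Runtime struct{ ETCDServerCA, ServerETCDCert, ServerETCDKey string }

type Control struct {
	Datastore struct{ ServerTLSConfig TLSConfig }
	Runtime   Runtime
}

func main() {
	cnf := &Control{Runtime: Runtime{
		// Illustrative paths only.
		ETCDServerCA:   "/tmp/k3s/server/tls/etcd/server-ca.crt",
		ServerETCDCert: "/tmp/k3s/server/tls/etcd/server-client.crt",
		ServerETCDKey:  "/tmp/k3s/server/tls/etcd/server-client.key",
	}}

	// Mirror of the three assignments added in GenerateRuntime above.
	cnf.Datastore.ServerTLSConfig.CAFile = cnf.Runtime.ETCDServerCA
	cnf.Datastore.ServerTLSConfig.CertFile = cnf.Runtime.ServerETCDCert
	cnf.Datastore.ServerTLSConfig.KeyFile = cnf.Runtime.ServerETCDKey

	fmt.Printf("%+v\n", cnf.Datastore.ServerTLSConfig)
}
```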