From 97f6c3b90135de7625c57da605db98aaac8c6792 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:25:12 +0300 Subject: [PATCH 1/8] feat: Provision & provision k8s opts Two new opts that represent the two scripts used in the provision phase (provision linux and provision k8s). Using go embed to include any necessary config files then run the commands on a node using libssh Signed-off-by: aerosouund --- .../opts/k8sprovision/conf/001-calico.conf | 2 + .../opts/k8sprovision/conf/002-dhclient.conf | 2 + .../opts/k8sprovision/conf/adv-audit.yaml | 17 + .../gocli/opts/k8sprovision/conf/cni.diff | 109 + .../gocli/opts/k8sprovision/conf/cni.yaml | 3872 +++++++++++++++++ .../opts/k8sprovision/conf/cni_ipv6.diff | 119 + .../opts/k8sprovision/conf/crio-yum.repo | 6 + .../gocli/opts/k8sprovision/conf/extra-images | 23 + .../opts/k8sprovision/conf/fetch-images.sh | 54 + .../gocli/opts/k8sprovision/conf/k8s.conf | 5 + .../gocli/opts/k8sprovision/conf/kubeadm.conf | 54 + .../opts/k8sprovision/conf/kubeadm_ipv6.conf | 70 + .../opts/k8sprovision/conf/kubernetes.repo | 6 + .../gocli/opts/k8sprovision/conf/psa.yaml | 20 + .../opts/k8sprovision/conf/registries.conf | 8 + .../gocli/opts/k8sprovision/conf/storage.conf | 4 + .../gocli/opts/k8sprovision/k8sprovision.go | 296 ++ .../opts/k8sprovision/k8sprovision_test.go | 39 + ...add-security-context-deployment-patch.yaml | 6 + .../gocli/opts/k8sprovision/patches/etcd.yaml | 4 + .../k8sprovision/patches/kube-apiserver.yaml | 4 + .../patches/kube-controller-manager.yaml | 4 + .../k8sprovision/patches/kube-scheduler.yaml | 4 + .../gocli/opts/k8sprovision/testconfig.go | 122 + .../gocli/opts/labelnodes/labelnodes.go | 2 +- .../gocli/opts/labelnodes/labelnodes_test.go | 39 + .../gocli/opts/labelnodes/testconfig.go | 9 + .../gocli/opts/provision/provision.go | 2 +- .../gocli/opts/provision/provision_test.go | 39 + .../gocli/opts/provision/testconfig.go | 35 + .../github.com/pmezard/go-difflib/LICENSE | 27 - 
.../pmezard/go-difflib/difflib/difflib.go | 772 ---- .../github.com/stretchr/testify/LICENSE | 21 - .../testify/assert/assertion_compare.go | 480 -- .../testify/assert/assertion_format.go | 815 ---- .../testify/assert/assertion_format.go.tmpl | 5 - .../testify/assert/assertion_forward.go | 1621 ------- .../testify/assert/assertion_forward.go.tmpl | 5 - .../testify/assert/assertion_order.go | 81 - .../stretchr/testify/assert/assertions.go | 2105 --------- .../github.com/stretchr/testify/assert/doc.go | 46 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 16 - .../testify/assert/http_assertions.go | 165 - 44 files changed, 4974 insertions(+), 6171 deletions(-) create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/001-calico.conf create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/002-dhclient.conf create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/adv-audit.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/cni.diff create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/cni.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/cni_ipv6.diff create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/crio-yum.repo create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/extra-images create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/fetch-images.sh create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/k8s.conf create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/kubeadm.conf create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/kubeadm_ipv6.conf create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/kubernetes.repo create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/psa.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/registries.conf create mode 100644 cluster-provision/gocli/opts/k8sprovision/conf/storage.conf create mode 100644 
cluster-provision/gocli/opts/k8sprovision/k8sprovision.go create mode 100644 cluster-provision/gocli/opts/k8sprovision/k8sprovision_test.go create mode 100644 cluster-provision/gocli/opts/k8sprovision/patches/add-security-context-deployment-patch.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/patches/etcd.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/patches/kube-apiserver.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/patches/kube-controller-manager.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/patches/kube-scheduler.yaml create mode 100644 cluster-provision/gocli/opts/k8sprovision/testconfig.go create mode 100644 cluster-provision/gocli/opts/labelnodes/labelnodes_test.go create mode 100644 cluster-provision/gocli/opts/labelnodes/testconfig.go create mode 100644 cluster-provision/gocli/opts/provision/provision_test.go create mode 100644 cluster-provision/gocli/opts/provision/testconfig.go delete mode 100644 cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/LICENSE delete mode 100644 cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/difflib/difflib.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/LICENSE delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_compare.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_order.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertions.go 
delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/doc.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/errors.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/forward_assertions.go delete mode 100644 cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/http_assertions.go diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/001-calico.conf b/cluster-provision/gocli/opts/k8sprovision/conf/001-calico.conf new file mode 100644 index 0000000000..1722715699 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/001-calico.conf @@ -0,0 +1,2 @@ +[keyfile] +unmanaged-devices=interface-name:cali*;interface-name:tunl* \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/002-dhclient.conf b/cluster-provision/gocli/opts/k8sprovision/conf/002-dhclient.conf new file mode 100644 index 0000000000..47fec0ab03 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/002-dhclient.conf @@ -0,0 +1,2 @@ +[main] +dhcp=dhclient \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/adv-audit.yaml b/cluster-provision/gocli/opts/k8sprovision/conf/adv-audit.yaml new file mode 100644 index 0000000000..7eb84c98f9 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/adv-audit.yaml @@ -0,0 +1,17 @@ +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: Request + users: ["kubernetes-admin"] + resources: + - group: kubevirt.io + resources: + - virtualmachines + - virtualmachineinstances + - virtualmachineinstancereplicasets + - virtualmachineinstancepresets + - virtualmachineinstancemigrations + omitStages: + - RequestReceived + - ResponseStarted + - Panic \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/cni.diff b/cluster-provision/gocli/opts/k8sprovision/conf/cni.diff new file mode 100644 index 0000000000..9074ef4de7 
--- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/cni.diff @@ -0,0 +1,109 @@ +--- a/cluster-provision/k8s/1.19/manifests/cni.do-not-change.yaml ++++ b/cluster-provision/k8s/1.19/manifests/cni.do-not-change.yaml +@@ -32,7 +32,12 @@ data: + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { +- "type": "calico-ipam" ++ "type": "calico-ipam", ++ "assign_ipv4": "true", ++ "assign_ipv6": "true" ++ }, ++ "container_settings": { ++ "allow_ip_forwarding": true + }, + "policy": { + "type": "k8s" +@@ -3533,7 +3538,7 @@ spec: + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam +- image: docker.io/calico/cni:v3.18.0 ++ image: quay.io/calico/cni:v3.18.0 + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: +@@ -3560,7 +3565,7 @@ spec: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni +- image: docker.io/calico/cni:v3.18.0 ++ image: quay.io/calico/cni:v3.18.0 + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: +@@ -3601,7 +3606,7 @@ spec: + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver +- image: docker.io/calico/pod2daemon-flexvol:v3.18.0 ++ image: quay.io/calico/pod2daemon-flexvol:v3.18.0 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver +@@ -3612,7 +3617,7 @@ spec: + # container programs network policy and routes on each + # host. + - name: calico-node +- image: docker.io/calico/node:v3.18.0 ++ image: quay.io/calico/node:v3.18.0 + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. +@@ -3671,6 +3676,8 @@ spec: + # no effect. This should fall within `--cluster-cidr`. 
+ # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" ++ - name: IP6 ++ value: "autodetect" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" +@@ -3679,12 +3686,14 @@ spec: + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT +- value: "false" ++ value: "true" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" ++ - name: CALICO_IPV6POOL_NAT_OUTGOING ++ value: "true" + securityContext: + privileged: true + resources: +@@ -3818,6 +3818,8 @@ spec: + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule ++ - key: node-role.kubernetes.io/control-plane ++ effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: +@@ -3820,9 +3829,12 @@ spec: + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical ++ securityContext: ++ seLinuxOptions: ++ type: spc_t + containers: + - name: calico-kube-controllers +- image: docker.io/calico/kube-controllers:v3.18.0 ++ image: quay.io/calico/kube-controllers:v3.18.0 + env: + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS +@@ -3847,7 +3859,7 @@ + + # This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +-apiVersion: policy/v1beta1 ++apiVersion: policy/v1 + kind: PodDisruptionBudget + metadata: + name: calico-kube-controllers diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/cni.yaml b/cluster-provision/gocli/opts/k8sprovision/conf/cni.yaml new file mode 100644 index 0000000000..bcc4ee0c02 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/cni.yaml @@ -0,0 +1,3872 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. 
+kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "0" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. 
+ items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. 
+ properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. 
+ type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. 
+ type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. 
+ type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. 
+ properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enabled" or "Disabled". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. 
This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. [Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' 
+ type: string + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the source address to use on programmed device + routes. By default the source address is left blank, leaving the + kernel to choose the source address used. 
+ type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a comma-delimited list of + UDP/TCP ports that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. Each + port should be specified as tcp: or udp:. + For back-compatibility, if the protocol is not specified, it defaults + to "tcp". To disable all inbound host ports, use the value none. + The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol and port, both + must be specified. + properties: + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a comma-delimited list + of UDP/TCP ports that Felix will allow outgoing traffic from host + endpoints to irrespective of the security policy. This is useful + to avoid accidentally cutting off a host with incorrect configuration. + Each port should be specified as tcp: or udp:. + For back-compatibility, if the protocol is not specified, it defaults + to "tcp". To disable all outbound host ports, use the value none. + The default value opens etcd''s standard ports to ensure that Felix + does not get cut off from etcd as well as allowing DHCP and DNS. 
+ [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, + udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol and port, both + must be specified. + properties: + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. 
For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. 
When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). 
+ type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. 
[Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRange specifies the indices of the route tables + that Calico should use. 
+ properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + type: boolean + vxlanEnabled: + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. 
[Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. 
\n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). 
+ \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. 
+ items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. 
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. 
A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. 
+ type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. 
+ items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). 
+ type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. 
When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. 
+ type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! 
has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." 
+ type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + type: string + allocations: + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + type: string + deleted: + type: boolean + strictAffinity: + type: boolean + unallocated: + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 112 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' 
+ properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). 
+ type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. 
+ Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. 
[Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. 
+ properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. 
[Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". 
All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. 
See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. 
+ properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. 
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. 
+ properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. 
\n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. 
If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. 
The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. 
+ pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and Selector are defined on the same rule, then only workload + endpoints that are matched by both selectors will be selected + by the rule. \n For NetworkPolicy, an empty NamespaceSelector + implies that the Selector is limited to selecting only + workload endpoints in the same namespace as the NetworkPolicy. + \n For NetworkPolicy, `global()` NamespaceSelector implies + that the Selector is limited to selecting only GlobalNetworkSet + or HostEndpoint. \n For GlobalNetworkPolicy, an empty + NamespaceSelector implies the Selector applies to workload + endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. 
Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! 
expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- + +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. 
+ - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: docker.io/calico/cni:v3.18.0 + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: docker.io/calico/cni:v3.18.0 + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. 
+ - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: docker.io/calico/pod2daemon-flexvol:v3.18.0 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: docker.io/calico/node:v3.18.0 + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Enable or Disable VXLAN on the default IP pool. 
+ - name: CALICO_IPV4POOL_VXLAN + value: "Never" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. 
+ - name: sysfs + mountPath: /sys/fs/ + # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. + # If the host is known to mount that filesystem already then Bidirectional can be omitted. + mountPropagation: Bidirectional + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sysfs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. + - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. 
+ replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: docker.io/calico/kube-controllers:v3.18.0 + env: + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system + +--- + +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml + + diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/cni_ipv6.diff b/cluster-provision/gocli/opts/k8sprovision/conf/cni_ipv6.diff new file mode 100644 index 0000000000..45cae03d34 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/cni_ipv6.diff @@ -0,0 +1,119 @@ +--- a/cluster-provision/k8s/1.24/manifests/cni.do-not-change.yaml ++++ b/cluster-provision/k8s/1.24/manifests/cni.do-not-change.yaml +@@ -32,7 +32,12 @@ + "nodename": "__KUBERNETES_NODE_NAME__", + 
"mtu": __CNI_MTU__, + "ipam": { +- "type": "calico-ipam" ++ "type": "calico-ipam", ++ "assign_ipv4": "false", ++ "assign_ipv6": "true" ++ }, ++ "container_settings": { ++ "allow_ip_forwarding": true + }, + "policy": { + "type": "k8s" +@@ -3533,7 +3538,7 @@ + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam +- image: docker.io/calico/cni:v3.18.0 ++ image: quay.io/calico/cni:v3.18.0 + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: +@@ -3560,7 +3565,7 @@ + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni +- image: docker.io/calico/cni:v3.18.0 ++ image: quay.io/calico/cni:v3.18.0 + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: +@@ -3601,7 +3606,7 @@ + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver +- image: docker.io/calico/pod2daemon-flexvol:v3.18.0 ++ image: quay.io/calico/pod2daemon-flexvol:v3.18.0 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver +@@ -3612,7 +3617,7 @@ + # container programs network policy and routes on each + # host. + - name: calico-node +- image: docker.io/calico/node:v3.18.0 ++ image: quay.io/calico/node:v3.18.0 + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. +@@ -3641,10 +3646,10 @@ + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP +- value: "autodetect" ++ value: "none" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP +- value: "Always" ++ value: "Never" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" +@@ -3671,6 +3676,8 @@ + # no effect. This should fall within `--cluster-cidr`. 
+ # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" ++ - name: IP6 ++ value: "autodetect" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" +@@ -3679,12 +3686,16 @@ + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT +- value: "false" ++ value: "true" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" ++ - name: CALICO_IPV6POOL_NAT_OUTGOING ++ value: "true" ++ - name: CALICO_ROUTER_ID ++ value: "hash" + securityContext: + privileged: true + resources: +@@ -3818,11 +3829,16 @@ + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule ++ - key: node-role.kubernetes.io/control-plane ++ effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical ++ securityContext: ++ seLinuxOptions: ++ type: spc_t + containers: + - name: calico-kube-controllers +- image: docker.io/calico/kube-controllers:v3.18.0 ++ image: quay.io/calico/kube-controllers:v3.18.0 + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS +@@ -3847,7 +3863,7 @@ + + # This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +-apiVersion: policy/v1beta1 ++apiVersion: policy/v1 + kind: PodDisruptionBudget + metadata: + name: calico-kube-controllers diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/crio-yum.repo b/cluster-provision/gocli/opts/k8sprovision/conf/crio-yum.repo new file mode 100644 index 0000000000..747f2fa37f --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/crio-yum.repo @@ -0,0 +1,6 @@ +[isv_kubernetes_addons_cri-o_stable_v1.28] +name=CRI-O v1.28 (Stable) (rpm) +type=rpm-md +baseurl=https://storage.googleapis.com/kubevirtci-crio-mirror/isv_kubernetes_addons_cri-o_stable_v1.28 +gpgcheck=0 +enabled=1 \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/extra-images b/cluster-provision/gocli/opts/k8sprovision/conf/extra-images new file mode 100644 index 0000000000..473ecf1347 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/extra-images @@ -0,0 +1,23 @@ +quay.io/kubevirtci/install-cni:1.15.0 +quay.io/kubevirtci/operator:1.15.0 +quay.io/kubevirtci/pilot:1.15.0 +quay.io/kubevirtci/proxyv2:1.15.0 +quay.io/calico/cni:v3.18.0 +quay.io/calico/kube-controllers:v3.18.0 +quay.io/calico/node:v3.18.0 +quay.io/calico/pod2daemon-flexvol:v3.18.0 +quay.io/prometheus-operator/prometheus-config-reloader:v0.75.1 +docker.io/grafana/grafana:11.1.0 +ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller@sha256:dee1979d92f0a31598a6e3569ac7004be7d29e7ca9e31db23753ef263110dc04 +ghcr.io/kubevirt/kubesecondarydns@sha256:77132adb5f840ceb0aadd408731a5c8b01a4b427a78084ab5e4e9b961195cb02 +quay.io/kubevirt/bridge-marker@sha256:5d24c6d1ecb0556896b7b81c7e5260b54173858425777b7a84df8a706c07e6d2 +quay.io/kubevirt/cdi-apiserver:v1.58.1 +quay.io/kubevirt/cdi-controller:v1.58.1 +quay.io/kubevirt/cdi-operator:v1.58.1 +quay.io/kubevirt/cdi-uploadproxy:v1.58.1 
+quay.io/kubevirt/cluster-network-addons-operator:v0.87.0 +quay.io/kubevirt/cni-default-plugins@sha256:825e3f9fec1996c54a52cec806154945b38f76476b160d554c36e38dfffe5e61 +quay.io/kubevirt/kubemacpool@sha256:afba7d0c4a95d2d4924f6ee6ef16bbe59117877383819057f01809150829cb0c +quay.io/kubevirt/macvtap-cni@sha256:434420511e09b2b5ede785a2c9062b6658ffbc26fbdd4629ce06110f9039c600 +quay.io/kubevirt/ovs-cni-plugin@sha256:5f7290e2294255ab2547c3b4bf48cc2d75531ec5a43e600366e9b2719bef983f +quay.io/openshift/origin-kube-rbac-proxy@sha256:baedb268ac66456018fb30af395bb3d69af5fff3252ff5d549f0231b1ebb6901 \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/fetch-images.sh b/cluster-provision/gocli/opts/k8sprovision/conf/fetch-images.sh new file mode 100644 index 0000000000..18fba6833b --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/fetch-images.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +set -euo pipefail + +function usage() { + cat < [source-image-list] + + Fetches all images from the cluster provision source and manifests. Returns a list that is sorted and + without double entries. + + If source-image-list is provided this is taken as an input and added to the result. + +EOF +} + +function check_args() { + if [ "$#" -lt 1 ]; then + usage + exit 1 + fi + if [ ! -d "$1" ]; then + usage + echo "Directory $1 does not exist" + exit 1 + fi +} + +function main() { + check_args "$@" + + temp_file=$(mktemp) + trap 'rm -f "${temp_file}"' EXIT SIGINT SIGTERM + + provision_dir="$1" + image_regex='([a-z0-9\_\.]+[/-]?)+(@sha256)?:[a-z0-9\_\.\-]+' + image_regex_w_double_quotes='"?'"${image_regex}"'"?' 
+ + ( + # Avoid bailing out because of nothing found in scripts part + set +e + find "$provision_dir" -type f -name '*.sh' -print0 | + xargs -0 grep -iE '(docker|podman)[ _]pull[^ ]+ '"${image_regex_w_double_quotes}" + find "$provision_dir" -type f -name '*.yaml' -print0 | + xargs -0 grep -iE '(image|value): '"${image_regex_w_double_quotes}" + set -e + # last `grep -v` is necessary to avoid trying to pre pull istio "images", as the regex also matches on values + # from the generated istio deployment manifest + ) | grep -ioE "${image_regex_w_double_quotes}"'$' | grep -v '.svc:' >>"${temp_file}" + + sed -E 's/"//g' "${temp_file}" | sort | uniq +} + +main "$@" \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/k8s.conf b/cluster-provision/gocli/opts/k8sprovision/conf/k8s.conf new file mode 100644 index 0000000000..ab1c5d11b9 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/k8s.conf @@ -0,0 +1,5 @@ +net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward = 1 +net.ipv6.conf.all.disable_ipv6 = 0 +net.ipv6.conf.all.forwarding = 1 +net.bridge.bridge-nf-call-ip6tables = 1 \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/kubeadm.conf b/cluster-provision/gocli/opts/k8sprovision/conf/kubeadm.conf new file mode 100644 index 0000000000..838aece1b0 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/kubeadm.conf @@ -0,0 +1,54 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + token: abcdef.1234567890123456 + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +patches: + directory: /provision/kubeadm-patches +--- +apiServer: + extraArgs: + allow-privileged: "true" + audit-log-format: json + audit-log-path: /var/log/k8s-audit/k8s-audit.log + audit-policy-file: /etc/kubernetes/audit/adv-audit.yaml + enable-admission-plugins: 
NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + admission-control-config-file: /etc/kubernetes/psa.yaml + extraVolumes: + - hostPath: /etc/kubernetes/psa.yaml + mountPath: /etc/kubernetes/psa.yaml + name: psa + - hostPath: /etc/kubernetes/audit + mountPath: /etc/kubernetes/audit + name: audit-conf + readOnly: true + - hostPath: /var/log/k8s-audit + mountPath: /var/log/k8s-audit + name: audit-log + timeoutForControlPlane: 4m0s +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +clusterName: kubernetes +controllerManager: + extraArgs: + node-cidr-mask-size-ipv6: "116" +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kind: ClusterConfiguration +kubernetesVersion: vVERSION +networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16,fd10:244::/112 + serviceSubnet: 10.96.0.0/12,fd10:96::/108 +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: 10.244.0.0/16,fd10:244::/112 +mode: iptables diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/kubeadm_ipv6.conf b/cluster-provision/gocli/opts/k8sprovision/conf/kubeadm_ipv6.conf new file mode 100644 index 0000000000..90db90e86e --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/kubeadm_ipv6.conf @@ -0,0 +1,70 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + token: abcdef.1234567890123456 + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +patches: + directory: /provision/kubeadm-patches +localAPIEndpoint: + advertiseAddress: "::" + bindPort: 6443 +nodeRegistration: + kubeletExtraArgs: + node-ip: "::" +--- +apiServer: + extraArgs: + allow-privileged: "true" + audit-log-format: json + audit-log-path: /var/log/k8s-audit/k8s-audit.log + audit-policy-file: 
/etc/kubernetes/audit/adv-audit.yaml + enable-admission-plugins: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + bind-address: "::" + admission-control-config-file: /etc/kubernetes/psa.yaml + extraVolumes: + - hostPath: /etc/kubernetes/psa.yaml + mountPath: /etc/kubernetes/psa.yaml + name: psa + - hostPath: /etc/kubernetes/audit + mountPath: /etc/kubernetes/audit + name: audit-conf + readOnly: true + - hostPath: /var/log/k8s-audit + mountPath: /var/log/k8s-audit + name: audit-log + timeoutForControlPlane: 4m0s +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +clusterName: kubernetes +controllerManager: + extraArgs: + bind-address: "::" + node-cidr-mask-size-ipv6: "116" +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kind: ClusterConfiguration +kubernetesVersion: vVERSION +networking: + dnsDomain: cluster.local + podSubnet: fd00:10:244::/112 + serviceSubnet: fd00:10:96::/112 +scheduler: + extraArgs: + bind-address: "::1" +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +clusterCIDR: fd00:10:244::/112 +mode: iptables +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +address: "::" +healthzBindAddress: "::" diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/kubernetes.repo b/cluster-provision/gocli/opts/k8sprovision/conf/kubernetes.repo new file mode 100644 index 0000000000..add2c15a20 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/kubernetes.repo @@ -0,0 +1,6 @@ +[kubernetes] +name=Kubernetes Release +baseurl=https://pkgs.k8s.io/core:/stable:/v1.VERSION/rpm +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/psa.yaml b/cluster-provision/gocli/opts/k8sprovision/conf/psa.yaml new file mode 100644 index 0000000000..a50d37db23 --- 
/dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/psa.yaml @@ -0,0 +1,20 @@ +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1 + kind: PodSecurityConfiguration + defaults: + enforce: "privileged" + enforce-version: "latest" + audit: "restricted" + audit-version: "latest" + warn: "restricted" + warn-version: "latest" + exemptions: + usernames: [] + runtimeClasses: [] + # Hopefuly this will not be needed in future. Add your favorite namespace to be ignored and your operator not broken + # You also need to modify psa.sh + namespaces: ["kube-system", "default", "istio-operator" ,"istio-system", "nfs-csi", "monitoring", "rook-ceph", "cluster-network-addons", "sonobuoy"] \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/registries.conf b/cluster-provision/gocli/opts/k8sprovision/conf/registries.conf new file mode 100644 index 0000000000..9d3563970e --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/registries.conf @@ -0,0 +1,8 @@ +[registries.search] +registries = ["registry.access.redhat.com", "registry.fedoraproject.org", "quay.io", "docker.io"] + +[registries.insecure] +registries = ["registry:5000"] + +[registries.block] +registries = [] \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/conf/storage.conf b/cluster-provision/gocli/opts/k8sprovision/conf/storage.conf new file mode 100644 index 0000000000..ec4c9fee9c --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/conf/storage.conf @@ -0,0 +1,4 @@ +[storage] +driver = "vfs" +runroot = "/var/run/containers/storage" +graphroot = "/mnt/containers-storage" \ No newline at end of file diff --git a/cluster-provision/gocli/opts/k8sprovision/k8sprovision.go b/cluster-provision/gocli/opts/k8sprovision/k8sprovision.go new file mode 100644 index 0000000000..e4feca0269 --- /dev/null +++ 
b/cluster-provision/gocli/opts/k8sprovision/k8sprovision.go @@ -0,0 +1,296 @@ +package k8sprovision + +import ( + "embed" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/sirupsen/logrus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" +) + +//go:embed conf/* +var f embed.FS + +//go:embed patches/* +var patchFs embed.FS + +type k8sProvisioner struct { + version string + slim bool + sshClient libssh.Client +} + +func NewK8sProvisioner(sshClient libssh.Client, version string, slim bool) *k8sProvisioner { + return &k8sProvisioner{ + version: version, + slim: slim, + sshClient: sshClient, + } +} + +func (k *k8sProvisioner) Exec() error { + crio, err := f.ReadFile("conf/crio-yum.repo") + if err != nil { + return err + } + + registries, err := f.ReadFile("conf/registries.conf") + if err != nil { + return err + } + + storage, err := f.ReadFile("conf/storage.conf") + if err != nil { + return err + } + + k8sRepo, err := f.ReadFile("conf/kubernetes.repo") + if err != nil { + return err + } + + cniPatch, err := f.ReadFile("conf/cni.diff") + if err != nil { + return err + } + + cniV6Patch, err := f.ReadFile("conf/cni_ipv6.diff") + if err != nil { + return err + } + + k8sConf, err := f.ReadFile("conf/k8s.conf") + if err != nil { + return err + } + + calico, err := f.ReadFile("conf/001-calico.conf") + if err != nil { + return err + } + + dhclient, err := f.ReadFile("conf/002-dhclient.conf") + if err != nil { + return err + } + + secContextPatch, err := patchFs.ReadFile("patches/add-security-context-deployment-patch.yaml") + if err != nil { + return err + } + + etcdPatch, err := patchFs.ReadFile("patches/etcd.yaml") + if err != nil { + return err + } + + apiServerPatch, err := patchFs.ReadFile("patches/kube-apiserver.yaml") + if err != nil { + return err + } + + controllerManagerPatch, err := patchFs.ReadFile("patches/kube-controller-manager.yaml") + if err != nil { + return err + } + + schedulerPatch, err := 
patchFs.ReadFile("patches/kube-scheduler.yaml") + if err != nil { + return err + } + + packagesVersion, err := k.getPackagesVersion() + if err != nil { + return err + } + + advAudit, err := f.ReadFile("conf/adv-audit.yaml") + if err != nil { + return err + } + + psa, err := f.ReadFile("conf/psa.yaml") + if err != nil { + return err + } + + kubeAdm, err := f.ReadFile("conf/kubeadm.conf") + if err != nil { + return err + } + + kubeAdm6, err := f.ReadFile("conf/kubeadm_ipv6.conf") + if err != nil { + return err + } + + k8sMinor := strings.Split(k.version, ".")[1] + k8sRepoWithVersion := strings.Replace(string(k8sRepo), "VERSION", k8sMinor, 1) + + kubeAdmConf := strings.Replace(string(kubeAdm), "VERSION", k.version, 1) + kubeAdm6Conf := strings.Replace(string(kubeAdm6), "VERSION", k.version, 1) + + cmds := []string{ + "echo '" + string(crio) + "' | tee /etc/yum.repos.d/devel_kubic_libcontainers_stable_cri-o_v1.28.repo >> /dev/null", + "dnf install -y cri-o", + "echo '" + string(registries) + "' | tee /etc/containers/registries.conf >> /dev/null", + "echo '" + string(storage) + "' | tee /etc/containers/storage.conf >> /dev/null", + "systemctl restart crio", + "systemctl enable --now crio", + "echo '" + k8sRepoWithVersion + "' | tee /etc/yum.repos.d/kubernetes.repo >> /dev/null", + fmt.Sprintf("dnf install --skip-broken --nobest --nogpgcheck --disableexcludes=kubernetes -y kubectl-%[1]s kubeadm-%[1]s kubelet-%[1]s kubernetes-cni", packagesVersion), + "kubeadm config images pull --kubernetes-version " + k.version, + `image_regex='([a-z0-9\_\.]+[/-]?)+(@sha256)?:[a-z0-9\_\.\-]+' image_regex_w_double_quotes='"?'"${image_regex}"'"?' 
find /tmp -type f -name '*.yaml' -print0 | xargs -0 grep -iE '(image|value): '"${image_regex_w_double_quotes}" > /tmp/images`, + } + + for _, cmd := range cmds { + if err := k.sshClient.Command(cmd); err != nil { + return err + } + } + + images, err := k.sshClient.CommandWithNoStdOut(`image_regex='([a-z0-9\_\.]+[/-]?)+(@sha256)?:[a-z0-9\_\.\-]+' && image_regex_w_double_quotes='"?'"${image_regex}"'"?' && grep -ioE "${image_regex_w_double_quotes}" /tmp/images`) + if err != nil { + return err + } + + if !k.slim { + imagesList := strings.Split(images, "\n") + for _, image := range imagesList { + err := k.pullImageRetry(image) + if err != nil { + logrus.Infof("Failed to pull image: %s, it will not be available offline", image) + } + } + + extraImg, err := f.ReadFile("conf/extra-images") + if err != nil { + return err + } + + imagesList = strings.Split(string(extraImg), "\n") + for _, image := range imagesList { + err := k.pullImageRetry(image) + if err != nil { + logrus.Infof("Failed to pull image: %s, it will not be available offline", image) + } + } + } + + cmds = []string{ + "mkdir /provision", + "yum install -y patch || true", + "dnf install -y patch || true", + "cp /tmp/cni.do-not-change.yaml /provision/cni.yaml", + "mv /tmp/cni.do-not-change.yaml /provision/cni_ipv6.yaml", + "echo '" + string(cniPatch) + "' | tee /tmp/cni_patch.diff >> /dev/null", + "echo '" + string(cniV6Patch) + "' | tee /tmp/cni_v6_patch.diff >> /dev/null", + "patch /provision/cni.yaml /tmp/cni_patch.diff", + "patch /provision/cni_ipv6.yaml /tmp/cni_v6_patch.diff", + "cp /tmp/local-volume.yaml /provision/local-volume.yaml", + "echo 'KUBELET_EXTRA_ARGS=--cgroup-driver=systemd --runtime-cgroups=/systemd/system.slice --fail-swap-on=false --kubelet-cgroups=/systemd/system.slice' >> /etc/sysconfig/kubelet", + `echo "vm.unprivileged_userfaultfd = 1" > /etc/sysctl.d/enable-userfaultfd.conf`, + "modprobe bridge", + "modprobe overlay", + "modprobe br_netfilter", + "echo '" + string(k8sConf) + "' | tee 
/etc/sysctl.d/k8s.conf >> /dev/null", + "sysctl --system", + "echo bridge >> /etc/modules-load.d/k8s.conf", + "echo br_netfilter >> /etc/modules-load.d/k8s.conf", + "echo overlay >> /etc/modules-load.d/k8s.conf", + "rm -f /etc/cni/net.d/*", + "systemctl daemon-reload", + "systemctl enable crio kubelet --now", + "echo '" + string(calico) + "' | tee /etc/NetworkManager/conf.d/001-calico.conf >> /dev/null", + "echo '" + string(dhclient) + "' | tee /etc/NetworkManager/conf.d/002-dhclient.conf >> /dev/null", + `echo "net.netfilter.nf_conntrack_max=1000000" >> /etc/sysctl.conf`, + "sysctl --system", + "systemctl restart NetworkManager", + `nmcli connection modify "System eth0" ipv6.method auto ipv6.addr-gen-mode eui64`, + `nmcli connection up "System eth0"`, + "sysctl --system", + "mkdir -p /provision/kubeadm-patches", + "echo '" + string(secContextPatch) + "' | tee /provision/kubeadm-patches/add-security-context-deployment-patch.yaml >> /dev/null", + "echo '" + string(etcdPatch) + "' | tee /provision/kubeadm-patches/etcd.yaml >> /dev/null", + "echo '" + string(apiServerPatch) + "' | tee /provision/kubeadm-patches/kube-apiserver.yaml >> /dev/null", + "echo '" + string(controllerManagerPatch) + "' | tee /provision/kubeadm-patches/kube-controller-manager.yaml >> /dev/null", + "echo '" + string(schedulerPatch) + "' | tee /provision/kubeadm-patches/kube-scheduler.yaml >> /dev/null", + "mkdir /etc/kubernetes/audit", + "echo '" + string(advAudit) + "' | tee /etc/kubernetes/audit/adv-audit.yaml >> /dev/null", + "echo '" + string(psa) + "' | tee /etc/kubernetes/psa.yaml >> /dev/null", + "echo '" + kubeAdmConf + "' | tee /etc/kubernetes/kubeadm.conf >> /dev/null", + "echo '" + kubeAdm6Conf + "' | tee /etc/kubernetes/kubeadm_ipv6.conf >> /dev/null", + "until ip address show dev eth0 | grep global | grep inet6; do sleep 1; done", + "swapoff -a", + "systemctl restart kubelet", + "kubeadm init --config /etc/kubernetes/kubeadm.conf -v5", + "kubectl 
--kubeconfig=/etc/kubernetes/admin.conf patch deployment coredns -n kube-system -p '" + string(secContextPatch) + "'", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f /provision/cni.yaml", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf wait --for=condition=Ready pods --all -n kube-system --timeout=300s", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods -n kube-system", + "kubeadm reset --force", + "mkdir -p /var/provision/kubevirt.io/tests", + "chcon -t container_file_t /var/provision/kubevirt.io/tests", + `echo "tmpfs /var/provision/kubevirt.io/tests tmpfs rw,context=system_u:object_r:container_file_t:s0 0 1" >> /etc/fstab`, + "rm -f /etc/sysconfig/network-scripts/ifcfg-*", + "nmcli connection add con-name eth0 ifname eth0 type ethernet", + "rm -f /etc/machine-id ; touch /etc/machine-id", + } + + for _, cmd := range cmds { + if err := k.sshClient.Command(cmd); err != nil { + return err + } + } + + return nil +} + +func (k *k8sProvisioner) pullImageRetry(image string) error { + maxRetries := 5 + downloaded := false + + for i := 0; i < maxRetries; i++ { + if err := k.sshClient.Command("crictl pull " + image); err != nil { + logrus.Infof("Attempt [%d]: Failed to download image %s: %s, sleeping 3 seconds and trying again", i+1, image, err.Error()) + time.Sleep(time.Second * 3) + } else { + downloaded = true + break + } + } + + if !downloaded { + return fmt.Errorf("reached max retries to download for %s", image) + } + return nil +} + +func (k *k8sProvisioner) getPackagesVersion() (string, error) { + packagesVersion := k.version + if strings.HasSuffix(k.version, "alpha") || strings.HasSuffix(k.version, "beta") || strings.HasSuffix(k.version, "rc") { + k8sversion := strings.Split(k.version, ".") + + url := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/stable-%s.%s.txt", k8sversion[0], k8sversion[1]) + resp, err := http.Get(url) + if err != nil { + return packagesVersion, nil + } + + defer resp.Body.Close() + + body, 
err := io.ReadAll(resp.Body) + if err != nil { + fmt.Println("Error reading the response body:", err) + return packagesVersion, nil + } + if string(body) != "" { + packagesVersion = strings.TrimPrefix(string(body), "v") + } + } + return packagesVersion, nil +} diff --git a/cluster-provision/gocli/opts/k8sprovision/k8sprovision_test.go b/cluster-provision/gocli/opts/k8sprovision/k8sprovision_test.go new file mode 100644 index 0000000000..59f59f8053 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/k8sprovision_test.go @@ -0,0 +1,39 @@ +package k8sprovision + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "go.uber.org/mock/gomock" + kubevirtcimocks "kubevirt.io/kubevirtci/cluster-provision/gocli/utils/mock" +) + +func TestK8sProvision(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "K8s provision test suite") +} + +var _ = Describe("K8s provision", func() { + var ( + mockCtrl *gomock.Controller + sshClient *kubevirtcimocks.MockSSHClient + opt *k8sProvisioner + ) + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + sshClient = kubevirtcimocks.NewMockSSHClient(mockCtrl) + opt = NewK8sProvisioner(sshClient, "1.30", true) + AddExpectCalls(sshClient, opt.version, opt.slim) + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + It("should provision k8s successfully", func() { + err := opt.Exec() + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/cluster-provision/gocli/opts/k8sprovision/patches/add-security-context-deployment-patch.yaml b/cluster-provision/gocli/opts/k8sprovision/patches/add-security-context-deployment-patch.yaml new file mode 100644 index 0000000000..38d9b0e65c --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/patches/add-security-context-deployment-patch.yaml @@ -0,0 +1,6 @@ +spec: + template: + spec: + securityContext: + seLinuxOptions: + type: spc_t diff --git a/cluster-provision/gocli/opts/k8sprovision/patches/etcd.yaml
b/cluster-provision/gocli/opts/k8sprovision/patches/etcd.yaml new file mode 100644 index 0000000000..58ec1a7a53 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/patches/etcd.yaml @@ -0,0 +1,4 @@ +spec: + securityContext: + seLinuxOptions: + type: spc_t diff --git a/cluster-provision/gocli/opts/k8sprovision/patches/kube-apiserver.yaml b/cluster-provision/gocli/opts/k8sprovision/patches/kube-apiserver.yaml new file mode 100644 index 0000000000..58ec1a7a53 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/patches/kube-apiserver.yaml @@ -0,0 +1,4 @@ +spec: + securityContext: + seLinuxOptions: + type: spc_t diff --git a/cluster-provision/gocli/opts/k8sprovision/patches/kube-controller-manager.yaml b/cluster-provision/gocli/opts/k8sprovision/patches/kube-controller-manager.yaml new file mode 100644 index 0000000000..58ec1a7a53 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/patches/kube-controller-manager.yaml @@ -0,0 +1,4 @@ +spec: + securityContext: + seLinuxOptions: + type: spc_t diff --git a/cluster-provision/gocli/opts/k8sprovision/patches/kube-scheduler.yaml b/cluster-provision/gocli/opts/k8sprovision/patches/kube-scheduler.yaml new file mode 100644 index 0000000000..58ec1a7a53 --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/patches/kube-scheduler.yaml @@ -0,0 +1,4 @@ +spec: + securityContext: + seLinuxOptions: + type: spc_t diff --git a/cluster-provision/gocli/opts/k8sprovision/testconfig.go b/cluster-provision/gocli/opts/k8sprovision/testconfig.go new file mode 100644 index 0000000000..66dd52b02c --- /dev/null +++ b/cluster-provision/gocli/opts/k8sprovision/testconfig.go @@ -0,0 +1,120 @@ +package k8sprovision + +import ( + "fmt" + "strings" + + kubevirtcimocks "kubevirt.io/kubevirtci/cluster-provision/gocli/utils/mock" +) + +func AddExpectCalls(sshClient *kubevirtcimocks.MockSSHClient, version string, slim bool) { + crio, _ := f.ReadFile("conf/crio-yum.repo") + registries, _ :=
f.ReadFile("conf/registries.conf") + storage, _ := f.ReadFile("conf/storage.conf") + k8sRepo, _ := f.ReadFile("conf/kubernetes.repo") + cniPatch, _ := f.ReadFile("conf/cni.diff") + cniV6Patch, _ := f.ReadFile("conf/cni_ipv6.diff") + k8sConf, _ := f.ReadFile("conf/k8s.conf") + calico, _ := f.ReadFile("conf/001-calico.conf") + dhclient, _ := f.ReadFile("conf/002-dhclient.conf") + secContextPatch, _ := patchFs.ReadFile("patches/add-security-context-deployment-patch.yaml") + etcdPatch, _ := patchFs.ReadFile("patches/etcd.yaml") + apiServerPatch, _ := patchFs.ReadFile("patches/kube-apiserver.yaml") + controllerManagerPatch, _ := patchFs.ReadFile("patches/kube-controller-manager.yaml") + schedulerPatch, _ := patchFs.ReadFile("patches/kube-scheduler.yaml") + + packagesVersion := "1.30" + + advAudit, _ := f.ReadFile("conf/adv-audit.yaml") + psa, _ := f.ReadFile("conf/psa.yaml") + kubeAdm, _ := f.ReadFile("conf/kubeadm.conf") + kubeAdm6, _ := f.ReadFile("conf/kubeadm_ipv6.conf") + + k8sMinor := strings.Split(version, ".")[1] + k8sRepoWithVersion := strings.Replace(string(k8sRepo), "VERSION", k8sMinor, 1) + kubeAdmConf := strings.Replace(string(kubeAdm), "VERSION", version, 1) + kubeAdm6Conf := strings.Replace(string(kubeAdm6), "VERSION", version, 1) + + cmds := []string{ + "echo '" + string(crio) + "' | tee /etc/yum.repos.d/devel_kubic_libcontainers_stable_cri-o_v1.28.repo >> /dev/null", + "dnf install -y cri-o", + "echo '" + string(registries) + "' | tee /etc/containers/registries.conf >> /dev/null", + "echo '" + string(storage) + "' | tee /etc/containers/storage.conf >> /dev/null", + "systemctl restart crio", + "systemctl enable --now crio", + "echo '" + k8sRepoWithVersion + "' | tee /etc/yum.repos.d/kubernetes.repo >> /dev/null", + fmt.Sprintf("dnf install --skip-broken --nobest --nogpgcheck --disableexcludes=kubernetes -y kubectl-%[1]s kubeadm-%[1]s kubelet-%[1]s kubernetes-cni", packagesVersion), + "kubeadm config images pull --kubernetes-version " + version, 
`image_regex='([a-z0-9\_\.]+[/-]?)+(@sha256)?:[a-z0-9\_\.\-]+' image_regex_w_double_quotes='"?'"${image_regex}"'"?' find /tmp -type f -name '*.yaml' -print0 | xargs -0 grep -iE '(image|value): '"${image_regex_w_double_quotes}" > /tmp/images`, + } + + for _, cmd := range cmds { + sshClient.EXPECT().Command(cmd) + } + + sshClient.EXPECT().CommandWithNoStdOut(`image_regex='([a-z0-9\_\.]+[/-]?)+(@sha256)?:[a-z0-9\_\.\-]+' && image_regex_w_double_quotes='"?'"${image_regex}"'"?' && grep -ioE "${image_regex_w_double_quotes}" /tmp/images`).Return("nginx:latest", nil) + + cmds = []string{ + "mkdir /provision", + "yum install -y patch || true", + "dnf install -y patch || true", + "cp /tmp/cni.do-not-change.yaml /provision/cni.yaml", + "mv /tmp/cni.do-not-change.yaml /provision/cni_ipv6.yaml", + "echo '" + string(cniPatch) + "' | tee /tmp/cni_patch.diff >> /dev/null", + "echo '" + string(cniV6Patch) + "' | tee /tmp/cni_v6_patch.diff >> /dev/null", + "patch /provision/cni.yaml /tmp/cni_patch.diff", + "patch /provision/cni_ipv6.yaml /tmp/cni_v6_patch.diff", + "cp /tmp/local-volume.yaml /provision/local-volume.yaml", + "echo 'KUBELET_EXTRA_ARGS=--cgroup-driver=systemd --runtime-cgroups=/systemd/system.slice --fail-swap-on=false --kubelet-cgroups=/systemd/system.slice' >> /etc/sysconfig/kubelet", + `echo "vm.unprivileged_userfaultfd = 1" > /etc/sysctl.d/enable-userfaultfd.conf`, + "modprobe bridge", + "modprobe overlay", + "modprobe br_netfilter", + "echo '" + string(k8sConf) + "' | tee /etc/sysctl.d/k8s.conf >> /dev/null", + "sysctl --system", + "echo bridge >> /etc/modules-load.d/k8s.conf", + "echo br_netfilter >> /etc/modules-load.d/k8s.conf", + "echo overlay >> /etc/modules-load.d/k8s.conf", + "rm -f /etc/cni/net.d/*", + "systemctl daemon-reload", + "systemctl enable crio kubelet --now", + "echo '" + string(calico) + "' | tee /etc/NetworkManager/conf.d/001-calico.conf >> /dev/null", + "echo '" + string(dhclient) + "' | tee /etc/NetworkManager/conf.d/002-dhclient.conf >> /dev/null", + `echo "net.netfilter.nf_conntrack_max=1000000" >> /etc/sysctl.conf`, + "sysctl --system", + "systemctl restart NetworkManager", + `nmcli connection modify "System eth0" 
ipv6.method auto ipv6.addr-gen-mode eui64`, + `nmcli connection up "System eth0"`, + "sysctl --system", + "mkdir -p /provision/kubeadm-patches", + "echo '" + string(secContextPatch) + "' | tee /provision/kubeadm-patches/add-security-context-deployment-patch.yaml >> /dev/null", + "echo '" + string(etcdPatch) + "' | tee /provision/kubeadm-patches/etcd.yaml >> /dev/null", + "echo '" + string(apiServerPatch) + "' | tee /provision/kubeadm-patches/kube-apiserver.yaml >> /dev/null", + "echo '" + string(controllerManagerPatch) + "' | tee /provision/kubeadm-patches/kube-controller-manager.yaml >> /dev/null", + "echo '" + string(schedulerPatch) + "' | tee /provision/kubeadm-patches/kube-scheduler.yaml >> /dev/null", + "mkdir /etc/kubernetes/audit", + "echo '" + string(advAudit) + "' | tee /etc/kubernetes/audit/adv-audit.yaml >> /dev/null", + "echo '" + string(psa) + "' | tee /etc/kubernetes/psa.yaml >> /dev/null", + "echo '" + kubeAdmConf + "' | tee /etc/kubernetes/kubeadm.conf >> /dev/null", + "echo '" + kubeAdm6Conf + "' | tee /etc/kubernetes/kubeadm_ipv6.conf >> /dev/null", + "until ip address show dev eth0 | grep global | grep inet6; do sleep 1; done", + "swapoff -a", + "systemctl restart kubelet", + "kubeadm init --config /etc/kubernetes/kubeadm.conf -v5", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf patch deployment coredns -n kube-system -p '" + string(secContextPatch) + "'", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f /provision/cni.yaml", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf wait --for=condition=Ready pods --all -n kube-system --timeout=300s", + "kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods -n kube-system", + "kubeadm reset --force", + "mkdir -p /var/provision/kubevirt.io/tests", + "chcon -t container_file_t /var/provision/kubevirt.io/tests", + `echo "tmpfs 
/var/provision/kubevirt.io/tests tmpfs rw,context=system_u:object_r:container_file_t:s0 0 1" >> /etc/fstab`, + "rm -f /etc/sysconfig/network-scripts/ifcfg-*", + "nmcli connection add con-name eth0 ifname eth0 type ethernet", + "rm -f /etc/machine-id ; touch /etc/machine-id", + } + + for _, cmd := range cmds { + sshClient.EXPECT().Command(cmd) + } +} diff --git a/cluster-provision/gocli/opts/labelnodes/labelnodes.go b/cluster-provision/gocli/opts/labelnodes/labelnodes.go index 75c0393cf1..d3203756ff 100644 --- a/cluster-provision/gocli/opts/labelnodes/labelnodes.go +++ b/cluster-provision/gocli/opts/labelnodes/labelnodes.go @@ -9,7 +9,7 @@ type nodeLabler struct { labelSelector string } -func NewNodeLabler(sc libssh.Client, p uint16, l string) *nodeLabler { +func NewNodeLabler(sc libssh.Client, l string) *nodeLabler { return &nodeLabler{ sshClient: sc, labelSelector: l, diff --git a/cluster-provision/gocli/opts/labelnodes/labelnodes_test.go b/cluster-provision/gocli/opts/labelnodes/labelnodes_test.go new file mode 100644 index 0000000000..4aa810966e --- /dev/null +++ b/cluster-provision/gocli/opts/labelnodes/labelnodes_test.go @@ -0,0 +1,39 @@ +package labelnodes + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.uber.org/mock/gomock" + kubevirtcimocks "kubevirt.io/kubevirtci/cluster-provision/gocli/utils/mock" +) + +func TestNodeLabel(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "NodeLabeler test suite") +} + +var _ = Describe("NodeLabeler", func() { + var ( + mockCtrl *gomock.Controller + sshClient *kubevirtcimocks.MockSSHClient + opt *nodeLabler + ) + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + sshClient = kubevirtcimocks.NewMockSSHClient(mockCtrl) + opt = NewNodeLabler(sshClient, "node-role.kubernetes.io/control-plane") + AddExpectCalls(sshClient, "node-role.kubernetes.io/control-plane") + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + It("should label nodes successfully", func() { + err := opt.Exec() + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/cluster-provision/gocli/opts/labelnodes/testconfig.go b/cluster-provision/gocli/opts/labelnodes/testconfig.go new file mode 100644 index 0000000000..1e1ee46b86 --- /dev/null +++ b/cluster-provision/gocli/opts/labelnodes/testconfig.go @@ -0,0 +1,9 @@ +package labelnodes + +import ( + kubevirtcimocks "kubevirt.io/kubevirtci/cluster-provision/gocli/utils/mock" +) + +func AddExpectCalls(sshClient *kubevirtcimocks.MockSSHClient, label string) { + sshClient.EXPECT().Command("kubectl --kubeconfig=/etc/kubernetes/admin.conf label node -l " + label + " node-role.kubernetes.io/worker=''") +} diff --git a/cluster-provision/gocli/opts/provision/provision.go b/cluster-provision/gocli/opts/provision/provision.go index f59cb5d45f..0038dc49b0 100644 --- a/cluster-provision/gocli/opts/provision/provision.go +++ b/cluster-provision/gocli/opts/provision/provision.go @@ -22,7 +22,7 @@ func NewLinuxProvisioner(sc libssh.Client) *linuxProvisioner { func (l *linuxProvisioner) Exec() error { cmds := []string{ - `echo '` + string(sharedVars) + `' | tee /var/lib/kubevirtci/shared_vars.sh > /dev/null`, + `mkdir -p /var/lib/kubevirtci && echo '` + string(sharedVars) + 
`' | tee /var/lib/kubevirtci/shared_vars.sh > /dev/null`, `dnf install -y "kernel-modules-$(uname -r)"`, "dnf install -y cloud-utils-growpart", `if growpart /dev/vda 1; then resize2fs /dev/vda1; fi`, diff --git a/cluster-provision/gocli/opts/provision/provision_test.go b/cluster-provision/gocli/opts/provision/provision_test.go new file mode 100644 index 0000000000..bfb8435eb1 --- /dev/null +++ b/cluster-provision/gocli/opts/provision/provision_test.go @@ -0,0 +1,39 @@ +package provision + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "go.uber.org/mock/gomock" + kubevirtcimocks "kubevirt.io/kubevirtci/cluster-provision/gocli/utils/mock" +) + +func TestLinuxProvisioner(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Linux provision test suite") +} + +var _ = Describe("Linux provision", func() { + var ( + mockCtrl *gomock.Controller + sshClient *kubevirtcimocks.MockSSHClient + opt *linuxProvisioner + ) + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + sshClient = kubevirtcimocks.NewMockSSHClient(mockCtrl) + opt = NewLinuxProvisioner(sshClient) + AddExpectCalls(sshClient) + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + It("should provision linux successfully", func() { + err := opt.Exec() + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/cluster-provision/gocli/opts/provision/testconfig.go b/cluster-provision/gocli/opts/provision/testconfig.go new file mode 100644 index 0000000000..ea74be7b13 --- /dev/null +++ b/cluster-provision/gocli/opts/provision/testconfig.go @@ -0,0 +1,35 @@ +package provision + +import ( + kubevirtcimocks "kubevirt.io/kubevirtci/cluster-provision/gocli/utils/mock" +) + +func AddExpectCalls(sshClient *kubevirtcimocks.MockSSHClient) { + cmds := []string{ + `mkdir -p /var/lib/kubevirtci && echo '` + string(sharedVars) + `' | tee /var/lib/kubevirtci/shared_vars.sh > /dev/null`, + `dnf install -y "kernel-modules-$(uname -r)"`, + "dnf install -y 
cloud-utils-growpart", + `if growpart /dev/vda 1; then resize2fs /dev/vda1; fi`, + "dnf install -y patch", + "systemctl stop firewalld || :", + "systemctl disable firewalld || :", + "dnf -y remove firewalld", + "dnf -y install iscsi-initiator-utils", + "dnf -y install nftables", + "dnf -y install lvm2", + `echo 'ACTION=="add|change", SUBSYSTEM=="block", KERNEL=="vd[a-z]", ATTR{queue/rotational}="0"' > /etc/udev/rules.d/60-force-ssd-rotational.rules`, + "dnf install -y iproute-tc", + "mkdir -p /opt/istio-1.15.0/bin", + `curl "https://storage.googleapis.com/kubevirtci-istioctl-mirror/istio-1.15.0/bin/istioctl" -o "/opt/istio-1.15.0/bin/istioctl"`, + `chmod +x /opt/istio-1.15.0/bin/istioctl`, + "dnf install -y container-selinux", + "dnf install -y libseccomp-devel", + "dnf install -y centos-release-nfv-openvswitch", + "dnf install -y openvswitch2.16", + "dnf install -y NetworkManager NetworkManager-ovs NetworkManager-config-server", + } + + for _, cmd := range cmds { + sshClient.EXPECT().Command(cmd) + } +} diff --git a/cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/LICENSE b/cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad612a..0000000000 --- a/cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. 
- The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99fadb..0000000000 --- a/cluster-provision/gocli/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). 
-package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "

" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. 
-func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. 
-// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. 
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. 
-// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). -func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. 
- if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. 
-func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). -func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. 
-// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. -func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, 
range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. 
-// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. -func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. 
-func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/LICENSE b/cluster-provision/gocli/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 4b0421cf9e..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_compare.go deleted file mode 100644 index 4d4b4aad6f..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ /dev/null @@ -1,480 +0,0 @@ -package assert - -import ( - "bytes" - "fmt" - "reflect" - "time" -) - -type CompareType int - -const ( - compareLess CompareType = iota - 1 - compareEqual - compareGreater -) - -var ( - intType = reflect.TypeOf(int(1)) - int8Type = reflect.TypeOf(int8(1)) - int16Type = reflect.TypeOf(int16(1)) - int32Type = reflect.TypeOf(int32(1)) - int64Type = reflect.TypeOf(int64(1)) - - uintType = reflect.TypeOf(uint(1)) - uint8Type = reflect.TypeOf(uint8(1)) - uint16Type = reflect.TypeOf(uint16(1)) - uint32Type = reflect.TypeOf(uint32(1)) - uint64Type = reflect.TypeOf(uint64(1)) - - uintptrType = reflect.TypeOf(uintptr(1)) - - float32Type = reflect.TypeOf(float32(1)) - float64Type = reflect.TypeOf(float64(1)) - - stringType = reflect.TypeOf("") - - timeType = reflect.TypeOf(time.Time{}) - bytesType = reflect.TypeOf([]byte{}) -) - -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { - obj1Value := reflect.ValueOf(obj1) - obj2Value := reflect.ValueOf(obj2) - - // throughout this switch we try and avoid calling .Convert() if possible, - // as this has a pretty big performance impact - switch kind { - case reflect.Int: - { - intobj1, ok := obj1.(int) - if !ok { - intobj1 = obj1Value.Convert(intType).Interface().(int) - } - intobj2, ok := obj2.(int) - if !ok { - intobj2 = obj2Value.Convert(intType).Interface().(int) - } - if intobj1 > intobj2 { - return compareGreater, true - } - if intobj1 == intobj2 { - return compareEqual, true - } - if intobj1 < intobj2 { - return compareLess, true - } - } - case reflect.Int8: - { - int8obj1, ok := obj1.(int8) - if !ok { - int8obj1 = 
obj1Value.Convert(int8Type).Interface().(int8) - } - int8obj2, ok := obj2.(int8) - if !ok { - int8obj2 = obj2Value.Convert(int8Type).Interface().(int8) - } - if int8obj1 > int8obj2 { - return compareGreater, true - } - if int8obj1 == int8obj2 { - return compareEqual, true - } - if int8obj1 < int8obj2 { - return compareLess, true - } - } - case reflect.Int16: - { - int16obj1, ok := obj1.(int16) - if !ok { - int16obj1 = obj1Value.Convert(int16Type).Interface().(int16) - } - int16obj2, ok := obj2.(int16) - if !ok { - int16obj2 = obj2Value.Convert(int16Type).Interface().(int16) - } - if int16obj1 > int16obj2 { - return compareGreater, true - } - if int16obj1 == int16obj2 { - return compareEqual, true - } - if int16obj1 < int16obj2 { - return compareLess, true - } - } - case reflect.Int32: - { - int32obj1, ok := obj1.(int32) - if !ok { - int32obj1 = obj1Value.Convert(int32Type).Interface().(int32) - } - int32obj2, ok := obj2.(int32) - if !ok { - int32obj2 = obj2Value.Convert(int32Type).Interface().(int32) - } - if int32obj1 > int32obj2 { - return compareGreater, true - } - if int32obj1 == int32obj2 { - return compareEqual, true - } - if int32obj1 < int32obj2 { - return compareLess, true - } - } - case reflect.Int64: - { - int64obj1, ok := obj1.(int64) - if !ok { - int64obj1 = obj1Value.Convert(int64Type).Interface().(int64) - } - int64obj2, ok := obj2.(int64) - if !ok { - int64obj2 = obj2Value.Convert(int64Type).Interface().(int64) - } - if int64obj1 > int64obj2 { - return compareGreater, true - } - if int64obj1 == int64obj2 { - return compareEqual, true - } - if int64obj1 < int64obj2 { - return compareLess, true - } - } - case reflect.Uint: - { - uintobj1, ok := obj1.(uint) - if !ok { - uintobj1 = obj1Value.Convert(uintType).Interface().(uint) - } - uintobj2, ok := obj2.(uint) - if !ok { - uintobj2 = obj2Value.Convert(uintType).Interface().(uint) - } - if uintobj1 > uintobj2 { - return compareGreater, true - } - if uintobj1 == uintobj2 { - return compareEqual, true - } 
- if uintobj1 < uintobj2 { - return compareLess, true - } - } - case reflect.Uint8: - { - uint8obj1, ok := obj1.(uint8) - if !ok { - uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8) - } - uint8obj2, ok := obj2.(uint8) - if !ok { - uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8) - } - if uint8obj1 > uint8obj2 { - return compareGreater, true - } - if uint8obj1 == uint8obj2 { - return compareEqual, true - } - if uint8obj1 < uint8obj2 { - return compareLess, true - } - } - case reflect.Uint16: - { - uint16obj1, ok := obj1.(uint16) - if !ok { - uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16) - } - uint16obj2, ok := obj2.(uint16) - if !ok { - uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16) - } - if uint16obj1 > uint16obj2 { - return compareGreater, true - } - if uint16obj1 == uint16obj2 { - return compareEqual, true - } - if uint16obj1 < uint16obj2 { - return compareLess, true - } - } - case reflect.Uint32: - { - uint32obj1, ok := obj1.(uint32) - if !ok { - uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32) - } - uint32obj2, ok := obj2.(uint32) - if !ok { - uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32) - } - if uint32obj1 > uint32obj2 { - return compareGreater, true - } - if uint32obj1 == uint32obj2 { - return compareEqual, true - } - if uint32obj1 < uint32obj2 { - return compareLess, true - } - } - case reflect.Uint64: - { - uint64obj1, ok := obj1.(uint64) - if !ok { - uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64) - } - uint64obj2, ok := obj2.(uint64) - if !ok { - uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64) - } - if uint64obj1 > uint64obj2 { - return compareGreater, true - } - if uint64obj1 == uint64obj2 { - return compareEqual, true - } - if uint64obj1 < uint64obj2 { - return compareLess, true - } - } - case reflect.Float32: - { - float32obj1, ok := obj1.(float32) - if !ok { - float32obj1 = obj1Value.Convert(float32Type).Interface().(float32) - 
} - float32obj2, ok := obj2.(float32) - if !ok { - float32obj2 = obj2Value.Convert(float32Type).Interface().(float32) - } - if float32obj1 > float32obj2 { - return compareGreater, true - } - if float32obj1 == float32obj2 { - return compareEqual, true - } - if float32obj1 < float32obj2 { - return compareLess, true - } - } - case reflect.Float64: - { - float64obj1, ok := obj1.(float64) - if !ok { - float64obj1 = obj1Value.Convert(float64Type).Interface().(float64) - } - float64obj2, ok := obj2.(float64) - if !ok { - float64obj2 = obj2Value.Convert(float64Type).Interface().(float64) - } - if float64obj1 > float64obj2 { - return compareGreater, true - } - if float64obj1 == float64obj2 { - return compareEqual, true - } - if float64obj1 < float64obj2 { - return compareLess, true - } - } - case reflect.String: - { - stringobj1, ok := obj1.(string) - if !ok { - stringobj1 = obj1Value.Convert(stringType).Interface().(string) - } - stringobj2, ok := obj2.(string) - if !ok { - stringobj2 = obj2Value.Convert(stringType).Interface().(string) - } - if stringobj1 > stringobj2 { - return compareGreater, true - } - if stringobj1 == stringobj2 { - return compareEqual, true - } - if stringobj1 < stringobj2 { - return compareLess, true - } - } - // Check for known struct types we can check for compare results. - case reflect.Struct: - { - // All structs enter here. We're not interested in most types. - if !obj1Value.CanConvert(timeType) { - break - } - - // time.Time can be compared! - timeObj1, ok := obj1.(time.Time) - if !ok { - timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) - } - - timeObj2, ok := obj2.(time.Time) - if !ok { - timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) - } - - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) - } - case reflect.Slice: - { - // We only care about the []byte type. - if !obj1Value.CanConvert(bytesType) { - break - } - - // []byte can be compared! 
- bytesObj1, ok := obj1.([]byte) - if !ok { - bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) - - } - bytesObj2, ok := obj2.([]byte) - if !ok { - bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) - } - - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true - } - case reflect.Uintptr: - { - uintptrObj1, ok := obj1.(uintptr) - if !ok { - uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) - } - uintptrObj2, ok := obj2.(uintptr) - if !ok { - uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) - } - if uintptrObj1 > uintptrObj2 { - return compareGreater, true - } - if uintptrObj1 == uintptrObj2 { - return compareEqual, true - } - if uintptrObj1 < uintptrObj2 { - return compareLess, true - } - } - } - - return compareEqual, false -} - -// Greater asserts that the first element is greater than the second -// -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") -func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) -} - -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") -func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
-} - -// Less asserts that the first element is less than the second -// -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") -func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) -} - -// LessOrEqual asserts that the first element is less than or equal to the second -// -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") -func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) -} - -// Positive asserts that the specified element is positive -// -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) -func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) -} - -// Negative asserts that the specified element is negative -// -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) -func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
-} - -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - compareResult, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) - } - - return true -} - -func containsValue(values []CompareType, value CompareType) bool { - for _, v := range values { - if v == value { - return true - } - } - - return false -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go deleted file mode 100644 index 3ddab109ad..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ /dev/null @@ -1,815 +0,0 @@ -// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Conditionf uses a Comparison to assert a complex condition. -func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Condition(t, comp, append([]interface{}{msg}, args...)...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
-// -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") -func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Contains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. -func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return DirExists(t, path, append([]interface{}{msg}, args...)...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Emptyf(t, obj, "error message %s", "formatted") -func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Empty(t, object, append([]interface{}{msg}, args...)...) -} - -// Equalf asserts that two objects are equal. 
-// -// assert.Equalf(t, 123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") -func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) -} - -// EqualExportedValuesf asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false -func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. 
-// -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") -func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Error(t, err, append([]interface{}{msg}, args...)...) -} - -// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) -} - -// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) -// and that the error contains the specified substring. -// -// actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") -func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) -} - -// ErrorIsf asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...) 
-} - -// Eventuallyf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) -} - -// EventuallyWithTf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) -} - -// Exactlyf asserts that two objects are equal in value and type. 
-// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") -func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Failf reports a failure through -func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// FailNowf fails test -func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// Falsef asserts that the specified value is false. -// -// assert.Falsef(t, myBool, "error message %s", "formatted") -func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return False(t, value, append([]interface{}{msg}, args...)...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. -func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FileExists(t, path, append([]interface{}{msg}, args...)...) 
-} - -// Greaterf asserts that the first element is greater than the second -// -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") -func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// GreaterOrEqualf asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") -func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. 
-// -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPStatusCodef asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") -func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") -func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. 
-func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// IsDecreasingf asserts that the collection is decreasing -// -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") -func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsDecreasing(t, object, append([]interface{}{msg}, args...)...) 
-} - -// IsIncreasingf asserts that the collection is increasing -// -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") -func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsIncreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsNonDecreasingf asserts that the collection is not decreasing -// -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") -func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsNonIncreasingf asserts that the collection is not increasing -// -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") -func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsTypef asserts that the specified objects are of the same type. -func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) -} - -// JSONEqf asserts that two JSON strings are equivalent. 
-// -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Len(t, object, length, append([]interface{}{msg}, args...)...) -} - -// Lessf asserts that the first element is less than the second -// -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") -func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Less(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// LessOrEqualf asserts that the first element is less than or equal to the second -// -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") -func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) 
-} - -// Negativef asserts that the specified element is negative -// -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") -func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Negative(t, e, append([]interface{}{msg}, args...)...) -} - -// Neverf asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) -} - -// Nilf asserts that the specified object is nil. -// -// assert.Nilf(t, err, "error message %s", "formatted") -func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Nil(t, object, append([]interface{}{msg}, args...)...) -} - -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoDirExists(t, path, append([]interface{}{msg}, args...)...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). 
-// -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoError(t, err, append([]interface{}{msg}, args...)...) -} - -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoFileExists(t, path, append([]interface{}{msg}, args...)...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") -func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEmpty(t, object, append([]interface{}{msg}, args...)...) -} - -// NotEqualf asserts that the specified values are NOT equal. 
-// -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") -func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotErrorIsf asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) -} - -// NotImplementsf asserts that an object does not implement the specified interface. -// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") -func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) -} - -// NotNilf asserts that the specified object is not nil. 
-// -// assert.NotNilf(t, err, "error message %s", "formatted") -func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotNil(t, object, append([]interface{}{msg}, args...)...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") -func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotPanics(t, f, append([]interface{}{msg}, args...)...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") -func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
-// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") -func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// NotZerof asserts that i is not the zero value for its type. -func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotZero(t, i, append([]interface{}{msg}, args...)...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") -func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Panics(t, f, append([]interface{}{msg}, args...)...) -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. 
-// -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) -} - -// Positivef asserts that the specified element is positive -// -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") -func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Positive(t, e, append([]interface{}{msg}, args...)...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") -func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// Samef asserts that two pointers reference the same object. -// -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Same(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. 
-// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") -func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Subset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// Truef asserts that the specified value is true. -// -// assert.Truef(t, myBool, "error message %s", "formatted") -func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return True(t, value, append([]interface{}{msg}, args...)...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// WithinRangef asserts that a time is within a time range (inclusive). -// -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") -func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Zerof asserts that i is the zero value for its type. 
-func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Zero(t, i, append([]interface{}{msg}, args...)...) -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl deleted file mode 100644 index d2bb0b8177..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentFormat}} -func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { - if h, ok := t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index a84e09bd40..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,1621 +0,0 @@ -// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Condition(a.t, comp, msgAndArgs...) -} - -// Conditionf uses a Comparison to assert a complex condition. -func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Conditionf(a.t, comp, msg, args...) -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
-// -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Contains(a.t, s, contains, msgAndArgs...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") -func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Containsf(a.t, s, contains, msg, args...) -} - -// DirExists checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExists(a.t, path, msgAndArgs...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExistsf(a.t, path, msg, args...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. 
-// -// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) -func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(a.t, listA, listB, msgAndArgs...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatchf(a.t, listA, listB, msg, args...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Empty(a.t, object, msgAndArgs...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Emptyf(obj, "error message %s", "formatted") -func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Emptyf(a.t, object, msg, args...) -} - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. 
-func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") -func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualErrorf(a.t, theError, errString, msg, args...) -} - -// EqualExportedValues asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true -// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false -func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualExportedValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualExportedValuesf asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. 
-// -// type S struct { -// Exported int -// notExported int -// } -// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false -func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualExportedValuesf(a.t, expected, actual, msg, args...) -} - -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123)) -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. -// -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") -func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValuesf(a.t, expected, actual, msg, args...) -} - -// Equalf asserts that two objects are equal. -// -// a.Equalf(123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equalf(a.t, expected, actual, msg, args...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). 
-// -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Error(a.t, err, msgAndArgs...) -} - -// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorAs(a.t, err, target, msgAndArgs...) -} - -// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorAsf(a.t, err, target, msg, args...) -} - -// ErrorContains asserts that a function returned an error (i.e. not `nil`) -// and that the error contains the specified substring. -// -// actualObj, err := SomeFunction() -// a.ErrorContains(err, expectedErrorSubString) -func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorContains(a.t, theError, contains, msgAndArgs...) -} - -// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) -// and that the error contains the specified substring. -// -// actualObj, err := SomeFunction() -// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") -func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorContainsf(a.t, theError, contains, msg, args...) 
-} - -// ErrorIs asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorIs(a.t, err, target, msgAndArgs...) -} - -// ErrorIsf asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorIsf(a.t, err, target, msg, args...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Errorf(a.t, err, msg, args...) -} - -// Eventually asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) -func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// EventuallyWithT asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). 
-// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. -// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// a.EventuallyWithT(func(c *assert.CollectT) { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// EventuallyWithTf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. 
-// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) -} - -// Eventuallyf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) -} - -// Exactly asserts that two objects are equal in value and type. -// -// a.Exactly(int32(123), int64(123)) -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") -func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactlyf(a.t, expected, actual, msg, args...) 
-} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Fail(a.t, failureMessage, msgAndArgs...) -} - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNow(a.t, failureMessage, msgAndArgs...) -} - -// FailNowf fails test -func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNowf(a.t, failureMessage, msg, args...) -} - -// Failf reports a failure through -func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Failf(a.t, failureMessage, msg, args...) -} - -// False asserts that the specified value is false. -// -// a.False(myBool) -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return False(a.t, value, msgAndArgs...) -} - -// Falsef asserts that the specified value is false. -// -// a.Falsef(myBool, "error message %s", "formatted") -func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Falsef(a.t, value, msg, args...) -} - -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. -func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExists(a.t, path, msgAndArgs...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. 
-func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExistsf(a.t, path, msg, args...) -} - -// Greater asserts that the first element is greater than the second -// -// a.Greater(2, 1) -// a.Greater(float64(2), float64(1)) -// a.Greater("b", "a") -func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Greater(a.t, e1, e2, msgAndArgs...) -} - -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// a.GreaterOrEqual(2, 1) -// a.GreaterOrEqual(2, 2) -// a.GreaterOrEqual("b", "a") -// a.GreaterOrEqual("b", "b") -func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) -} - -// GreaterOrEqualf asserts that the first element is greater than or equal to the second -// -// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") -// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") -// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") -// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") -func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqualf(a.t, e1, e2, msg, args...) -} - -// Greaterf asserts that the first element is greater than the second -// -// a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") -// a.Greaterf("b", "a", "error message %s", "formatted") -func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Greaterf(a.t, e1, e2, msg, args...) 
-} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. 
-// -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPError(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPErrorf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPStatusCode asserts that a specified handler returns a specified status code. -// -// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...) -} - -// HTTPStatusCodef asserts that a specified handler returns a specified status code. -// -// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...) -} - -// HTTPSuccess asserts that a specified handler returns a success status code. 
-// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject)) -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") -func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implementsf(a.t, interfaceObject, object, msg, args...) -} - -// InDelta asserts that the two numerals are within delta of each other. 
-// -// a.InDelta(math.Pi, 22/7.0, 0.01) -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. 
-// -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") -func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaf(a.t, expected, actual, delta, msg, args...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) 
-} - -// IsDecreasing asserts that the collection is decreasing -// -// a.IsDecreasing([]int{2, 1, 0}) -// a.IsDecreasing([]float{2, 1}) -// a.IsDecreasing([]string{"b", "a"}) -func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsDecreasing(a.t, object, msgAndArgs...) -} - -// IsDecreasingf asserts that the collection is decreasing -// -// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") -// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") -func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsDecreasingf(a.t, object, msg, args...) -} - -// IsIncreasing asserts that the collection is increasing -// -// a.IsIncreasing([]int{1, 2, 3}) -// a.IsIncreasing([]float{1, 2}) -// a.IsIncreasing([]string{"a", "b"}) -func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsIncreasing(a.t, object, msgAndArgs...) -} - -// IsIncreasingf asserts that the collection is increasing -// -// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") -// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") -func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsIncreasingf(a.t, object, msg, args...) 
-} - -// IsNonDecreasing asserts that the collection is not decreasing -// -// a.IsNonDecreasing([]int{1, 1, 2}) -// a.IsNonDecreasing([]float{1, 2}) -// a.IsNonDecreasing([]string{"a", "b"}) -func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonDecreasing(a.t, object, msgAndArgs...) -} - -// IsNonDecreasingf asserts that the collection is not decreasing -// -// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") -func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonDecreasingf(a.t, object, msg, args...) -} - -// IsNonIncreasing asserts that the collection is not increasing -// -// a.IsNonIncreasing([]int{2, 1, 1}) -// a.IsNonIncreasing([]float{2, 1}) -// a.IsNonIncreasing([]string{"b", "a"}) -func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonIncreasing(a.t, object, msgAndArgs...) -} - -// IsNonIncreasingf asserts that the collection is not increasing -// -// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") -func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonIncreasingf(a.t, object, msg, args...) -} - -// IsType asserts that the specified objects are of the same type. 
-func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// IsTypef asserts that the specified objects are of the same type. -func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsTypef(a.t, expectedType, object, msg, args...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEqf(a.t, expected, actual, msg, args...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3) -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Len(a.t, object, length, msgAndArgs...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. 
-// -// a.Lenf(mySlice, 3, "error message %s", "formatted") -func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lenf(a.t, object, length, msg, args...) -} - -// Less asserts that the first element is less than the second -// -// a.Less(1, 2) -// a.Less(float64(1), float64(2)) -// a.Less("a", "b") -func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Less(a.t, e1, e2, msgAndArgs...) -} - -// LessOrEqual asserts that the first element is less than or equal to the second -// -// a.LessOrEqual(1, 2) -// a.LessOrEqual(2, 2) -// a.LessOrEqual("a", "b") -// a.LessOrEqual("b", "b") -func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return LessOrEqual(a.t, e1, e2, msgAndArgs...) -} - -// LessOrEqualf asserts that the first element is less than or equal to the second -// -// a.LessOrEqualf(1, 2, "error message %s", "formatted") -// a.LessOrEqualf(2, 2, "error message %s", "formatted") -// a.LessOrEqualf("a", "b", "error message %s", "formatted") -// a.LessOrEqualf("b", "b", "error message %s", "formatted") -func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return LessOrEqualf(a.t, e1, e2, msg, args...) -} - -// Lessf asserts that the first element is less than the second -// -// a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") -// a.Lessf("a", "b", "error message %s", "formatted") -func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lessf(a.t, e1, e2, msg, args...) 
-} - -// Negative asserts that the specified element is negative -// -// a.Negative(-1) -// a.Negative(-1.23) -func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Negative(a.t, e, msgAndArgs...) -} - -// Negativef asserts that the specified element is negative -// -// a.Negativef(-1, "error message %s", "formatted") -// a.Negativef(-1.23, "error message %s", "formatted") -func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Negativef(a.t, e, msg, args...) -} - -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) -func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Never(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// Neverf asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Neverf(a.t, condition, waitFor, tick, msg, args...) -} - -// Nil asserts that the specified object is nil. -// -// a.Nil(err) -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nil(a.t, object, msgAndArgs...) -} - -// Nilf asserts that the specified object is nil. 
-// -// a.Nilf(err, "error message %s", "formatted") -func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nilf(a.t, object, msg, args...) -} - -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoDirExists(a.t, path, msgAndArgs...) -} - -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoDirExistsf(a.t, path, msg, args...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoError(a.t, err, msgAndArgs...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoErrorf(a.t, err, msg, args...) -} - -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoFileExists(a.t, path, msgAndArgs...) 
-} - -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoFileExistsf(a.t, path, msg, args...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") -func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContainsf(a.t, s, contains, msg, args...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. 
-// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValues(obj1, obj2) -func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") -func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValuesf(a.t, expected, actual, msg, args...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). 
-func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) -} - -// NotErrorIs asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotErrorIs(a.t, err, target, msgAndArgs...) -} - -// NotErrorIsf asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotErrorIsf(a.t, err, target, msg, args...) -} - -// NotImplements asserts that an object does not implement the specified interface. -// -// a.NotImplements((*MyInterface)(nil), new(MyObject)) -func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotImplements(a.t, interfaceObject, object, msgAndArgs...) -} - -// NotImplementsf asserts that an object does not implement the specified interface. -// -// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") -func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotImplementsf(a.t, interfaceObject, object, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) 
-} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSame asserts that two pointers do not reference the same object. -// -// a.NotSame(ptr1, ptr2) -// -// Both arguments must be pointer variables. 
Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSame(a.t, expected, actual, msgAndArgs...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSamef(a.t, expected, actual, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. -// -// a.NotSubset([1, 3, 4], [1, 2]) -// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. -// -// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") -// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. 
-func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) -} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(a.t, errString, f, msgAndArgs...) -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithErrorf(a.t, errString, f, msg, args...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. 
-// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Positive asserts that the specified element is positive -// -// a.Positive(1) -// a.Positive(1.23) -func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Positive(a.t, e, msgAndArgs...) -} - -// Positivef asserts that the specified element is positive -// -// a.Positivef(1, "error message %s", "formatted") -// a.Positivef(1.23, "error message %s", "formatted") -func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Positivef(a.t, e, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. 
-// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Same asserts that two pointers reference the same object. -// -// a.Same(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Same(a.t, expected, actual, msgAndArgs...) -} - -// Samef asserts that two pointers reference the same object. -// -// a.Samef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Samef(a.t, expected, actual, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. 
-// -// a.Subset([1, 2, 3], [1, 2]) -// a.Subset({"x": 1, "y": 2}, {"x": 1}) -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. -// -// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") -// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. 
-// -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDurationf(a.t, expected, actual, delta, msg, args...) -} - -// WithinRange asserts that a time is within a time range (inclusive). -// -// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) -func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinRange(a.t, actual, start, end, msgAndArgs...) -} - -// WithinRangef asserts that a time is within a time range (inclusive). -// -// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") -func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinRangef(a.t, actual, start, end, msg, args...) -} - -// YAMLEq asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEqf(a.t, expected, actual, msg, args...) -} - -// Zero asserts that i is the zero value for its type. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) 
-} - -// Zerof asserts that i is the zero value for its type. -func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e174..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_order.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_order.go deleted file mode 100644 index 00df62a059..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ /dev/null @@ -1,81 +0,0 @@ -package assert - -import ( - "fmt" - "reflect" -) - -// isOrdered checks that collection contains orderable elements. 
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { - objKind := reflect.TypeOf(object).Kind() - if objKind != reflect.Slice && objKind != reflect.Array { - return false - } - - objValue := reflect.ValueOf(object) - objLen := objValue.Len() - - if objLen <= 1 { - return true - } - - value := objValue.Index(0) - valueInterface := value.Interface() - firstValueKind := value.Kind() - - for i := 1; i < objLen; i++ { - prevValue := value - prevValueInterface := valueInterface - - value = objValue.Index(i) - valueInterface = value.Interface() - - compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) - - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) - } - - if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...) - } - } - - return true -} - -// IsIncreasing asserts that the collection is increasing -// -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) -func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) -} - -// IsNonIncreasing asserts that the collection is not increasing -// -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) -func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
-} - -// IsDecreasing asserts that the collection is decreasing -// -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) -func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) -} - -// IsNonDecreasing asserts that the collection is not decreasing -// -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) -func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertions.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 0b7570f21c..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,2105 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "os" - "reflect" - "regexp" - "runtime" - "runtime/debug" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" -) - -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful -// for table driven tests. 
-type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison is a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// copyExportedFields iterates downward through nested data structures and creates a copy -// that only contains the exported struct fields. 
-func copyExportedFields(expected interface{}) interface{} { - if isNil(expected) { - return expected - } - - expectedType := reflect.TypeOf(expected) - expectedKind := expectedType.Kind() - expectedValue := reflect.ValueOf(expected) - - switch expectedKind { - case reflect.Struct: - result := reflect.New(expectedType).Elem() - for i := 0; i < expectedType.NumField(); i++ { - field := expectedType.Field(i) - isExported := field.IsExported() - if isExported { - fieldValue := expectedValue.Field(i) - if isNil(fieldValue) || isNil(fieldValue.Interface()) { - continue - } - newValue := copyExportedFields(fieldValue.Interface()) - result.Field(i).Set(reflect.ValueOf(newValue)) - } - } - return result.Interface() - - case reflect.Ptr: - result := reflect.New(expectedType.Elem()) - unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface()) - result.Elem().Set(reflect.ValueOf(unexportedRemoved)) - return result.Interface() - - case reflect.Array, reflect.Slice: - var result reflect.Value - if expectedKind == reflect.Array { - result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() - } else { - result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) - } - for i := 0; i < expectedValue.Len(); i++ { - index := expectedValue.Index(i) - if isNil(index) { - continue - } - unexportedRemoved := copyExportedFields(index.Interface()) - result.Index(i).Set(reflect.ValueOf(unexportedRemoved)) - } - return result.Interface() - - case reflect.Map: - result := reflect.MakeMap(expectedType) - for _, k := range expectedValue.MapKeys() { - index := expectedValue.MapIndex(k) - unexportedRemoved := copyExportedFields(index.Interface()) - result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved)) - } - return result.Interface() - - default: - return expected - } -} - -// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are -// considered equal. 
This comparison of only exported fields is applied recursively to nested data -// structures. -// -// This function does no assertion of any kind. -// -// Deprecated: Use [EqualExportedValues] instead. -func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { - expectedCleaned := copyExportedFields(expected) - actualCleaned := copyExportedFields(actual) - return ObjectsAreEqualValues(expectedCleaned, actualCleaned) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - expectedValue := reflect.ValueOf(expected) - actualValue := reflect.ValueOf(actual) - if !expectedValue.IsValid() || !actualValue.IsValid() { - return false - } - - expectedType := expectedValue.Type() - actualType := actualValue.Type() - if !expectedType.ConvertibleTo(actualType) { - return false - } - - if !isNumericType(expectedType) || !isNumericType(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual( - expectedValue.Convert(actualType).Interface(), actual, - ) - } - - // If BOTH values are numeric, there are chances of false positives due - // to overflow or underflow. So, we need to make sure to always convert - // the smaller type to a larger type before comparing. 
- if expectedType.Size() >= actualType.Size() { - return actualValue.Convert(expectedType).Interface() == expected - } - - return expectedValue.Convert(actualType).Interface() == actual -} - -// isNumericType returns true if the type is one of: -// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, -// float32, float64, complex64, complex128 -func isNumericType(t reflect.Type) bool { - return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - var pc uintptr - var ok bool - var file string - var line int - var name string - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - r, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(r) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - msg := msgAndArgs[0] - if msgAsStr, ok := msg.(string); ok { - return msgAsStr - } - return fmt.Sprintf("%+v", msg) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the -// basis on which the alignment occurs). 
-func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) - if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. 
Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. -// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) - } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// NotImplements asserts that an object does not implement the specified interface. 
-// -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) -func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) - } - if reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) 
- } - - return true - -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. -func validateEqualArgs(expected, actual interface{}) error { - if expected == nil && actual == nil { - return nil - } - - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -// Same asserts that two pointers reference the same object. -// -// assert.Same(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !samePointers(expected, actual) { - return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) - } - - return true -} - -// NotSame asserts that two pointers do not reference the same object. -// -// assert.NotSame(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if samePointers(expected, actual) { - return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) 
- } - return true -} - -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { - firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) - if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false - } - - firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) - if firstType != secondType { - return false - } - - // compare pointer addresses - return first == second -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parentheses similar -// to a type conversion in the Go grammar. -func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)), - fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual)) - } - switch expected.(type) { - case time.Duration: - return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual) - } - return truncatingFormat(expected), truncatingFormat(actual) -} - -// truncatingFormat formats the data and truncates it if it's too long. -// -// This helps keep formatted error messages lines from exceeding the -// bufio.MaxScanTokenSize max line length that the go testing framework imposes. -func truncatingFormat(data interface{}) string { - value := fmt.Sprintf("%#v", data) - max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed. - if len(value) > max { - value = value[0:max] + "<... truncated>" - } - return value -} - -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. 
-// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// EqualExportedValues asserts that the types of two objects are equal and their public -// fields are also equal. This is useful for comparing structs that have private fields -// that could potentially differ. -// -// type S struct { -// Exported int -// notExported int -// } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false -func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) 
- } - - expected = copyExportedFields(expected) - actual = copyExportedFields(actual) - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - switch value.Kind() { - case - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - - return value.IsNil() - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) 
-} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - pass := isEmpty(object) - if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - pass := !isEmpty(object) - if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen tries to get the length of an object. -// It returns (0, false) if impossible. 
-func getLen(x interface{}) (length int, ok bool) { - v := reflect.ValueOf(x) - defer func() { - ok = recover() == nil - }() - return v.Len(), true -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - l, ok := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if !value { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if value { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) 
- } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValues(t, obj1, obj2) -func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func containsElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - listType := reflect.TypeOf(list) - if listType == nil { - return false, false - } - listKind := listType.Kind() - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if listKind == reflect.String { - elementValue := reflect.ValueOf(element) - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if listKind == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
-// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := containsElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := containsElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. -// -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - listKind := reflect.TypeOf(list).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) 
- } - - subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - if subsetKind == reflect.Map && listKind == reflect.Map { - subsetMap := reflect.ValueOf(subset) - actualMap := reflect.ValueOf(list) - - for _, k := range subsetMap.MapKeys() { - ev := subsetMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) - } - if !ObjectsAreEqual(ev.Interface(), av.Interface()) { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) - } - } - - return true - } - - subsetList := reflect.ValueOf(subset) - for i := 0; i < subsetList.Len(); i++ { - element := subsetList.Index(i).Interface() - ok, found := containsElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) - } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. -// -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) - } - - listKind := reflect.TypeOf(list).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) 
- } - - subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - if subsetKind == reflect.Map && listKind == reflect.Map { - subsetMap := reflect.ValueOf(subset) - actualMap := reflect.ValueOf(list) - - for _, k := range subsetMap.MapKeys() { - ev := subsetMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !av.IsValid() { - return true - } - if !ObjectsAreEqual(ev.Interface(), av.Interface()) { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) - } - - subsetList := reflect.ValueOf(subset) - for i := 0; i < subsetList.Len(); i++ { - element := subsetList.Index(i).Interface() - ok, found := containsElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) { - return false - } - - extraA, extraB := diffLists(listA, listB) - - if len(extraA) == 0 && len(extraB) == 0 { - return true - } - - return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...) -} - -// isList checks that the provided value is array or slice. 
-func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) { - kind := reflect.TypeOf(list).Kind() - if kind != reflect.Array && kind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind), - msgAndArgs...) - } - return true -} - -// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B. -// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and -// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored. -func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - extraA = append(extraA, element) - } - } - - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - extraB = append(extraB, bValue.Index(j).Interface()) - } - - return -} - -func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string { - var msg bytes.Buffer - - msg.WriteString("elements differ") - if len(extraA) > 0 { - msg.WriteString("\n\nextra elements in list A:\n") - msg.WriteString(spewConfig.Sdump(extraA)) - } - if len(extraB) > 0 { - msg.WriteString("\n\nextra elements in list B:\n") - msg.WriteString(spewConfig.Sdump(extraB)) - } - msg.WriteString("\n\nlistA:\n") - msg.WriteString(spewConfig.Sdump(listA)) - msg.WriteString("\n\nlistB:\n") - msg.WriteString(spewConfig.Sdump(listB)) - - return msg.String() -} - -// Condition uses a Comparison to assert a 
complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { - didPanic = true - - defer func() { - message = recover() - if didPanic { - stack = string(debug.Stack()) - } - }() - - // call the target function - f() - didPanic = false - - return -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) 
- } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - panicErr, ok := panicValue.(error) - if !ok || panicErr.Error() != errString { - return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. 
-// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// WithinRange asserts that a time is within a time range (inclusive). -// -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) -func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if end.Before(start) { - return Fail(t, "Start should be before end", msgAndArgs...) - } - - if actual.Before(start) { - return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) - } else if actual.After(end) { - return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint: - xf = float64(xn) - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = xn - case time.Duration: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. 
-// -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, "Parameters must be numerical", msgAndArgs...) - } - - if math.IsNaN(af) && math.IsNaN(bf) { - return true - } - - if math.IsNaN(af) { - return Fail(t, "Expected must not be NaN", msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, "Parameters must be slice", msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. 
-func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) - } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) - } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - if !aok || !bok { - return 0, fmt.Errorf("Parameters must be numerical") - } - if math.IsNaN(af) && math.IsNaN(bf) { - return 0, nil - } - if math.IsNaN(af) { - return 0, errors.New("expected value must not be NaN") - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - if math.IsNaN(bf) { - return 0, errors.New("actual value must not be NaN") - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN", msgAndArgs...) 
- } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if expected == nil || actual == nil { - return Fail(t, "Parameters must be slice", msgAndArgs...) - } - - expectedSlice := reflect.ValueOf(expected) - actualSlice := reflect.ValueOf(actual) - - if expectedSlice.Type().Kind() != reflect.Slice { - return Fail(t, "Expected value must be slice", msgAndArgs...) - } - - expectedLen := expectedSlice.Len() - if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { - return false - } - - for i := 0; i < expectedLen; i++ { - if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { - return false - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err != nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). 
-// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err == nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) { - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// ErrorContains asserts that a function returned an error (i.e. not `nil`) -// and that the error contains the specified substring. -// -// actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) -func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) { - return false - } - - actual := theError.Error() - if !strings.Contains(actual, contains) { - return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) - } - - return true -} - -// matchRegexp return true if a specified regexp matches a string. 
-func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) 
- } - return true -} - -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. -func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) - } - return true -} - -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - return true - } - if info.IsDir() { - return true - } - return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...) -} - -// DirExists checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. -func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if !info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) - } - return true -} - -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. 
-func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return true - } - return true - } - if !info.IsDir() { - return true - } - return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -// YAMLEq asserts that two YAML strings are equivalent. -func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedYAMLAsInterface, actualYAMLAsInterface interface{} - - if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) 
- } - - return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - - switch et { - case reflect.TypeOf(""): - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - case reflect.TypeOf(time.Time{}): - e = spewConfigStringerEnabled.Sdump(expected) - a = spewConfigStringerEnabled.Sdump(actual) - default: - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - DisableMethods: true, - MaxDepth: 10, -} - -var spewConfigStringerEnabled = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - MaxDepth: 10, -} - -type tHelper interface { - Helper() -} - -// Eventually asserts that given condition will be met in waitFor time, -// 
periodically checking target function each tick. -// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) -func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return true - } - tick = ticker.C - } - } -} - -// CollectT implements the TestingT interface and collects all errors. -type CollectT struct { - errors []error -} - -// Errorf collects the error. -func (c *CollectT) Errorf(format string, args ...interface{}) { - c.errors = append(c.errors, fmt.Errorf(format, args...)) -} - -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") -} - -// Deprecated: That was a method for internal usage that should not have been published. Now just panics. -func (*CollectT) Reset() { - panic("Reset() is deprecated") -} - -// Deprecated: That was a method for internal usage that should not have been published. Now just panics. -func (*CollectT) Copy(TestingT) { - panic("Copy() is deprecated") -} - -// EventuallyWithT asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. In contrast to Eventually, -// it supplies a CollectT to the condition function, so that the condition -// function can use the CollectT to call other assertions. -// The condition is considered "met" if no errors are raised in a tick. -// The supplied CollectT collects all errors from one tick (if there are any). -// If the condition is not met before waitFor, the collected errors of -// the last tick are copied to t. 
-// -// externalValue := false -// go func() { -// time.Sleep(8*time.Second) -// externalValue = true -// }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { -// // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") -func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - var lastFinishedTickErrs []error - ch := make(chan []error, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - for _, err := range lastFinishedTickErrs { - t.Errorf("%v", err) - } - return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect.errors - }() - condition(collect) - }() - case errs := <-ch: - if len(errs) == 0 { - return true - } - // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs - tick = ticker.C - } - } -} - -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. 
-// -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) -func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - return true - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return Fail(t, "Condition satisfied", msgAndArgs...) - } - tick = ticker.C - } - } -} - -// ErrorIs asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if errors.Is(err, target) { - return true - } - - var expectedText string - if target != nil { - expectedText = target.Error() - } - - chain := buildErrorChainString(err) - - return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ - "expected: %q\n"+ - "in chain: %s", expectedText, chain, - ), msgAndArgs...) -} - -// NotErrorIs asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !errors.Is(err, target) { - return true - } - - var expectedText string - if target != nil { - expectedText = target.Error() - } - - chain := buildErrorChainString(err) - - return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", expectedText, chain, - ), msgAndArgs...) -} - -// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. 
-func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if errors.As(err, target) { - return true - } - - chain := buildErrorChainString(err) - - return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, - ), msgAndArgs...) -} - -func buildErrorChainString(err error) string { - if err == nil { - return "" - } - - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) - } - return chain -} diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/doc.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index 4953981d38..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. -// -// # Example Usage -// -// The following is a complete example using assert in a standard test function: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// # Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. 
-// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/errors.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d1d6..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index df189d2348..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. 
-func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs" diff --git a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/http_assertions.go b/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index 861ed4b7ce..0000000000 --- a/cluster-provision/gocli/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,165 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 and -// an error if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, http.NoBody) - if err != nil { - return -1, err - } - req.URL.RawQuery = values.Encode() - handler(w, req) - return w.Code, nil -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) - } - - isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent - if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
- } - - return isSuccessCode -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) - } - - isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect - if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) - } - - return isRedirectCode -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) - } - - isErrorCode := code >= http.StatusBadRequest - if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) - } - - return isErrorCode -} - -// HTTPStatusCode asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) - } - - successful := code == statuscode - if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) - } - - return successful -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - if len(values) > 0 { - url += "?" + values.Encode() - } - req, err := http.NewRequest(method, url, http.NoBody) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. 
-// -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) - } - - return !contains -} From f71b7072e626b6170c42b39868e38202a3b6edae Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:28:48 +0300 Subject: [PATCH 2/8] feat/refactor: Introduce the KubevirtProvider type The KubevirtProvider is a struct representing an arbitrary Kubevirtci running cluster. It holds all config flags and options that are in the run and provision commands. A Kubevirt provider can be created in two ways, by creating a cluster using the Start method, or from an already running cluster. For this to be possible then json representation of the struct is persisted on the dnsmasq container and later read to parse the deployed settings Or through the normal constructor which uses the option pattern to avoid a bloated function signature The logic that was previously in run.go has been split to several methods to facilitate readability and testing (runNFSGanesha, runRegistry, prepareQemuCmd, prepareDeviceMappings) and dnsmasq creation logic got moved to its own method instead of existing in its own package Floating methods such as waitForVMToBeUp, nodeNameFromIndex, nodeContainer.. 
etc were grouped to be methods of the struct Signed-off-by: aerosouund --- .../gocli/providers/base_provider.go | 708 ++++++++++++++++++ cluster-provision/gocli/providers/opts.go | 334 +++++++++ cluster-provision/gocli/providers/types.go | 75 ++ 3 files changed, 1117 insertions(+) create mode 100644 cluster-provision/gocli/providers/base_provider.go create mode 100644 cluster-provision/gocli/providers/opts.go create mode 100644 cluster-provision/gocli/providers/types.go diff --git a/cluster-provision/gocli/providers/base_provider.go b/cluster-provision/gocli/providers/base_provider.go new file mode 100644 index 0000000000..b627f57838 --- /dev/null +++ b/cluster-provision/gocli/providers/base_provider.go @@ -0,0 +1,708 @@ +package providers + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "os/signal" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/alessio/shellescape" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/resource" + "kubevirt.io/kubevirtci/cluster-provision/gocli/cmd/utils" + "kubevirt.io/kubevirtci/cluster-provision/gocli/docker" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts" + aaq "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/aaq" + bindvfio "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/bind-vfio" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/cdi" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/cnao" + dockerproxy "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/docker-proxy" + etcd "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/etcd" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/istio" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/ksm" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/labelnodes" + 
"kubevirt.io/kubevirtci/cluster-provision/gocli/opts/multus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/nfscsi" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/node01" + nodesprovision "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/nodes" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/prometheus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/psa" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/realtime" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/rookceph" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/rootkey" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/swap" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" + "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" +) + +func NewKubevirtProvider(k8sversion string, image string, cli *client.Client, + options []KubevirtProviderOption) *KubevirtProvider { + kp := &KubevirtProvider{ + Image: image, + Version: k8sversion, + Docker: cli, + Nodes: 1, // start with nodes equal one and will be later modified by options that set a different value + } + + for _, option := range options { + option(kp) + } + + return kp +} + +func NewFromRunning(dnsmasqPrefix string) (*KubevirtProvider, error) { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + return nil, err + } + + containers, err := docker.GetPrefixedContainers(cli, dnsmasqPrefix+"-dnsmasq") + if err != nil { + return nil, err + } + + if len(containers) == 0 { + return nil, fmt.Errorf("No running provider has the prefix %s", dnsmasqPrefix) + } + + var buf bytes.Buffer + _, err = docker.Exec(cli, containers[0].ID, []string{"cat", "provider.json"}, &buf) + if err != nil { + return nil, err + } + + kp := &KubevirtProvider{} + + err = json.Unmarshal(buf.Bytes(), kp) + if err != nil { + return nil, err + } + + kp.Docker = cli + return kp, nil +} + +func (kp *KubevirtProvider) Start(ctx context.Context, cancel context.CancelFunc, portMap nat.PortMap) (retErr 
error) { + stop := make(chan error, 10) + containers, _, done := docker.NewCleanupHandler(kp.Docker, stop, os.Stdout, false) + + defer func() { + stop <- retErr + <-done + }() + + go kp.handleInterrupt(cancel, stop) + + dnsmasq, err := kp.runDNSMasq(ctx, portMap) + if err != nil { + return err + } + kp.DNSMasq = dnsmasq + containers <- dnsmasq + + dnsmasqJSON, err := kp.Docker.ContainerInspect(context.Background(), kp.DNSMasq) + if err != nil { + return err + } + + if kp.SSHPort == 0 { + port, err := utils.GetPublicPort(utils.PortSSH, dnsmasqJSON.NetworkSettings.Ports) + if err != nil { + return err + } + kp.SSHPort = port + } + + if kp.APIServerPort == 0 { + port, err := utils.GetPublicPort(utils.PortAPI, dnsmasqJSON.NetworkSettings.Ports) + if err != nil { + return err + } + kp.APIServerPort = port + } + + registry, err := kp.runRegistry(ctx) + if err != nil { + return err + } + containers <- registry + + if kp.NFSData != "" { + nfsGanesha, err := kp.runNFSGanesha(ctx) + if err != nil { + return nil + } + containers <- nfsGanesha + } + + wg := sync.WaitGroup{} + wg.Add(int(kp.Nodes)) + macCounter := 0 + + for x := 0; x < int(kp.Nodes); x++ { + nodeName := kp.nodeNameFromIndex(x + 1) + sshClient, err := libssh.NewSSHClient(kp.SSHPort, x+1, false) + if err != nil { + return err + } + + nodeNum := fmt.Sprintf("%02d", x+1) + qemuCMD := kp.prepareQemuCmd(x) + macCounter++ + + vmContainerConfig := &container.Config{ + Image: kp.Image, + Env: []string{ + fmt.Sprintf("NODE_NUM=%s", nodeNum), + }, + Cmd: []string{"/bin/bash", "-c", qemuCMD}, + } + var deviceMappings []container.DeviceMapping + + if kp.GPU != "" && x == int(kp.Nodes)-1 { + dm, err := kp.prepareDeviceMappings() + if err != nil { + return err + } + deviceMappings = dm + qemuCMD = fmt.Sprintf("%s -device vfio-pci,host=%s", qemuCMD, kp.GPU) + } + + if kp.EnableCeph { + vmContainerConfig.Volumes = map[string]struct{}{ + "/var/lib/rook": {}, + } + } + + node, err := kp.Docker.ContainerCreate(ctx, 
vmContainerConfig, &container.HostConfig{ + Privileged: true, + NetworkMode: container.NetworkMode("container:" + kp.DNSMasq), + Resources: container.Resources{ + Devices: deviceMappings, + }, + }, nil, nil, kp.Version+"-"+nodeName) + if err != nil { + return err + } + containers <- node.ID + + if err := kp.Docker.ContainerStart(ctx, node.ID, types.ContainerStartOptions{}); err != nil { + return err + } + + success, err := docker.Exec(kp.Docker, kp.nodeContainer(kp.Version, nodeName), []string{"/bin/bash", "-c", "while [ ! -f /ssh_ready ] ; do sleep 1; done"}, os.Stdout) + if err != nil { + return err + } + + if !success { + return fmt.Errorf("checking for ssh.sh script for node %s failed", nodeName) + } + + err = kp.waitForVMToBeUp(kp.Version, nodeName) + if err != nil { + return err + } + + rootkey := rootkey.NewRootKey(sshClient) + if err = rootkey.Exec(); err != nil { + return err + } + sshClient, err = libssh.NewSSHClient(kp.SSHPort, x+1, true) + + if err = kp.provisionNode(sshClient, x+1); err != nil { + return err + } + + go func(id string) { + kp.Docker.ContainerWait(ctx, id, container.WaitConditionNotRunning) + wg.Done() + }(node.ID) + } + + sshClient, err := libssh.NewSSHClient(kp.SSHPort, 1, true) + if err != nil { + return err + } + + err = sshClient.CopyRemoteFile("/etc/kubernetes/admin.conf", ".kubeconfig") + if err != nil { + return err + } + + config, err := k8s.InitConfig(".kubeconfig", kp.APIServerPort) + if err != nil { + return err + } + + k8sClient, err := k8s.NewDynamicClient(config) + if err != nil { + return err + } + kp.Client = k8sClient + + if err = kp.provisionK8sOpts(sshClient); err != nil { + return err + } + + err = kp.persistProvider() + if err != nil { + return err + } + + return nil +} + +func (kp *KubevirtProvider) runDNSMasq(ctx context.Context, portMap nat.PortMap) (string, error) { + dnsmasqMounts := []mount.Mount{} + _, err := os.Stat("/lib/modules") + if err == nil { + dnsmasqMounts = []mount.Mount{ + { + Type: 
mount.TypeBind, + Source: "/lib/modules", + Target: "/lib/modules", + }, + } + } + + dnsmasq, err := kp.Docker.ContainerCreate(ctx, &container.Config{ + Image: kp.Image, + Env: []string{ + fmt.Sprintf("NUM_NODES=%d", kp.Nodes), + fmt.Sprintf("NUM_SECONDARY_NICS=%d", kp.SecondaryNics), + }, + Cmd: []string{"/bin/bash", "-c", "/dnsmasq.sh"}, + ExposedPorts: nat.PortSet{ + utils.TCPPortOrDie(utils.PortSSH): {}, + utils.TCPPortOrDie(utils.PortRegistry): {}, + utils.TCPPortOrDie(utils.PortOCP): {}, + utils.TCPPortOrDie(utils.PortAPI): {}, + utils.TCPPortOrDie(utils.PortVNC): {}, + utils.TCPPortOrDie(utils.PortHTTP): {}, + utils.TCPPortOrDie(utils.PortHTTPS): {}, + utils.TCPPortOrDie(utils.PortPrometheus): {}, + utils.TCPPortOrDie(utils.PortGrafana): {}, + utils.TCPPortOrDie(utils.PortUploadProxy): {}, + utils.UDPPortOrDie(utils.PortDNS): {}, + }, + }, &container.HostConfig{ + Privileged: true, + PublishAllPorts: kp.RandomPorts, + PortBindings: portMap, + ExtraHosts: []string{ + "nfs:192.168.66.2", + "registry:192.168.66.2", + "ceph:192.168.66.2", + }, + Mounts: dnsmasqMounts, + }, nil, nil, kp.Version+"-dnsmasq") + + if err := kp.Docker.ContainerStart(ctx, dnsmasq.ID, types.ContainerStartOptions{}); err != nil { + return "", err + } + return dnsmasq.ID, nil +} + +func (kp *KubevirtProvider) runRegistry(ctx context.Context) (string, error) { + err := docker.ImagePull(kp.Docker, ctx, utils.DockerRegistryImage, types.ImagePullOptions{}) + if err != nil { + return "", err + } + registry, err := kp.Docker.ContainerCreate(ctx, &container.Config{ + Image: utils.DockerRegistryImage, + }, &container.HostConfig{ + Privileged: true, + NetworkMode: container.NetworkMode("container:" + kp.DNSMasq), + }, nil, nil, kp.Version+"-registry") + if err != nil { + return "", err + } + + if err := kp.Docker.ContainerStart(ctx, registry.ID, types.ContainerStartOptions{}); err != nil { + return "", err + } + + return registry.ID, nil +} + +func (kp *KubevirtProvider) runNFSGanesha(ctx 
context.Context) (string, error) { + nfsData, err := filepath.Abs(kp.NFSData) + if err != nil { + return "", err + } + + err = docker.ImagePull(kp.Docker, ctx, utils.NFSGaneshaImage, types.ImagePullOptions{}) + if err != nil { + panic(err) + } + + nfsGanesha, err := kp.Docker.ContainerCreate(ctx, &container.Config{ + Image: utils.NFSGaneshaImage, + }, &container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: nfsData, + Target: "/data/nfs", + }, + }, + Privileged: true, + NetworkMode: container.NetworkMode("container:" + kp.DNSMasq), + }, nil, nil, kp.Version+"-nfs-ganesha") + if err != nil { + return "", err + } + + if err := kp.Docker.ContainerStart(ctx, nfsGanesha.ID, types.ContainerStartOptions{}); err != nil { + return "", err + } + return nfsGanesha.ID, nil +} + +func (kp *KubevirtProvider) provisionNode(sshClient libssh.Client, nodeIdx int) error { + opts := []opts.Opt{} + nodeName := kp.nodeNameFromIndex(nodeIdx) + + if kp.EnableFIPS { + for _, cmd := range []string{"sudo fips-mode-setup --enable", "sudo reboot"} { + if _, err := sshClient.Command(cmd, true); err != nil { + return fmt.Errorf("Starting fips mode failed: %s", err) + } + } + err := kp.waitForVMToBeUp(kp.Version, nodeName) + if err != nil { + return err + } + } + + if kp.DockerProxy != "" { + //if dockerProxy has value, generate a shell script`/script/docker-proxy.sh` which can be applied to set proxy settings + dp := dockerproxy.NewDockerProxyOpt(sshClient, kp.DockerProxy) + opts = append(opts, dp) + } + + if kp.RunEtcdOnMemory { + logrus.Infof("Creating in-memory mount for etcd data on node %s", nodeName) + etcdinmem := etcd.NewEtcdInMemOpt(sshClient, kp.EtcdCapacity) + opts = append(opts, etcdinmem) + } + + if kp.EnableRealtimeScheduler { + realtimeOpt := realtime.NewRealtimeOpt(sshClient) + opts = append(opts, realtimeOpt) + } + + for _, s := range []string{"8086:2668", "8086:2415"} { + // move the VM sound cards to a vfio-pci driver to prepare for assignment + 
bvfio := bindvfio.NewBindVfioOpt(sshClient, s) + opts = append(opts, bvfio) + } + + if kp.SingleStack { + if _, err := sshClient.Command("touch /home/vagrant/single_stack", false); err != nil { + return fmt.Errorf("provisioning node %d failed (setting singleStack phase): %s", nodeIdx, err) + } + } + + if kp.EnableAudit { + if _, err := sshClient.Command("touch /home/vagrant/enable_audit", false); err != nil { + return fmt.Errorf("provisioning node %d failed (setting enableAudit phase): %s", nodeIdx, err) + } + } + + if kp.EnablePSA { + psaOpt := psa.NewPsaOpt(sshClient) + opts = append(opts, psaOpt) + } + + if nodeIdx == 1 { + n := node01.NewNode01Provisioner(sshClient) + opts = append(opts, n) + + } else { + if kp.GPU != "" { + // move the assigned PCI device to a vfio-pci driver to prepare for assignment + gpuDeviceID, err := kp.getDevicePCIID(kp.GPU) + if err != nil { + return err + } + bindVfioOpt := bindvfio.NewBindVfioOpt(sshClient, gpuDeviceID) + opts = append(opts, bindVfioOpt) + } + n := nodesprovision.NewNodesProvisioner(sshClient) + opts = append(opts, n) + } + + if kp.KSM { + ksmOpt := ksm.NewKsmOpt(sshClient, int(kp.KSMInterval), int(kp.KSMPages)) + opts = append(opts, ksmOpt) + } + + if kp.Swap { + swapOpt := swap.NewSwapOpt(sshClient, int(kp.Swapiness), kp.UnlimitedSwap, int(kp.Swapsize)) + opts = append(opts, swapOpt) + } + + for _, o := range opts { + if err := o.Exec(); err != nil { + return err + } + } + + return nil +} + +func (kp *KubevirtProvider) provisionK8sOpts(sshClient libssh.Client) error { + opts := []opts.Opt{} + labelSelector := "node-role.kubernetes.io/control-plane" + if kp.Nodes > 1 { + labelSelector = "!node-role.kubernetes.io/control-plane" + } + opts = append(opts, labelnodes.NewNodeLabler(sshClient, labelSelector)) + + if kp.CDI { + opts = append(opts, cdi.NewCdiOpt(kp.Client, kp.CDIVersion)) + } + + if kp.AAQ { + if kp.Version == "k8s-1.30" { + opts = append(opts, aaq.NewAaqOpt(kp.Client, sshClient, kp.AAQVersion)) + } else { 
+ logrus.Info("AAQ was requested but kubernetes version is less than 1.30, skipping") + } + } + + if kp.EnablePrometheus { + opts = append(opts, prometheus.NewPrometheusOpt(kp.Client, kp.EnableGrafana, kp.EnablePrometheusAlertManager)) + } + + if kp.EnableCeph { + opts = append(opts, rookceph.NewCephOpt(kp.Client)) + } + + if kp.EnableNFSCSI { + opts = append(opts, nfscsi.NewNfsCsiOpt(kp.Client)) + } + + if kp.EnableMultus { + opts = append(opts, multus.NewMultusOpt(kp.Client, sshClient)) + } + + if kp.EnableCNAO { + opts = append(opts, cnao.NewCnaoOpt(kp.Client, sshClient)) + } + + if kp.EnableIstio { + opts = append(opts, istio.NewIstioOpt(sshClient, kp.Client, kp.EnableCNAO)) + } + + for _, opt := range opts { + if err := opt.Exec(); err != nil { + return err + } + } + + return nil +} + +func (kp *KubevirtProvider) prepareDeviceMappings() ([]container.DeviceMapping, error) { + iommuGroup, err := kp.getPCIDeviceIOMMUGroup(kp.GPU) + if err != nil { + return nil, err + } + vfioDevice := fmt.Sprintf("/dev/vfio/%s", iommuGroup) + return []container.DeviceMapping{ + { + PathOnHost: "/dev/vfio/vfio", + PathInContainer: "/dev/vfio/vfio", + CgroupPermissions: "mrw", + }, + { + PathOnHost: vfioDevice, + PathInContainer: vfioDevice, + CgroupPermissions: "mrw", + }, + }, nil +} + +func (kp *KubevirtProvider) prepareQemuCmd(x int) string { + nodeQemuArgs := kp.QemuArgs + kernelArgs := kp.KernelArgs + macSuffix := fmt.Sprintf("%02x", x) + + for i := 0; i < int(kp.SecondaryNics); i++ { + netSuffix := fmt.Sprintf("%d-%d", x, i) + nodeQemuArgs = fmt.Sprintf("%s -device virtio-net-pci,netdev=secondarynet%s,mac=52:55:00:d1:56:%s -netdev tap,id=secondarynet%s,ifname=stap%s,script=no,downscript=no", nodeQemuArgs, netSuffix, macSuffix, netSuffix, netSuffix) + } + + if kp.GPU != "" && x == int(kp.Nodes)-1 { + nodeQemuArgs = fmt.Sprintf("%s -device vfio-pci,host=%s", nodeQemuArgs, kp.GPU) + } + + var vmArgsNvmeDisks []string + if len(kp.NvmeDisks) > 0 { + for i, size := range 
kp.NvmeDisks { + resource.MustParse(size) + disk := fmt.Sprintf("%s-%d.img", "/nvme", i) + nodeQemuArgs = fmt.Sprintf("%s -drive file=%s,format=raw,id=NVME%d,if=none -device nvme,drive=NVME%d,serial=nvme-%d", nodeQemuArgs, disk, i, i, i) + vmArgsNvmeDisks = append(vmArgsNvmeDisks, fmt.Sprintf("--nvme-device-size %s", size)) + } + } + var vmArgsSCSIDisks []string + if len(kp.ScsiDisks) > 0 { + nodeQemuArgs = fmt.Sprintf("%s -device virtio-scsi-pci,id=scsi0", nodeQemuArgs) + for i, size := range kp.ScsiDisks { + resource.MustParse(size) + disk := fmt.Sprintf("%s-%d.img", "/scsi", i) + nodeQemuArgs = fmt.Sprintf("%s -drive file=%s,if=none,id=drive%d -device scsi-hd,drive=drive%d,bus=scsi0.0,channel=0,scsi-id=0,lun=%d", nodeQemuArgs, disk, i, i, i) + vmArgsSCSIDisks = append(vmArgsSCSIDisks, fmt.Sprintf("--scsi-device-size %s", size)) + } + } + + var vmArgsUSBDisks []string + const bus = " -device qemu-xhci,id=bus%d" + const drive = " -drive if=none,id=stick%d,format=raw,file=/usb-%d.img" + const dev = " -device usb-storage,bus=bus%d.0,drive=stick%d" + const usbSizefmt = " --usb-device-size %s" + if len(kp.USBDisks) > 0 { + for i, size := range kp.USBDisks { + resource.MustParse(size) + if i%2 == 0 { + nodeQemuArgs += fmt.Sprintf(bus, i/2) + } + nodeQemuArgs += fmt.Sprintf(drive, i, i) + nodeQemuArgs += fmt.Sprintf(dev, i/2, i) + vmArgsUSBDisks = append(vmArgsUSBDisks, fmt.Sprintf(usbSizefmt, size)) + } + } + + additionalArgs := []string{} + if len(nodeQemuArgs) > 0 { + additionalArgs = append(additionalArgs, "--qemu-args", shellescape.Quote(nodeQemuArgs)) + } + + if kp.Hugepages2M > 0 { + kernelArgs += fmt.Sprintf(" hugepagesz=2M hugepages=%d", kp.Hugepages2M) + } + + if kp.Hugepages1G > 0 { + kernelArgs += fmt.Sprintf(" hugepagesz=1G hugepages=%d", kp.Hugepages1G) + } + + if kp.EnableFIPS { + kernelArgs += " fips=1" + } + + blockDev := "" + if kp.EnableCeph { + blockDev = "--block-device /var/run/disk/blockdev.qcow2 --block-device-size 32212254720" + } + + kernelArgs 
= strings.TrimSpace(kernelArgs) + if kernelArgs != "" { + additionalArgs = append(additionalArgs, "--additional-kernel-args", shellescape.Quote(kernelArgs)) + } + + return fmt.Sprintf("/vm.sh -n /var/run/disk/disk.qcow2 --memory %s --cpu %s --numa %s %s %s %s %s %s", + kp.Memory, + strconv.Itoa(int(kp.CPU)), + strconv.Itoa(int(kp.Numa)), + blockDev, + strings.Join(vmArgsSCSIDisks, " "), + strings.Join(vmArgsNvmeDisks, " "), + strings.Join(vmArgsUSBDisks, " "), + strings.Join(additionalArgs, " "), + ) +} + +func (kp *KubevirtProvider) persistProvider() error { + providerJson, err := json.Marshal(kp) + if err != nil { + return err + } + escapedJson := strconv.Quote(string(providerJson)) + + _, err = docker.Exec(kp.Docker, kp.DNSMasq, []string{"/bin/bash", "-c", fmt.Sprintf("echo %s | tee /provider.json > /dev/null", string(escapedJson))}, os.Stdout) + if err != nil { + return err + } + return nil +} + +func (kp *KubevirtProvider) getDevicePCIID(pciAddress string) (string, error) { + file, err := os.Open(filepath.Join("/sys/bus/pci/devices", pciAddress, "uevent")) + if err != nil { + return "", err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PCI_ID") { + equal := strings.Index(line, "=") + value := strings.TrimSpace(line[equal+1:]) + return strings.ToLower(value), nil + } + } + return "", fmt.Errorf("no pci_id is found") +} + +func (kp *KubevirtProvider) getPCIDeviceIOMMUGroup(address string) (string, error) { + iommuLink := filepath.Join("/sys/bus/pci/devices", address, "iommu_group") + iommuPath, err := os.Readlink(iommuLink) + if err != nil { + return "", fmt.Errorf("failed to read iommu_group link %s for device %s - %v", iommuLink, address, err) + } + _, iommuGroup := filepath.Split(iommuPath) + return iommuGroup, nil +} + +func (kp *KubevirtProvider) handleInterrupt(cancel context.CancelFunc, stop chan error) { + interrupt := make(chan os.Signal, 1) + 
signal.Notify(interrupt, os.Interrupt) + <-interrupt + cancel() + stop <- fmt.Errorf("Interrupt received, clean up") +} + +func (kp *KubevirtProvider) nodeNameFromIndex(x int) string { + return fmt.Sprintf("node%02d", x) +} + +func (kp *KubevirtProvider) nodeContainer(version string, node string) string { + return version + "-" + node +} + +func (kp *KubevirtProvider) waitForVMToBeUp(prefix string, nodeName string) error { + var err error + for x := 0; x < 10; x++ { + _, err = docker.Exec(kp.Docker, kp.nodeContainer(prefix, nodeName), []string{"/bin/bash", "-c", "ssh.sh echo VM is up"}, os.Stdout) + if err == nil { + break + } + logrus.WithError(err).Warningf("Could not establish a ssh connection to the VM, retrying ...") + time.Sleep(1 * time.Second) + } + + if err != nil { + return fmt.Errorf("could not establish a connection to the node after a generous timeout: %v", err) + } + + return nil +} diff --git a/cluster-provision/gocli/providers/opts.go b/cluster-provision/gocli/providers/opts.go new file mode 100644 index 0000000000..7b268a1f52 --- /dev/null +++ b/cluster-provision/gocli/providers/opts.go @@ -0,0 +1,334 @@ +package providers + +func WithNodes(nodes interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Nodes = nodes.(uint) + } +} + +func WithNuma(numa interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Numa = numa.(uint) + } +} + +func WithMemory(memory interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Memory = memory.(string) + } +} + +func WithCPU(cpu interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.CPU = cpu.(uint) + } +} + +func WithSwap(swap interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Swap = swap.(bool) + } +} + +func WithUnlimitedSwap(us interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.UnlimitedSwap = us.(bool) + } +} + +func WithSwapiness(s interface{}) 
KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Swapiness = s.(uint) + } +} + +func WithSwapSize(s interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Swapsize = s.(uint) + } +} + +func WithSecondaryNics(secondaryNics interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.SecondaryNics = secondaryNics.(uint) + } +} + +func WithQemuArgs(qemuArgs interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.QemuArgs = qemuArgs.(string) + } +} + +func WithKernelArgs(kernelArgs interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.KernelArgs = kernelArgs.(string) + } +} + +func WithBackground(background interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Background = background.(bool) + } +} + +func WithRandomPorts(randomPorts interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.RandomPorts = randomPorts.(bool) + } +} + +func WithSlim(slim interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Slim = slim.(bool) + } +} + +func WithVNCPort(vncPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.VNCPort = vncPort.(uint16) + } +} + +func WithHTTPPort(httpPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.HTTPPort = httpPort.(uint16) + } +} + +func WithHTTPSPort(httpsPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.HTTPSPort = httpsPort.(uint16) + } +} + +func WithRegistryPort(registryPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.RegistryPort = registryPort.(uint16) + } +} + +func WithOCPort(ocpPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.OCPort = ocpPort.(uint16) + } +} + +func WithK8sPort(k8sPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.K8sPort = k8sPort.(uint16) + } +} + 
+func WithSSHPort(sshPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.SSHPort = sshPort.(uint16) + } +} + +func WithPrometheusPort(prometheusPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.PrometheusPort = prometheusPort.(uint16) + } +} + +func WithGrafanaPort(grafanaPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.GrafanaPort = grafanaPort.(uint16) + } +} + +func WithDNSPort(dnsPort interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.DNSPort = dnsPort.(uint16) + } +} + +func WithNFSData(nfsData interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.NFSData = nfsData.(string) + } +} + +func WithEnableCeph(enableCeph interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableCeph = enableCeph.(bool) + } +} + +func WithEnableIstio(enableIstio interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableIstio = enableIstio.(bool) + } +} + +func WithEnableCNAO(enableCNAO interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableCNAO = enableCNAO.(bool) + } +} + +func WithEnableNFSCSI(enableNFSCSI interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableNFSCSI = enableNFSCSI.(bool) + } +} + +func WithEnablePrometheus(enablePrometheus interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnablePrometheus = enablePrometheus.(bool) + } +} + +func WithEnablePrometheusAlertManager(enablePrometheusAlertManager interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnablePrometheusAlertManager = enablePrometheusAlertManager.(bool) + } +} + +func WithEnableGrafana(enableGrafana interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableGrafana = enableGrafana.(bool) + } +} +func WithMultus(multus interface{}) KubevirtProviderOption { + return 
func(c *KubevirtProvider) { + c.EnableMultus = multus.(bool) + } +} +func WithAAQ(aaq interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.AAQ = aaq.(bool) + } +} +func WithCDI(cdi interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.CDI = cdi.(bool) + } +} + +func WithKSM(ksm interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.KSM = ksm.(bool) + } +} + +func WithKSMInterval(ki interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.KSMInterval = ki.(uint) + } +} + +func WithKSMPages(kp interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.KSMPages = kp.(uint) + } +} + +func WithDockerProxy(dockerProxy interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.DockerProxy = dockerProxy.(string) + } +} + +func WithGPU(gpu interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.GPU = gpu.(string) + } +} + +func WithCDIVersion(cdi interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.CDIVersion = cdi.(string) + } +} + +func WithAAQVersion(aaq interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.AAQVersion = aaq.(string) + } +} + +func WithNvmeDisks(nvmeDisks interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.NvmeDisks = nvmeDisks.([]string) + } +} + +func WithScsiDisks(scsiDisks interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.ScsiDisks = scsiDisks.([]string) + } +} + +func WithRunEtcdOnMemory(runEtcdOnMemory interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.RunEtcdOnMemory = runEtcdOnMemory.(bool) + } +} + +func WithEtcdCapacity(etcdCapacity interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EtcdCapacity = etcdCapacity.(string) + } +} + +func WithHugepages2M(hugepages2M interface{}) KubevirtProviderOption { + return func(c 
*KubevirtProvider) { + c.Hugepages2M = hugepages2M.(uint) + } +} + +func WithHugepages1G(hugepages1G interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Hugepages1G = hugepages1G.(uint) + } +} + +func WithEnableRealtimeScheduler(enableRealtimeScheduler interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableRealtimeScheduler = enableRealtimeScheduler.(bool) + } +} + +func WithEnableFIPS(enableFIPS interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableFIPS = enableFIPS.(bool) + } +} + +func WithEnablePSA(enablePSA interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnablePSA = enablePSA.(bool) + } +} + +func WithSingleStack(singleStack interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.SingleStack = singleStack.(bool) + } +} + +func WithEnableAudit(enableAudit interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.EnableAudit = enableAudit.(bool) + } +} + +func WithUSBDisks(usbDisks interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.USBDisks = usbDisks.([]string) + } +} + +func WithAdditionalKernelArgs(ka interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.AdditionalKernelArgs = ka.([]string) + } +} + +func WithPhases(p interface{}) KubevirtProviderOption { + return func(c *KubevirtProvider) { + c.Phases = p.(string) + } +} diff --git a/cluster-provision/gocli/providers/types.go b/cluster-provision/gocli/providers/types.go new file mode 100644 index 0000000000..06e15f5f0b --- /dev/null +++ b/cluster-provision/gocli/providers/types.go @@ -0,0 +1,75 @@ +package providers + +import ( + "github.com/docker/docker/client" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" +) + +type KubevirtProvider struct { + Client k8s.K8sDynamicClient `json:"-"` + Docker *client.Client `json:"-"` + DNSMasq string `json:"dnsmasq"` + + Version string 
`json:"version"` + Image string `json:"image"` + Nodes uint `flag:"nodes" short:"n" json:"nodes"` + Numa uint `flag:"numa" short:"u" json:"numa"` + Memory string `flag:"memory" short:"m" json:"memory"` + CPU uint `flag:"cpu" short:"c" json:"cpu"` + + SecondaryNics uint `flag:"secondary-nics" json:"secondary_nics"` + QemuArgs string `flag:"qemu-args" json:"qemu_args"` + KernelArgs string `flag:"kernel-args" json:"kernel_args"` + Background bool `flag:"background" short:"b" json:"background"` + RandomPorts bool `flag:"random-ports" json:"random_ports"` + Slim bool `flag:"slim" json:"slim"` + VNCPort uint16 `flag:"vnc-port" json:"vnc_port"` + HTTPPort uint16 `flag:"http-port" json:"http_port"` + HTTPSPort uint16 `flag:"https-port" json:"https_port"` + RegistryPort uint16 `flag:"registry-port" json:"registry_port"` + OCPort uint16 `flag:"ocp-port" json:"ocp_port"` + K8sPort uint16 `flag:"k8s-port" json:"k8s_port"` + SSHPort uint16 `flag:"ssh-port" json:"ssh_port"` + PrometheusPort uint16 `flag:"prometheus-port" json:"prometheus_port"` + GrafanaPort uint16 `flag:"grafana-port" json:"grafana_port"` + DNSPort uint16 `flag:"dns-port" json:"dns_port"` + APIServerPort uint16 `json:"api_server_port"` + NFSData string `flag:"nfs-data" json:"nfs_data"` + EnableCeph bool `flag:"enable-ceph" json:"enable_ceph"` + EnableIstio bool `flag:"enable-istio" json:"enable_istio"` + EnableCNAO bool `flag:"enable-cnao" json:"enable_cnao"` + EnableNFSCSI bool `flag:"enable-nfs-csi" json:"enable_nfs_csi"` + EnablePrometheus bool `flag:"enable-prometheus" json:"enable_prometheus"` + EnablePrometheusAlertManager bool `flag:"enable-prometheus-alertmanager" json:"enable_prometheus_alertmanager"` + EnableGrafana bool `flag:"enable-grafana" json:"enable_grafana"` + EnableMultus bool `flag:"deploy-multus" json:"deploy_multus"` + DockerProxy string `flag:"docker-proxy" json:"docker_proxy"` + AAQ bool `flag:"deploy-aaq" json:"deploy_aaq"` + AAQVersion string `flag:"aaq-version" json:"aaq_version"` + 
CDI bool `flag:"deploy-cdi" json:"deploy_cdi"` + CDIVersion string `flag:"cdi-version" json:"cdi_version"` + GPU string `flag:"gpu" json:"gpu"` + KSM bool `flag:"enable-ksm" json:"enable_ksm"` + KSMPages uint `flag:"ksm-page-count" json:"ksm_page_count"` + KSMInterval uint `flag:"ksm-scan-interval" json:"ksm_scan_interval"` + Swap bool `flag:"enable-swap" json:"enable_swap"` + Swapsize uint `flag:"swap-size" json:"swap_size"` + UnlimitedSwap bool `flag:"unlimited-swap" json:"unlimited_swap"` + Swapiness uint `flag:"swapiness" json:"swapiness"` + NvmeDisks []string `flag:"nvme" json:"nvme"` + ScsiDisks []string `flag:"scsi" json:"scsi"` + USBDisks []string `flag:"usb" json:"usb"` + AdditionalKernelArgs []string `flag:"additional-persistent-kernel-arguments" json:"additional-persistent-kernel-arguments"` + Phases string `flag:"phases" json:"phases"` + RunEtcdOnMemory bool `flag:"run-etcd-on-memory" json:"run_etcd_on_memory"` + EtcdCapacity string `flag:"etcd-capacity" json:"etcd_capacity"` + Hugepages2M uint `flag:"hugepages-2m" json:"hugepages_2m"` + Hugepages1G uint `flag:"hugepages-1g" json:"hugepages_1g"` + EnableRealtimeScheduler bool `flag:"enable-realtime-scheduler" json:"enable_realtime_scheduler"` + EnableFIPS bool `flag:"enable-fips" json:"enable_fips"` + EnablePSA bool `flag:"enable-psa" json:"enable_psa"` + SingleStack bool `flag:"single-stack" json:"single_stack"` + EnableAudit bool `flag:"enable-audit" json:"enable_audit"` +} + +type KubevirtProviderOption func(c *KubevirtProvider) From 327072a3b9885fb6e7b5cfd3784b12eba259bb46 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:31:07 +0300 Subject: [PATCH 3/8] refactor: Rewrite run.go to use the KubevirtProvider To avoid having to read each flag and return an error if its unset leverage the FlagMap, a map of flag name to FlagConfig. 
a FlagConfig is the type of this flag (string, int, uint16, bool or array of string) and the option function that sets the value of this flag on the KubevirtProvider struct. During parsing of flags this map is being iterated on and each option gets appended to an array to later be used in the KubevirtProvider constructor. The run method's role is now to parse the flags and pass them to the provider and just call Start. All the floating methods in run.go are removed after being moved to the provider. Signed-off-by: aerosouund --- cluster-provision/gocli/cmd/run.go | 932 ++------------------- cluster-provision/gocli/providers/types.go | 259 ++++++ 2 files changed, 317 insertions(+), 874 deletions(-) diff --git a/cluster-provision/gocli/cmd/run.go b/cluster-provision/gocli/cmd/run.go index 2931a5e611..dc887c7727 100644 --- a/cluster-provision/gocli/cmd/run.go +++ b/cluster-provision/gocli/cmd/run.go @@ -1,94 +1,22 @@ package cmd import ( - "bufio" - "bytes" + "context" + _ "embed" "fmt" - "os" - "os/signal" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "text/template" - "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" - "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "golang.org/x/net/context" - "k8s.io/apimachinery/pkg/api/resource" - "kubevirt.io/kubevirtci/cluster-provision/gocli/cmd/nodesconfig" "kubevirt.io/kubevirtci/cluster-provision/gocli/cmd/utils" - containers2 "kubevirt.io/kubevirtci/cluster-provision/gocli/containers" "kubevirt.io/kubevirtci/cluster-provision/gocli/docker" - "kubevirt.io/kubevirtci/cluster-provision/gocli/images" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/aaq" - bindvfio "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/bind-vfio" - 
"kubevirt.io/kubevirtci/cluster-provision/gocli/opts/cdi" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/cnao" - dockerproxy "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/docker-proxy" - etcdinmemory "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/etcd" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/istio" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/ksm" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/multus" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/nfscsi" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/node01" - nodesprovision "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/nodes" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/prometheus" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/psa" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/realtime" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/rookceph" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/rootkey" - "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/swap" - k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" - "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" - - "github.com/alessio/shellescape" + "kubevirt.io/kubevirtci/cluster-provision/gocli/providers" ) -const ( - proxySettings = ` -curl {{.Proxy}}/ca.crt > /etc/pki/ca-trust/source/anchors/docker_registry_proxy.crt -update-ca-trust - -mkdir -p /etc/systemd/system/crio.service.d -cat </etc/systemd/system/crio.service.d/override.conf -[Service] -Environment="HTTP_PROXY={{.Proxy}}" -Environment="HTTPS_PROXY={{.Proxy}}" -Environment="NO_PROXY=localhost,127.0.0.1,registry,10.96.0.0/12,10.244.0.0/16,192.168.0.0/16,fd00:10:96::/112,fd00:10:244::/112,fd00::/64" -EOT - -systemctl daemon-reload -systemctl restart crio.service -EOF -` - etcdDataDir = "/var/lib/etcd" - nvmeDiskImagePrefix = "/nvme" - scsiDiskImagePrefix = "/scsi" -) - -var soundcardPCIIDs = []string{"8086:2668", "8086:2415"} -var cli *client.Client -var nvmeDisks 
[]string -var scsiDisks []string -var usbDisks []string -var sshClient libssh.Client - -type dockerSetting struct { - Proxy string -} +var nvmeDisks, scsiDisks, usbDisks []string -// NewRunCommand returns command that runs given cluster func NewRunCommand() *cobra.Command { - run := &cobra.Command{ Use: "run", Short: "run starts a given cluster", @@ -114,11 +42,10 @@ func NewRunCommand() *cobra.Command { run.Flags().Uint16("ssh-port", 0, "port on localhost for ssh server") run.Flags().Uint16("prometheus-port", 0, "port on localhost for prometheus server") run.Flags().Uint16("grafana-port", 0, "port on localhost for grafana server") - run.Flags().Uint16("dns-port", 0, "port on localhost for dns server") + run.Flags().Uint16("dns-port", 31111, "port on localhost for dns server") run.Flags().String("nfs-data", "", "path to data which should be exposed via nfs to the nodes") run.Flags().Bool("enable-ceph", false, "enables dynamic storage provisioning using Ceph") run.Flags().Bool("enable-istio", false, "deploys Istio service mesh") - run.Flags().Bool("reverse", false, "reverse node setup order") run.Flags().Bool("enable-cnao", false, "enable network extensions with istio") run.Flags().Bool("deploy-multus", false, "deploy multus") run.Flags().Bool("deploy-cdi", true, "deploy cdi") @@ -157,40 +84,45 @@ func NewRunCommand() *cobra.Command { } func run(cmd *cobra.Command, args []string) (retErr error) { + opts := []providers.KubevirtProviderOption{} + flags := cmd.Flags() + for flagName, flagConfig := range providers.RunFlagMap { + switch flagConfig.FlagType { + case "string": + flagVal, err := flags.GetString(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + case "bool": + flagVal, err := flags.GetBool(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) - prefix, err := cmd.Flags().GetString("prefix") - if err != nil { - return err - } - - nodes, err := 
cmd.Flags().GetUint("nodes") - if err != nil { - return err - } - - memory, err := cmd.Flags().GetString("memory") - if err != nil { - return err - } - resource.MustParse(memory) - - reverse, err := cmd.Flags().GetBool("reverse") - if err != nil { - return err - } - - randomPorts, err := cmd.Flags().GetBool("random-ports") - if err != nil { - return err - } - - slim, err := cmd.Flags().GetBool("slim") - if err != nil { - return err + case "uint": + flagVal, err := flags.GetUint(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + case "uint16": + flagVal, err := flags.GetUint16(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + case "[]string": + flagVal, err := flags.GetStringArray(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + } } portMap := nat.PortMap{} - utils.AppendTCPIfExplicit(portMap, utils.PortSSH, cmd.Flags(), "ssh-port") utils.AppendTCPIfExplicit(portMap, utils.PortVNC, cmd.Flags(), "vnc-port") utils.AppendTCPIfExplicit(portMap, utils.PortHTTP, cmd.Flags(), "http-port") @@ -202,85 +134,22 @@ func run(cmd *cobra.Command, args []string) (retErr error) { utils.AppendTCPIfExplicit(portMap, utils.PortGrafana, cmd.Flags(), "grafana-port") utils.AppendUDPIfExplicit(portMap, utils.PortDNS, cmd.Flags(), "dns-port") - qemuArgs, err := cmd.Flags().GetString("qemu-args") - if err != nil { - return err - } - kernelArgs, err := cmd.Flags().GetString("kernel-args") - if err != nil { - return err - } - - cpu, err := cmd.Flags().GetUint("cpu") - if err != nil { - return err - } - - numa, err := cmd.Flags().GetUint("numa") - if err != nil { - return err - } - - secondaryNics, err := cmd.Flags().GetUint("secondary-nics") - if err != nil { - return err - } - - nfsData, err := cmd.Flags().GetString("nfs-data") - if err != nil { - return err - } - - dockerProxy, err := cmd.Flags().GetString("docker-proxy") - if 
err != nil { - return err - } - - cephEnabled, err := cmd.Flags().GetBool("enable-ceph") - if err != nil { - return err - } - - nfsCsiEnabled, err := cmd.Flags().GetBool("enable-nfs-csi") - if err != nil { - return err - } - - istioEnabled, err := cmd.Flags().GetBool("enable-istio") - if err != nil { - return err - } - - prometheusEnabled, err := cmd.Flags().GetBool("enable-prometheus") - if err != nil { - return err - } - - prometheusAlertmanagerEnabled, err := cmd.Flags().GetBool("enable-prometheus-alertmanager") - if err != nil { - return err + k8sVersion := args[0] + allowedK8sVersions := []string{"k8s-1.28", "k8s-1.29", "k8s-1.30", "1.31"} + var validVersion bool + for _, v := range allowedK8sVersions { + if k8sVersion == v { + validVersion = true + } } - - grafanaEnabled, err := cmd.Flags().GetBool("enable-grafana") - if err != nil { - return err - } - - cluster := args[0] - - background, err := cmd.Flags().GetBool("background") - if err != nil { - return err + if !validVersion { + return fmt.Errorf("Invalid k8s version passed, please use one of k8s-1.28, k8s-1.29, k8s-1.30 or 1.31") } containerRegistry, err := cmd.Flags().GetString("container-registry") if err != nil { return err } - gpuAddress, err := cmd.Flags().GetString("gpu") - if err != nil { - return err - } containerOrg, err := cmd.Flags().GetString("container-org") if err != nil { @@ -292,719 +161,34 @@ func run(cmd *cobra.Command, args []string) (retErr error) { return err } - runEtcdOnMemory, err := cmd.Flags().GetBool("run-etcd-on-memory") - if err != nil { - return err - } - - etcdDataMountSize, err := cmd.Flags().GetString("etcd-capacity") - if err != nil { - return err - } - resource.MustParse(etcdDataMountSize) - - hugepages2Mcount, err := cmd.Flags().GetUint("hugepages-2m") - if err != nil { - return err - } - hugepages1Gcount, err := cmd.Flags().GetUint("hugepages-1g") - if err != nil { - return err - } - - realtimeSchedulingEnabled, err := cmd.Flags().GetBool("enable-realtime-scheduler") - 
if err != nil { - return err - } - psaEnabled, err := cmd.Flags().GetBool("enable-psa") - if err != nil { - return err - } - singleStack, err := cmd.Flags().GetBool("single-stack") - if err != nil { - return err - } - enableAudit, err := cmd.Flags().GetBool("enable-audit") - if err != nil { - return err - } - fipsEnabled, err := cmd.Flags().GetBool("enable-fips") - if err != nil { - return err - } - cnaoEnabled, err := cmd.Flags().GetBool("enable-cnao") - if err != nil { - return err - } - - deployCdi, err := cmd.Flags().GetBool("deploy-cdi") - if err != nil { - return err - } - - cdiVersion, err := cmd.Flags().GetString("cdi-version") - if err != nil { - return err - } - - deployAaq, err := cmd.Flags().GetBool("deploy-aaq") - if err != nil { - return err - } - - aaqVersion, err := cmd.Flags().GetString("aaq-version") - if err != nil { - return err - } - - deployMultus, err := cmd.Flags().GetBool("deploy-multus") - if err != nil { - return err - } - - enableSwap, err := cmd.Flags().GetBool("enable-swap") - if err != nil { - return err - } - - unlimitedSwap, err := cmd.Flags().GetBool("unlimited-swap") - if err != nil { - return err - } - - swapiness, err := cmd.Flags().GetUint("swapiness") - if err != nil { - return err - } - - swapSize, err := cmd.Flags().GetUint("swap-size") - if err != nil { - return err - } - - enableKsm, err := cmd.Flags().GetBool("enable-ksm") - if err != nil { - return err - } - - ksmPageCount, err := cmd.Flags().GetUint("ksm-page-count") - if err != nil { - return err - } - - ksmScanInterval, err := cmd.Flags().GetUint("ksm-scan-interval") + slim, err := cmd.Flags().GetBool("slim") if err != nil { return err } - cli, err = client.NewClientWithOpts(client.FromEnv) + cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return err } - b := context.Background() - ctx, cancel := context.WithCancel(b) - - stop := make(chan error, 10) - containers, _, done := docker.NewCleanupHandler(cli, stop, cmd.OutOrStderr(), false) - - defer 
func() { - stop <- retErr - <-done - }() - - go func() { - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - <-interrupt - cancel() - stop <- fmt.Errorf("Interrupt received, clean up") - }() - - clusterImage := cluster - - // Check if cluster container suffix has not being override - // in that case use the default prefix stored at the binary - if containerSuffix == "" { - containerSuffix = images.SUFFIX - } - if containerSuffix != "" { - clusterImage = fmt.Sprintf("%s/%s%s", containerOrg, cluster, containerSuffix) - } else { - clusterImage = path.Join(containerOrg, cluster) - } + clusterImage := fmt.Sprintf("%s/%s/%s%s", containerRegistry, containerOrg, k8sVersion, containerSuffix) if slim { clusterImage += "-slim" } - if len(containerRegistry) > 0 { - clusterImage = path.Join(containerRegistry, clusterImage) - fmt.Printf("Download the image %s\n", clusterImage) - err = docker.ImagePull(cli, ctx, clusterImage, types.ImagePullOptions{}) - if err != nil { - panic(fmt.Sprintf("Failed to download cluster image %s, %s", clusterImage, err)) - } - } - - dnsmasq, err := containers2.DNSMasq(cli, ctx, &containers2.DNSMasqOptions{ - ClusterImage: clusterImage, - SecondaryNicsCount: secondaryNics, - RandomPorts: randomPorts, - PortMap: portMap, - Prefix: prefix, - NodeCount: nodes, - }) - if err != nil { - return err - } - - containers <- dnsmasq.ID - if err := cli.ContainerStart(ctx, dnsmasq.ID, container.StartOptions{}); err != nil { - return err - } - - dm, err := cli.ContainerInspect(context.Background(), dnsmasq.ID) - if err != nil { - return err - } - - sshPort, err := utils.GetPublicPort(utils.PortSSH, dm.NetworkSettings.Ports) - apiServerPort, err := utils.GetPublicPort(utils.PortAPI, dm.NetworkSettings.Ports) - - // Pull the registry image - err = docker.ImagePull(cli, ctx, utils.DockerRegistryImage, types.ImagePullOptions{}) - if err != nil { - panic(err) - } - - // Start registry - registry, err := cli.ContainerCreate(ctx, 
&container.Config{ - Image: utils.DockerRegistryImage, - }, &container.HostConfig{ - Privileged: true, // fixme we just need proper selinux volume labeling - NetworkMode: container.NetworkMode("container:" + dnsmasq.ID), - }, nil, nil, prefix+"-registry") - if err != nil { - return err - } - containers <- registry.ID - if err := cli.ContainerStart(ctx, registry.ID, container.StartOptions{}); err != nil { - return err - } - - if nfsData != "" { - nfsData, err := filepath.Abs(nfsData) - if err != nil { - return err - } - // Pull the ganesha image - err = docker.ImagePull(cli, ctx, utils.NFSGaneshaImage, types.ImagePullOptions{}) - if err != nil { - panic(err) - } - - // Start the ganesha image - nfsServer, err := cli.ContainerCreate(ctx, &container.Config{ - Image: utils.NFSGaneshaImage, - }, &container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeBind, - Source: nfsData, - Target: "/data/nfs", - }, - }, - Privileged: true, - NetworkMode: container.NetworkMode("container:" + dnsmasq.ID), - }, nil, nil, prefix+"-nfs-ganesha") - if err != nil { - return err - } - containers <- nfsServer.ID - if err := cli.ContainerStart(ctx, nfsServer.ID, container.StartOptions{}); err != nil { - return err - } - } - - // Add serial pty so we can do stuff like 'screen /dev/pts0' to access - // the VM console from the container without ssh - qemuArgs += " -serial pty" - - wg := sync.WaitGroup{} - wg.Add(int(nodes)) - // start one vm after each other - macCounter := 0 - for x := 0; x < int(nodes); x++ { - - nodeQemuArgs := qemuArgs - - for i := 0; i < int(secondaryNics); i++ { - netSuffix := fmt.Sprintf("%d-%d", x, i) - macSuffix := fmt.Sprintf("%02x", macCounter) - macCounter++ - nodeQemuArgs = fmt.Sprintf("%s -device virtio-net-pci,netdev=secondarynet%s,mac=52:55:00:d1:56:%s -netdev tap,id=secondarynet%s,ifname=stap%s,script=no,downscript=no", nodeQemuArgs, netSuffix, macSuffix, netSuffix, netSuffix) - } - - nodeName := nodeNameFromIndex(x + 1) - nodeNum := 
fmt.Sprintf("%02d", x+1) - sshClient, err = libssh.NewSSHClient(sshPort, x+1, false) - if err != nil { - return err - } - if reverse { - nodeName = nodeNameFromIndex((int(nodes) - x)) - nodeNum = fmt.Sprintf("%02d", (int(nodes) - x)) - sshClient, err = libssh.NewSSHClient(sshPort, (int(nodes) - x), false) - - if err != nil { - return err - } - } - - // assign a GPU to one node - var deviceMappings []container.DeviceMapping - if gpuAddress != "" && x == int(nodes)-1 { - iommu_group, err := getPCIDeviceIOMMUGroup(gpuAddress) - if err != nil { - return err - } - vfioDevice := fmt.Sprintf("/dev/vfio/%s", iommu_group) - deviceMappings = []container.DeviceMapping{ - { - PathOnHost: "/dev/vfio/vfio", - PathInContainer: "/dev/vfio/vfio", - CgroupPermissions: "mrw", - }, - { - PathOnHost: vfioDevice, - PathInContainer: vfioDevice, - CgroupPermissions: "mrw", - }, - } - nodeQemuArgs = fmt.Sprintf("%s -device vfio-pci,host=%s", nodeQemuArgs, gpuAddress) - } - - var vmArgsNvmeDisks []string - if len(nvmeDisks) > 0 { - for i, size := range nvmeDisks { - resource.MustParse(size) - disk := fmt.Sprintf("%s-%d.img", nvmeDiskImagePrefix, i) - nodeQemuArgs = fmt.Sprintf("%s -drive file=%s,format=raw,id=NVME%d,if=none -device nvme,drive=NVME%d,serial=nvme-%d", nodeQemuArgs, disk, i, i, i) - vmArgsNvmeDisks = append(vmArgsNvmeDisks, fmt.Sprintf("--nvme-device-size %s", size)) - } - } - var vmArgsSCSIDisks []string - if len(scsiDisks) > 0 { - nodeQemuArgs = fmt.Sprintf("%s -device virtio-scsi-pci,id=scsi0", nodeQemuArgs) - for i, size := range scsiDisks { - resource.MustParse(size) - disk := fmt.Sprintf("%s-%d.img", scsiDiskImagePrefix, i) - nodeQemuArgs = fmt.Sprintf("%s -drive file=%s,if=none,id=drive%d -device scsi-hd,drive=drive%d,bus=scsi0.0,channel=0,scsi-id=0,lun=%d", nodeQemuArgs, disk, i, i, i) - vmArgsSCSIDisks = append(vmArgsSCSIDisks, fmt.Sprintf("--scsi-device-size %s", size)) - } - } - - var vmArgsUSBDisks []string - const bus = " -device qemu-xhci,id=bus%d" - const drive 
= " -drive if=none,id=stick%d,format=raw,file=/usb-%d.img" - const dev = " -device usb-storage,bus=bus%d.0,drive=stick%d" - const usbSizefmt = " --usb-device-size %s" - if len(usbDisks) > 0 { - for i, size := range usbDisks { - resource.MustParse(size) - if i%2 == 0 { - nodeQemuArgs += fmt.Sprintf(bus, i/2) - } - nodeQemuArgs += fmt.Sprintf(drive, i, i) - nodeQemuArgs += fmt.Sprintf(dev, i/2, i) - vmArgsUSBDisks = append(vmArgsUSBDisks, fmt.Sprintf(usbSizefmt, size)) - } - } - - additionalArgs := []string{} - if len(nodeQemuArgs) > 0 { - additionalArgs = append(additionalArgs, "--qemu-args", shellescape.Quote(nodeQemuArgs)) - } - - if hugepages2Mcount > 0 { - kernelArgs += fmt.Sprintf(" hugepagesz=2M hugepages=%d", hugepages2Mcount) - } - - if hugepages1Gcount > 0 { - kernelArgs += fmt.Sprintf(" hugepagesz=1G hugepages=%d", hugepages1Gcount) - } - - if fipsEnabled { - kernelArgs += " fips=1" - } - - blockDev := "" - if cephEnabled { - blockDev = "--block-device /var/run/disk/blockdev.qcow2 --block-device-size 32212254720" - } - - kernelArgs = strings.TrimSpace(kernelArgs) - if kernelArgs != "" { - additionalArgs = append(additionalArgs, "--additional-kernel-args", shellescape.Quote(kernelArgs)) - } - - vmContainerConfig := &container.Config{ - Image: clusterImage, - Env: []string{ - fmt.Sprintf("NODE_NUM=%s", nodeNum), - }, - Cmd: []string{"/bin/bash", "-c", fmt.Sprintf("/vm.sh -n /var/run/disk/disk.qcow2 --memory %s --cpu %s --numa %s %s %s %s %s %s", - memory, - strconv.Itoa(int(cpu)), - strconv.Itoa(int(numa)), - blockDev, - strings.Join(vmArgsSCSIDisks, " "), - strings.Join(vmArgsNvmeDisks, " "), - strings.Join(vmArgsUSBDisks, " "), - strings.Join(additionalArgs, " "), - )}, - } - - if cephEnabled { - vmContainerConfig.Volumes = map[string]struct{}{ - "/var/lib/rook": {}, - } - } - - node, err := cli.ContainerCreate(ctx, vmContainerConfig, &container.HostConfig{ - Privileged: true, - NetworkMode: container.NetworkMode("container:" + dnsmasq.ID), - Resources: 
container.Resources{ - Devices: deviceMappings, - }, - }, nil, nil, prefix+"-"+nodeName) - if err != nil { - return err - } - containers <- node.ID - if err := cli.ContainerStart(ctx, node.ID, container.StartOptions{}); err != nil { - return err - } - - // Wait for vm start - success, err := docker.Exec(cli, nodeContainer(prefix, nodeName), []string{"/bin/bash", "-c", "while [ ! -f /ssh_ready ] ; do sleep 1; done"}, os.Stdout) - if err != nil { - return err - } - - if !success { - return fmt.Errorf("checking for ssh.sh script for node %s failed", nodeName) - } - - err = waitForVMToBeUp(prefix, nodeName) - if err != nil { - return err - } - - rootkey := rootkey.NewRootKey(sshClient) - if err = rootkey.Exec(); err != nil { - return err - } - sshClient, err = libssh.NewSSHClient(sshPort, x+1, true) - - linuxConfigFuncs := []nodesconfig.LinuxConfigFunc{ - nodesconfig.WithFipsEnabled(fipsEnabled), - nodesconfig.WithDockerProxy(dockerProxy), - nodesconfig.WithEtcdInMemory(runEtcdOnMemory), - nodesconfig.WithEtcdSize(etcdDataMountSize), - nodesconfig.WithSingleStack(singleStack), - nodesconfig.WithEnableAudit(enableAudit), - nodesconfig.WithGpuAddress(gpuAddress), - nodesconfig.WithRealtime(realtimeSchedulingEnabled), - nodesconfig.WithPSA(psaEnabled), - nodesconfig.WithKsm(enableKsm), - nodesconfig.WithKsmPageCount(int(ksmPageCount)), - nodesconfig.WithKsmScanInterval(int(ksmScanInterval)), - nodesconfig.WithSwap(enableSwap), - nodesconfig.WithSwapiness(int(swapiness)), - nodesconfig.WithSwapSize(int(swapSize)), - nodesconfig.WithUnlimitedSwap(unlimitedSwap), - } - - n := nodesconfig.NewNodeLinuxConfig(x+1, prefix, linuxConfigFuncs) - - if err = provisionNode(sshClient, n); err != nil { - return err - } - - go func(id string) { - cli.ContainerWait(ctx, id, container.WaitConditionNotRunning) - wg.Done() - }(node.ID) - } - - sshClient, err := libssh.NewSSHClient(sshPort, 1, true) - if err != nil { - return err - } - - k8sConfs := []nodesconfig.K8sConfigFunc{ - 
nodesconfig.WithCeph(cephEnabled), - nodesconfig.WithPrometheus(prometheusEnabled), - nodesconfig.WithAlertmanager(prometheusAlertmanagerEnabled), - nodesconfig.WithGrafana(grafanaEnabled), - nodesconfig.WithIstio(istioEnabled), - nodesconfig.WithNfsCsi(nfsCsiEnabled), - nodesconfig.WithCnao(cnaoEnabled), - nodesconfig.WithMultus(deployMultus), - nodesconfig.WithCdi(deployCdi), - nodesconfig.WithCdiVersion(cdiVersion), - nodesconfig.WithAAQ(deployAaq), - nodesconfig.WithAAQVersion(aaqVersion), - } - n := nodesconfig.NewNodeK8sConfig(k8sConfs) - - kubeConfFile, err := os.Create(".kubeconfig") - if err != nil { - return err - } - - err = sshClient.CopyRemoteFile("/etc/kubernetes/admin.conf", kubeConfFile) - if err != nil { - return err - } - - config, err := k8s.NewConfig(".kubeconfig", apiServerPort) + b := context.Background() + ctx, cancel := context.WithCancel(b) + err = docker.ImagePull(cli, ctx, clusterImage, types.ImagePullOptions{}) if err != nil { - return err + return fmt.Errorf("Failed to download cluster image %s, %s", clusterImage, err) } + kp := providers.NewKubevirtProvider(k8sVersion, clusterImage, cli, opts) - k8sClient, err := k8s.NewDynamicClient(config) + err = kp.Start(ctx, cancel, portMap) if err != nil { return err } - if err = provisionK8sOptions(sshClient, k8sClient, n, prefix); err != nil { - return err - } - - // If background flag was specified, we don't want to clean up if we reach that state - if !background { - wg.Wait() - stop <- fmt.Errorf("Done. 
please clean up") - } - - return nil -} - -func provisionK8sOptions(sshClient libssh.Client, k8sClient k8s.K8sDynamicClient, n *nodesconfig.NodeK8sConfig, k8sVersion string) error { - opts := []opts.Opt{} - - if n.Ceph { - cephOpt := rookceph.NewCephOpt(k8sClient) - opts = append(opts, cephOpt) - } - - if n.NfsCsi { - nfsCsiOpt := nfscsi.NewNfsCsiOpt(k8sClient) - opts = append(opts, nfsCsiOpt) - } - - if n.Multus { - multusOpt := multus.NewMultusOpt(k8sClient, sshClient) - opts = append(opts, multusOpt) - } - - if n.CNAO { - cnaoOpt := cnao.NewCnaoOpt(k8sClient, sshClient) - opts = append(opts, cnaoOpt) - } - - if n.Istio { - istioOpt := istio.NewIstioOpt(sshClient, k8sClient, n.CNAO) - opts = append(opts, istioOpt) - } - - if n.Prometheus { - prometheusOpt := prometheus.NewPrometheusOpt(k8sClient, n.Grafana, n.Alertmanager) - opts = append(opts, prometheusOpt) - } - - if n.CDI { - cdi := cdi.NewCdiOpt(k8sClient, sshClient, n.CDIVersion) - opts = append(opts, cdi) - } - - if n.AAQ { - if k8sVersion == "k8s-1.30" { - aaq := aaq.NewAaqOpt(k8sClient, sshClient, n.CDIVersion) - opts = append(opts, aaq) - } else { - logrus.Info("AAQ was requested but k8s version is not k8s-1.30, skipping") - } - } - - for _, opt := range opts { - if err := opt.Exec(); err != nil { - return err - } - } - return nil } - -func provisionNode(sshClient libssh.Client, n *nodesconfig.NodeLinuxConfig) error { - opts := []opts.Opt{} - nodeName := nodeNameFromIndex(n.NodeIdx) - - if n.FipsEnabled { - for _, cmd := range []string{"sudo fips-mode-setup --enable", "sudo reboot"} { - if err := sshClient.Command(cmd); err != nil { - return fmt.Errorf("Starting fips mode failed: %s", err) - } - } - err := waitForVMToBeUp(n.K8sVersion, nodeName) - if err != nil { - return err - } - } - - if n.DockerProxy != "" { - //if dockerProxy has value, generate a shell script`/script/docker-proxy.sh` which can be applied to set proxy settings - dp := dockerproxy.NewDockerProxyOpt(sshClient, n.DockerProxy) - opts = 
append(opts, dp) - } - - if n.EtcdInMemory { - logrus.Infof("Creating in-memory mount for etcd data on node %s", nodeName) - etcdinmem := etcdinmemory.NewEtcdInMemOpt(sshClient, n.EtcdSize) - opts = append(opts, etcdinmem) - } - - if n.Realtime { - realtimeOpt := realtime.NewRealtimeOpt(sshClient) - opts = append(opts, realtimeOpt) - } - - for _, s := range soundcardPCIIDs { - // move the VM sound cards to a vfio-pci driver to prepare for assignment - bvfio := bindvfio.NewBindVfioOpt(sshClient, s) - opts = append(opts, bvfio) - } - - if n.EnableAudit { - if err := sshClient.Command("touch /home/vagrant/enable_audit"); err != nil { - return fmt.Errorf("provisioning node %d failed (setting enableAudit phase): %s", n.NodeIdx, err) - } - } - - if n.PSA { - psaOpt := psa.NewPsaOpt(sshClient) - opts = append(opts, psaOpt) - } - - if n.NodeIdx == 1 { - n := node01.NewNode01Provisioner(sshClient, n.SingleStack) - opts = append(opts, n) - - } else { - if n.GpuAddress != "" { - // move the assigned PCI device to a vfio-pci driver to prepare for assignment - gpuDeviceID, err := getDevicePCIID(n.GpuAddress) - if err != nil { - return err - } - bindVfioOpt := bindvfio.NewBindVfioOpt(sshClient, gpuDeviceID) - opts = append(opts, bindVfioOpt) - } - n := nodesprovision.NewNodesProvisioner(sshClient, n.SingleStack) - opts = append(opts, n) - } - - if n.KsmEnabled { - ksmOpt := ksm.NewKsmOpt(sshClient, n.KsmScanInterval, n.KsmPageCount) - opts = append(opts, ksmOpt) - } - - if n.SwapEnabled { - swapOpt := swap.NewSwapOpt(sshClient, n.Swappiness, n.UnlimitedSwap, n.SwapSize) - opts = append(opts, swapOpt) - } - - for _, o := range opts { - if err := o.Exec(); err != nil { - return err - } - } - - return nil -} - -func waitForVMToBeUp(prefix string, nodeName string) error { - var err error - // Wait for the VM to be up - for x := 0; x < 10; x++ { - err = _cmd(cli, nodeContainer(prefix, nodeName), "ssh.sh echo VM is up", "waiting for node to come up") - if err == nil { - break - } - 
logrus.WithError(err).Warningf("Could not establish a ssh connection to the VM, retrying ...") - time.Sleep(1 * time.Second) - } - - if err != nil { - return fmt.Errorf("could not establish a connection to the node after a generous timeout: %v", err) - } - - return nil -} - -func nodeNameFromIndex(x int) string { - return fmt.Sprintf("node%02d", x) -} - -func nodeContainer(prefix string, node string) string { - return prefix + "-" + node -} - -func getDockerProxyConfig(proxy string) (string, error) { - p := dockerSetting{Proxy: proxy} - buf := new(bytes.Buffer) - - t, err := template.New("docker-proxy").Parse(proxySettings) - if err != nil { - return "", err - } - err = t.Execute(buf, p) - if err != nil { - return "", err - } - return buf.String(), nil -} - -// getDeviceIOMMUGroup gets devices iommu_group -// e.g. /sys/bus/pci/devices/0000\:65\:00.0/iommu_group -> ../../../../../kernel/iommu_groups/45 -func getPCIDeviceIOMMUGroup(pciAddress string) (string, error) { - iommuLink := filepath.Join("/sys/bus/pci/devices", pciAddress, "iommu_group") - iommuPath, err := os.Readlink(iommuLink) - if err != nil { - return "", fmt.Errorf("failed to read iommu_group link %s for device %s - %v", iommuLink, pciAddress, err) - } - _, iommuGroup := filepath.Split(iommuPath) - return iommuGroup, nil -} - -func getDevicePCIID(pciAddress string) (string, error) { - file, err := os.Open(filepath.Join("/sys/bus/pci/devices", pciAddress, "uevent")) - if err != nil { - return "", err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "PCI_ID") { - equal := strings.Index(line, "=") - value := strings.TrimSpace(line[equal+1:]) - return strings.ToLower(value), nil - } - } - return "", fmt.Errorf("no pci_id is found") -} diff --git a/cluster-provision/gocli/providers/types.go b/cluster-provision/gocli/providers/types.go index 06e15f5f0b..939e15a606 100644 --- a/cluster-provision/gocli/providers/types.go 
+++ b/cluster-provision/gocli/providers/types.go @@ -73,3 +73,262 @@ type KubevirtProvider struct { } type KubevirtProviderOption func(c *KubevirtProvider) + +type FlagConfig struct { + FlagType string + ProviderOptFunc func(interface{}) KubevirtProviderOption +} + +var ProvisionFlagMap = map[string]FlagConfig{ + "memory": { + FlagType: "string", + ProviderOptFunc: WithMemory, + }, + "cpu": { + FlagType: "uint", + ProviderOptFunc: WithCPU, + }, + "slim": { + FlagType: "bool", + ProviderOptFunc: WithSlim, + }, + "random-ports": { + FlagType: "bool", + ProviderOptFunc: WithRandomPorts, + }, + "phases": { + FlagType: "string", + ProviderOptFunc: WithPhases, + }, + "additional-persistent-kernel-arguments": { + FlagType: "[]string", + ProviderOptFunc: WithAdditionalKernelArgs, + }, + "vnc-port": { + FlagType: "uint16", + ProviderOptFunc: WithVNCPort, + }, + "ssh-port": { + FlagType: "uint16", + ProviderOptFunc: WithSSHPort, + }, + "qemu-args": { + FlagType: "string", + ProviderOptFunc: WithQemuArgs, + }, +} + +var RunFlagMap = map[string]FlagConfig{ + "nodes": { + FlagType: "uint", + ProviderOptFunc: WithNodes, + }, + "numa": { + FlagType: "uint", + ProviderOptFunc: WithNuma, + }, + "memory": { + FlagType: "string", + ProviderOptFunc: WithMemory, + }, + "cpu": { + FlagType: "uint", + ProviderOptFunc: WithCPU, + }, + "secondary-nics": { + FlagType: "uint", + ProviderOptFunc: WithSecondaryNics, + }, + "qemu-args": { + FlagType: "string", + ProviderOptFunc: WithQemuArgs, + }, + "kernel-args": { + FlagType: "string", + ProviderOptFunc: WithKernelArgs, + }, + "background": { + FlagType: "bool", + ProviderOptFunc: WithBackground, + }, + "random-ports": { + FlagType: "bool", + ProviderOptFunc: WithRandomPorts, + }, + "slim": { + FlagType: "bool", + ProviderOptFunc: WithSlim, + }, + "vnc-port": { + FlagType: "uint16", + ProviderOptFunc: WithVNCPort, + }, + "http-port": { + FlagType: "uint16", + ProviderOptFunc: WithHTTPPort, + }, + "https-port": { + FlagType: "uint16", + 
ProviderOptFunc: WithHTTPSPort, + }, + "registry-port": { + FlagType: "uint16", + ProviderOptFunc: WithRegistryPort, + }, + "ocp-port": { + FlagType: "uint16", + ProviderOptFunc: WithOCPort, + }, + "k8s-port": { + FlagType: "uint16", + ProviderOptFunc: WithK8sPort, + }, + "ssh-port": { + FlagType: "uint16", + ProviderOptFunc: WithSSHPort, + }, + "prometheus-port": { + FlagType: "uint16", + ProviderOptFunc: WithPrometheusPort, + }, + "grafana-port": { + FlagType: "uint16", + ProviderOptFunc: WithGrafanaPort, + }, + "dns-port": { + FlagType: "uint16", + ProviderOptFunc: WithDNSPort, + }, + "nfs-data": { + FlagType: "string", + ProviderOptFunc: WithNFSData, + }, + "enable-ceph": { + FlagType: "bool", + ProviderOptFunc: WithEnableCeph, + }, + "enable-istio": { + FlagType: "bool", + ProviderOptFunc: WithEnableIstio, + }, + "enable-cnao": { + FlagType: "bool", + ProviderOptFunc: WithEnableCNAO, + }, + "enable-nfs-csi": { + FlagType: "bool", + ProviderOptFunc: WithEnableNFSCSI, + }, + "enable-prometheus": { + FlagType: "bool", + ProviderOptFunc: WithEnablePrometheus, + }, + "enable-prometheus-alertmanager": { + FlagType: "bool", + ProviderOptFunc: WithEnablePrometheusAlertManager, + }, + "enable-grafana": { + FlagType: "bool", + ProviderOptFunc: WithEnableGrafana, + }, + "docker-proxy": { + FlagType: "string", + ProviderOptFunc: WithDockerProxy, + }, + "gpu": { + FlagType: "string", + ProviderOptFunc: WithGPU, + }, + "nvme": { + FlagType: "[]string", + ProviderOptFunc: WithNvmeDisks, + }, + "scsi": { + FlagType: "[]string", + ProviderOptFunc: WithScsiDisks, + }, + "run-etcd-on-memory": { + FlagType: "bool", + ProviderOptFunc: WithRunEtcdOnMemory, + }, + "etcd-capacity": { + FlagType: "string", + ProviderOptFunc: WithEtcdCapacity, + }, + "hugepages-2m": { + FlagType: "uint", + ProviderOptFunc: WithHugepages2M, + }, + "enable-realtime-scheduler": { + FlagType: "bool", + ProviderOptFunc: WithEnableRealtimeScheduler, + }, + "enable-fips": { + FlagType: "bool", + 
ProviderOptFunc: WithEnableFIPS, + }, + "enable-psa": { + FlagType: "bool", + ProviderOptFunc: WithEnablePSA, + }, + "single-stack": { + FlagType: "bool", + ProviderOptFunc: WithSingleStack, + }, + "enable-audit": { + FlagType: "bool", + ProviderOptFunc: WithEnableAudit, + }, + "usb": { + FlagType: "[]string", + ProviderOptFunc: WithUSBDisks, + }, + "deploy-multus": { + FlagType: "bool", + ProviderOptFunc: WithMultus, + }, + "deploy-aaq": { + FlagType: "bool", + ProviderOptFunc: WithAAQ, + }, + "deploy-cdi": { + FlagType: "bool", + ProviderOptFunc: WithCDI, + }, + "enable-ksm": { + FlagType: "bool", + ProviderOptFunc: WithKSM, + }, + "ksm-page-count": { + FlagType: "uint", + ProviderOptFunc: WithKSMPages, + }, + "ksm-scan-interval": { + FlagType: "uint", + ProviderOptFunc: WithKSMInterval, + }, + "enable-swap": { + FlagType: "bool", + ProviderOptFunc: WithSwap, + }, + "unlimited-swap": { + FlagType: "bool", + ProviderOptFunc: WithUnlimitedSwap, + }, + "swap-size": { + FlagType: "uint", + ProviderOptFunc: WithSwapSize, + }, + "swapiness": { + FlagType: "uint", + ProviderOptFunc: WithSwapiness, + }, + "cdi-version": { + FlagType: "string", + ProviderOptFunc: WithCDIVersion, + }, + "aaq-version": { + FlagType: "string", + ProviderOptFunc: WithAAQVersion, + }, +} From 2080c4d1df990eab3afca9d240c169841ae56cbb Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:31:39 +0300 Subject: [PATCH 4/8] refactor: Delete the nodesconfig package This functionality now exists in the KubevirtProvider type and doesn't need a package of its own Signed-off-by: aerosouund --- .../gocli/cmd/nodesconfig/nodeconfig.go | 62 ------ .../gocli/cmd/nodesconfig/opts.go | 197 ------------------ 2 files changed, 259 deletions(-) delete mode 100644 cluster-provision/gocli/cmd/nodesconfig/nodeconfig.go delete mode 100644 cluster-provision/gocli/cmd/nodesconfig/opts.go diff --git a/cluster-provision/gocli/cmd/nodesconfig/nodeconfig.go 
b/cluster-provision/gocli/cmd/nodesconfig/nodeconfig.go deleted file mode 100644 index 6bf26ac040..0000000000 --- a/cluster-provision/gocli/cmd/nodesconfig/nodeconfig.go +++ /dev/null @@ -1,62 +0,0 @@ -package nodesconfig - -// NodeLinuxConfig type holds the config params that a node can have for its linux system -type NodeLinuxConfig struct { - NodeIdx int - K8sVersion string - FipsEnabled bool - DockerProxy string - EtcdInMemory bool - EtcdSize string - SingleStack bool - EnableAudit bool - GpuAddress string - Realtime bool - PSA bool - KsmEnabled bool - SwapEnabled bool - KsmPageCount int - KsmScanInterval int - Swappiness int - UnlimitedSwap bool - SwapSize int -} - -// NodeK8sConfig type holds the config k8s options for kubevirt cluster -type NodeK8sConfig struct { - Ceph bool - Prometheus bool - Alertmanager bool - Grafana bool - Istio bool - NfsCsi bool - CNAO bool - Multus bool - CDI bool - CDIVersion string - AAQ bool - AAQVersion string -} - -func NewNodeK8sConfig(confs []K8sConfigFunc) *NodeK8sConfig { - n := &NodeK8sConfig{} - - for _, conf := range confs { - conf(n) - } - - return n -} - -func NewNodeLinuxConfig(nodeIdx int, k8sVersion string, confs []LinuxConfigFunc) *NodeLinuxConfig { - n := &NodeLinuxConfig{ - NodeIdx: nodeIdx, - K8sVersion: k8sVersion, - } - - for _, conf := range confs { - conf(n) - } - - return n -} diff --git a/cluster-provision/gocli/cmd/nodesconfig/opts.go b/cluster-provision/gocli/cmd/nodesconfig/opts.go deleted file mode 100644 index da0446761c..0000000000 --- a/cluster-provision/gocli/cmd/nodesconfig/opts.go +++ /dev/null @@ -1,197 +0,0 @@ -package nodesconfig - -type LinuxConfigFunc func(n *NodeLinuxConfig) - -type K8sConfigFunc func(n *NodeK8sConfig) - -func WithNodeIdx(nodeIdx int) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.NodeIdx = nodeIdx - } -} - -func WithK8sVersion(k8sVersion string) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.K8sVersion = k8sVersion - } -} - -func 
WithFipsEnabled(fipsEnabled bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.FipsEnabled = fipsEnabled - } -} - -func WithDockerProxy(dockerProxy string) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.DockerProxy = dockerProxy - } -} - -func WithEtcdInMemory(etcdInMemory bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.EtcdInMemory = etcdInMemory - } -} - -func WithEtcdSize(etcdSize string) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.EtcdSize = etcdSize - } -} - -func WithSingleStack(singleStack bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.SingleStack = singleStack - } -} - -func WithEnableAudit(enableAudit bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.EnableAudit = enableAudit - } -} - -func WithGpuAddress(gpuAddress string) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.GpuAddress = gpuAddress - } -} - -func WithRealtime(realtime bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.Realtime = realtime - } -} - -func WithPSA(psa bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.PSA = psa - } -} - -func WithKsm(ksm bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.KsmEnabled = ksm - } -} - -func WithSwap(swap bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.SwapEnabled = swap - } -} - -func WithKsmEnabled(ksmEnabled bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.KsmEnabled = ksmEnabled - } -} - -func WithSwapEnabled(swapEnabled bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.SwapEnabled = swapEnabled - } -} - -func WithKsmPageCount(ksmPageCount int) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.KsmPageCount = ksmPageCount - } -} - -func WithKsmScanInterval(ksmScanInterval int) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.KsmScanInterval = ksmScanInterval - } -} - -func WithSwapiness(swapiness int) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - 
n.Swappiness = swapiness - } -} - -func WithUnlimitedSwap(unlimitedSwap bool) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.UnlimitedSwap = unlimitedSwap - } -} - -func WithSwapSize(swapSize int) LinuxConfigFunc { - return func(n *NodeLinuxConfig) { - n.SwapSize = swapSize - } -} - -func WithCeph(ceph bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.Ceph = ceph - } -} - -func WithPrometheus(prometheus bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.Prometheus = prometheus - } -} - -func WithAlertmanager(alertmanager bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.Alertmanager = alertmanager - } -} - -func WithGrafana(grafana bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.Grafana = grafana - } -} - -func WithIstio(istio bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.Istio = istio - } -} - -func WithNfsCsi(nfsCsi bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.NfsCsi = nfsCsi - } -} - -func WithCnao(cnao bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.CNAO = cnao - } -} - -func WithMultus(multus bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.Multus = multus - } -} - -func WithCdi(cdi bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.CDI = cdi - } -} - -func WithCdiVersion(cdiVersion string) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.CDIVersion = cdiVersion - } -} - -func WithAAQ(aaq bool) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.AAQ = aaq - } -} - -func WithAAQVersion(aaqVersion string) K8sConfigFunc { - return func(n *NodeK8sConfig) { - n.AAQVersion = aaqVersion - } -} From 8d61da097f441930463d8f31c63e72d00f942380 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:32:05 +0300 Subject: [PATCH 5/8] testing: Move testing logic to the providers package The KubevirtProvider type is what provides the methods that run a node or run the k8s options. 
Testing logic has been moved to a Base Provider Suite Signed-off-by: aerosouund --- .../providers/base_provider_suite_test.go | 13 ++++++ .../base_provider_test.go} | 43 +++++++++---------- 2 files changed, 34 insertions(+), 22 deletions(-) create mode 100644 cluster-provision/gocli/providers/base_provider_suite_test.go rename cluster-provision/gocli/{cmd/run_test.go => providers/base_provider_test.go} (72%) diff --git a/cluster-provision/gocli/providers/base_provider_suite_test.go b/cluster-provision/gocli/providers/base_provider_suite_test.go new file mode 100644 index 0000000000..775f262b85 --- /dev/null +++ b/cluster-provision/gocli/providers/base_provider_suite_test.go @@ -0,0 +1,13 @@ +package providers + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestCmd(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Base Provider Suite") +} diff --git a/cluster-provision/gocli/cmd/run_test.go b/cluster-provision/gocli/providers/base_provider_test.go similarity index 72% rename from cluster-provision/gocli/cmd/run_test.go rename to cluster-provision/gocli/providers/base_provider_test.go index ef97ac5155..3153bfea93 100644 --- a/cluster-provision/gocli/cmd/run_test.go +++ b/cluster-provision/gocli/providers/base_provider_test.go @@ -1,14 +1,15 @@ -package cmd +package providers import ( + "github.com/docker/docker/client" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "go.uber.org/mock/gomock" - "kubevirt.io/kubevirtci/cluster-provision/gocli/cmd/nodesconfig" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/aaq" bindvfio "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/bind-vfio" etcdinmemory "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/etcd" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/istio" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/labelnodes" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/nfscsi" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/node01" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/psa" @@ -23,6 +24,7 @@ var _ = Describe("Node Provisioning", func() { sshClient *kubevirtcimocks.MockSSHClient reactors []k8s.ReactorConfig k8sClient k8s.K8sDynamicClient + kp *KubevirtProvider ) BeforeEach(func() { @@ -35,6 +37,19 @@ var _ = Describe("Node Provisioning", func() { } k8sClient = k8s.NewTestClient(reactors...) + kp = NewKubevirtProvider("k8s-1.30", "", &client.Client{}, []KubevirtProviderOption{ + WithNodes(uint(1)), + WithEnablePSA(true), + WithEtcdCapacity("512M"), + WithRunEtcdOnMemory(true), + WithEnableCeph(true), + WithEnablePrometheus(true), + WithEnablePrometheusAlertManager(true), + WithEnableIstio(true), + WithAAQ(true), + WithEnableNFSCSI(true), + WithEnableGrafana(true), + }) }) AfterEach(func() { @@ -45,42 +60,26 @@ var _ = Describe("Node Provisioning", func() { Describe("ProvisionNode", func() { It("should execute the correct commands", func() { - linuxConfigFuncs := []nodesconfig.LinuxConfigFunc{ - nodesconfig.WithEtcdInMemory(true), - nodesconfig.WithEtcdSize("512M"), - nodesconfig.WithPSA(true), - } - - n := nodesconfig.NewNodeLinuxConfig(1, "k8s-1.30", linuxConfigFuncs) - etcdinmemory.AddExpectCalls(sshClient, "512M") bindvfio.AddExpectCalls(sshClient, "8086:2668") bindvfio.AddExpectCalls(sshClient, "8086:2415") psa.AddExpectCalls(sshClient) node01.AddExpectCalls(sshClient) - err := provisionNode(sshClient, n) + err := 
kp.provisionNode(sshClient, 1) + Expect(err).NotTo(HaveOccurred()) }) }) Describe("ProvisionNodeK8sOpts", func() { It("should execute the correct K8s option commands", func() { - k8sConfs := []nodesconfig.K8sConfigFunc{ - nodesconfig.WithCeph(true), - nodesconfig.WithPrometheus(true), - nodesconfig.WithAlertmanager(true), - nodesconfig.WithGrafana(true), - nodesconfig.WithIstio(true), - nodesconfig.WithNfsCsi(true), - nodesconfig.WithAAQ(true), - } - n := nodesconfig.NewNodeK8sConfig(k8sConfs) + kp.Client = k8sClient + labelnodes.AddExpectCalls(sshClient, "node-role.kubernetes.io/control-plane") istio.AddExpectCalls(sshClient) aaq.AddExpectCalls(sshClient) - err := provisionK8sOptions(sshClient, k8sClient, n, "k8s-1.30") + err := kp.provisionK8sOpts(sshClient) Expect(err).NotTo(HaveOccurred()) }) }) From 34232f65e34e9884eef17ec14d9020a70d8a2500 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:33:46 +0300 Subject: [PATCH 6/8] refactor!: Move provision logic to the KubevirtProvider Implement provisioning logic on the provider and leverage the linux and k8s provisioning instead of the scripts. Leverage the same pattern of creating an unprivileged sshClient to add the ssh key to the root user then use a privileged client. Include a map of version major, minor to a version with major, minor and patch to pass it to the provisionk8s opt. Remove unwanted floating methods such as _cmd, performPhase and implement copyDirectory as a method of the provider type. Change provision.go command to call the Provision method. Change type of port flags to uint16 to match the struct type. Change default value of random-ports to true.
THIS COMMIT BREAKS THE KUBEVIRTCI RELEASE AND VERSIONING PATTERNS Signed-off-by: aerosouund --- cluster-provision/gocli/cmd/provision.go | 375 +++--------------- .../gocli/providers/base_provider.go | 255 +++++++++++- 2 files changed, 304 insertions(+), 326 deletions(-) diff --git a/cluster-provision/gocli/cmd/provision.go b/cluster-provision/gocli/cmd/provision.go index e86858eca8..7f5adfcbf9 100644 --- a/cluster-provision/gocli/cmd/provision.go +++ b/cluster-provision/gocli/cmd/provision.go @@ -2,33 +2,32 @@ package cmd import ( "fmt" - "io/ioutil" - "os" - "os/signal" "path/filepath" - "strconv" "strings" - "github.com/alessio/shellescape" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/volume" "github.com/docker/docker/client" - "github.com/docker/docker/pkg/archive" "github.com/docker/go-connections/nat" - "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/net/context" - containers2 "kubevirt.io/kubevirtci/cluster-provision/gocli/containers" "kubevirt.io/kubevirtci/cluster-provision/gocli/cmd/utils" - "kubevirt.io/kubevirtci/cluster-provision/gocli/docker" + "kubevirt.io/kubevirtci/cluster-provision/gocli/providers" ) +const ( + baseLinuxPhase = "quay.io/kubevirtci/centos9" + baseK8sPhase = "quay.io/kubevirtci/centos9:2408130400-bb670376" +) + +var versionMap = map[string]string{ + "1.30": "1.30.2", + "1.29": "1.29.6", + "1.28": "1.28.11", + "1.31": "1.31.0", +} + // NewProvisionCommand provision given cluster func NewProvisionCommand() *cobra.Command { - provision := &cobra.Command{ Use: "provision", Short: "provision starts a given cluster", @@ -38,10 +37,10 @@ func NewProvisionCommand() *cobra.Command { provision.Flags().StringP("memory", "m", "3096M", "amount of ram per node") provision.Flags().UintP("cpu", "c", 2, "number of cpu cores per node") provision.Flags().String("qemu-args", "", "additional qemu args to pass 
through to the nodes") - provision.Flags().Bool("random-ports", false, "expose all ports on random localhost ports") + provision.Flags().Bool("random-ports", true, "expose all ports on random localhost ports") provision.Flags().Bool("slim", false, "create slim provider (uncached images)") - provision.Flags().Uint("vnc-port", 0, "port on localhost for vnc") - provision.Flags().Uint("ssh-port", 0, "port on localhost for ssh server") + provision.Flags().Uint16("vnc-port", 0, "port on localhost for vnc") + provision.Flags().Uint16("ssh-port", 0, "port on localhost for ssh server") provision.Flags().String("container-suffix", "", "use additional suffix for the provisioned container image") provision.Flags().String("phases", "linux,k8s", "phases to run, possible values: linux,k8s linux k8s") provision.Flags().StringArray("additional-persistent-kernel-arguments", []string{}, "additional persistent kernel arguments applied after provision") @@ -51,330 +50,88 @@ func NewProvisionCommand() *cobra.Command { func provisionCluster(cmd *cobra.Command, args []string) (retErr error) { var base string - packagePath := args[0] - versionBytes, err := os.ReadFile(filepath.Join(packagePath, "version")) - if err != nil { - return err - } - version := strings.TrimSpace(string(versionBytes)) - baseBytes, err := os.ReadFile(filepath.Join(packagePath, "base")) - if err != nil { - return err + versionNoMinor := args[0] + + v, ok := versionMap[versionNoMinor] + if !ok { + return fmt.Errorf("Invalid version passed, exiting!") + } + + opts := []providers.KubevirtProviderOption{} + flags := cmd.Flags() + for flagName, flagConfig := range providers.ProvisionFlagMap { + switch flagConfig.FlagType { + case "string": + flagVal, err := flags.GetString(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + case "bool": + flagVal, err := flags.GetBool(flagName) + if err != nil { + return err + } + opts = append(opts, 
flagConfig.ProviderOptFunc(flagVal)) + + case "uint": + flagVal, err := flags.GetUint(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + case "uint16": + flagVal, err := flags.GetUint16(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + case "[]string": + flagVal, err := flags.GetStringArray(flagName) + if err != nil { + return err + } + opts = append(opts, flagConfig.ProviderOptFunc(flagVal)) + } } + phases, err := cmd.Flags().GetString("phases") if err != nil { return err } if strings.Contains(phases, "linux") { - base = fmt.Sprintf("quay.io/kubevirtci/%s", strings.TrimSpace(string(baseBytes))) + base = baseLinuxPhase } else { - k8sPath := fmt.Sprintf("%s/../", packagePath) - baseImageBytes, err := os.ReadFile(filepath.Join(k8sPath, "base-image")) - if err != nil { - return err - } - base = strings.TrimSpace(string(baseImageBytes)) + base = baseK8sPhase } containerSuffix, err := cmd.Flags().GetString("container-suffix") if err != nil { return err } - name := filepath.Base(packagePath) + name := filepath.Base(versionNoMinor) if len(containerSuffix) > 0 { name = fmt.Sprintf("%s-%s", name, containerSuffix) } - prefix := fmt.Sprintf("k8s-%s-provision", name) - target := fmt.Sprintf("quay.io/kubevirtci/k8s-%s", name) - scripts := filepath.Join(packagePath) - - if phases == "linux" { - target = base + "-base" - } - - memory, err := cmd.Flags().GetString("memory") - if err != nil { - return err - } - - randomPorts, err := cmd.Flags().GetBool("random-ports") - if err != nil { - return err - } - - slim, err := cmd.Flags().GetBool("slim") - if err != nil { - return err - } portMap := nat.PortMap{} utils.AppendTCPIfExplicit(portMap, utils.PortSSH, cmd.Flags(), "ssh-port") utils.AppendTCPIfExplicit(portMap, utils.PortVNC, cmd.Flags(), "vnc-port") - qemuArgs, err := cmd.Flags().GetString("qemu-args") - if err != nil { - return err - } - - cpu, err := 
cmd.Flags().GetUint("cpu") - if err != nil { - return err - } - cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return err } - ctx := context.Background() - - stop := make(chan error, 10) - containers, volumes, done := docker.NewCleanupHandler(cli, stop, cmd.OutOrStderr(), true) - - defer func() { - stop <- retErr - <-done - }() - go func() { - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - <-interrupt - stop <- fmt.Errorf("Interrupt received, clean up") - }() + ctx, cancel := context.WithCancel(context.Background()) - // Pull the base image - err = docker.ImagePull(cli, ctx, base, types.ImagePullOptions{}) + kp := providers.NewKubevirtProvider(versionNoMinor, base, cli, opts) + err = kp.Provision(ctx, cancel, portMap, v) if err != nil { - panic(err) - } - - // Start dnsmasq - dnsmasq, err := containers2.DNSMasq(cli, ctx, &containers2.DNSMasqOptions{ - ClusterImage: base, - SecondaryNicsCount: 0, - RandomPorts: randomPorts, - PortMap: portMap, - Prefix: prefix, - NodeCount: 1, - }) - if err != nil { - return err - } - containers <- dnsmasq.ID - if err := cli.ContainerStart(ctx, dnsmasq.ID, container.StartOptions{}); err != nil { return err } - nodeName := nodeNameFromIndex(1) - nodeNum := fmt.Sprintf("%02d", 1) - - vol, err := cli.VolumeCreate(ctx, volume.CreateOptions{ - Name: fmt.Sprintf("%s-%s", prefix, nodeName), - }) - if err != nil { - return err - } - volumes <- vol.Name - registryVol, err := cli.VolumeCreate(ctx, volume.CreateOptions{ - Name: fmt.Sprintf("%s-%s", prefix, "registry"), - }) - if err != nil { - return err - } - - if len(qemuArgs) > 0 { - qemuArgs = "--qemu-args " + qemuArgs - } - node, err := cli.ContainerCreate(ctx, &container.Config{ - Image: base, - Env: []string{ - fmt.Sprintf("NODE_NUM=%s", nodeNum), - }, - Volumes: map[string]struct{}{ - "/var/run/disk": {}, - "/var/lib/registry": {}, - }, - Cmd: []string{"/bin/bash", "-c", fmt.Sprintf("/vm.sh --memory %s --cpu %s %s", memory, 
strconv.Itoa(int(cpu)), qemuArgs)}, - }, &container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: "volume", - Source: vol.Name, - Target: "/var/run/disk", - }, - { - Type: "volume", - Source: registryVol.Name, - Target: "/var/lib/registry", - }, - }, - Privileged: true, - NetworkMode: container.NetworkMode("container:" + dnsmasq.ID), - }, nil, nil, nodeContainer(prefix, nodeName)) - if err != nil { - return err - } - containers <- node.ID - if err := cli.ContainerStart(ctx, node.ID, container.StartOptions{}); err != nil { - return err - } - - // copy provider scripts - err = copyDirectory(ctx, cli, node.ID, scripts, "/scripts") - if err != nil { - return err - } - - // Wait for ssh.sh script to exist - err = _cmd(cli, nodeContainer(prefix, nodeName), "while [ ! -f /ssh_ready ] ; do sleep 1; done", "checking for ssh.sh script") - if err != nil { - return err - } - - // Wait for the VM to be up - err = _cmd(cli, nodeContainer(prefix, nodeName), "ssh.sh echo VM is up", "waiting for node to come up") - if err != nil { - return err - } - - envVars := fmt.Sprintf("version=%s slim=%t", version, slim) - if strings.Contains(phases, "linux") { - err = performPhase(cli, nodeContainer(prefix, nodeName), "/scripts/provision.sh", envVars) - if err != nil { - return err - } - } - if strings.Contains(phases, "k8s") { - // copy provider scripts - err = copyDirectory(ctx, cli, node.ID, scripts, "/scripts") - if err != nil { - return err - } - err = _cmd(cli, nodeContainer(prefix, nodeName), "if [ -f /scripts/extra-pre-pull-images ]; then scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i vagrant.key -P 22 /scripts/extra-pre-pull-images vagrant@192.168.66.101:/tmp/extra-pre-pull-images; fi", "copying /scripts/extra-pre-pull-images if existing") - if err != nil { - return err - } - err = _cmd(cli, nodeContainer(prefix, nodeName), "if [ -f /scripts/fetch-images.sh ]; then scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i vagrant.key -P 22 
/scripts/fetch-images.sh vagrant@192.168.66.101:/tmp/fetch-images.sh; fi", "copying /scripts/fetch-images.sh if existing") - if err != nil { - return err - } - - err = _cmd(cli, nodeContainer(prefix, nodeName), "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i vagrant.key vagrant@192.168.66.101 'mkdir -p /tmp/ceph /tmp/cnao /tmp/nfs-csi /tmp/nodeports /tmp/prometheus /tmp/whereabouts /tmp/kwok'", "Create required manifest directories before copy") - if err != nil { - return err - } - // Copy manifests to the VM - err = _cmd(cli, nodeContainer(prefix, nodeName), "scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i vagrant.key -P 22 /scripts/manifests/* vagrant@192.168.66.101:/tmp", "copying manifests to the VM") - if err != nil { - return err - } - - err = performPhase(cli, nodeContainer(prefix, nodeName), "/scripts/k8s_provision.sh", envVars) - if err != nil { - return err - } - } - - _cmd(cli, nodeContainer(prefix, nodeName), "ssh.sh sudo shutdown now -h", "shutting down the node") - err = _cmd(cli, nodeContainer(prefix, nodeName), "rm /usr/local/bin/ssh.sh", "removing the ssh.sh script") - if err != nil { - return err - } - err = _cmd(cli, nodeContainer(prefix, nodeName), "rm /ssh_ready", "removing the ssh_ready mark") - if err != nil { - return err - } - logrus.Info("waiting for the node to stop") - okChan, errChan := cli.ContainerWait(ctx, nodeContainer(prefix, nodeName), container.WaitConditionNotRunning) - select { - case <-okChan: - case err := <-errChan: - if err != nil { - return fmt.Errorf("waiting for the node to stop failed: %v", err) - } - } - - logrus.Info("preparing additional persistent kernel arguments after initial provision") - additionalKernelArguments, err := cmd.Flags().GetStringArray("additional-persistent-kernel-arguments") - if err != nil { - return err - } - - dir, err := ioutil.TempDir("", "gocli") - if err != nil { - return fmt.Errorf("failed creating a temporary directory: %v", err) - } - defer 
os.RemoveAll(dir) - if err := ioutil.WriteFile(filepath.Join(dir, "additional.kernel.args"), []byte(shellescape.QuoteCommand(additionalKernelArguments)), 0666); err != nil { - return fmt.Errorf("failed creating additional.kernel.args file: %v", err) - } - if err := copyDirectory(ctx, cli, node.ID, dir, "/"); err != nil { - return fmt.Errorf("failed copying additional kernel arguments into the container: %v", err) - } - - logrus.Infof("Commiting the node as %s", target) - _, err = cli.ContainerCommit(ctx, node.ID, container.CommitOptions{ - Reference: target, - Comment: "PROVISION SUCCEEDED", - Author: "gocli", - Changes: nil, - Pause: false, - Config: nil, - }) - if err != nil { - return fmt.Errorf("commiting the node failed: %v", err) - } - return nil } - -func copyDirectory(ctx context.Context, cli *client.Client, containerID string, sourceDirectory string, targetDirectory string) error { - srcInfo, err := archive.CopyInfoSourcePath(sourceDirectory, false) - if err != nil { - return err - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return err - } - defer srcArchive.Close() - - dstInfo := archive.CopyInfo{Path: targetDirectory} - - dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return err - } - defer preparedArchive.Close() - - err = cli.CopyToContainer(ctx, containerID, dstDir, preparedArchive, types.CopyToContainerOptions{AllowOverwriteDirWithFile: false}) - if err != nil { - return err - } - return nil -} - -func _cmd(cli *client.Client, container string, cmd string, description string) error { - logrus.Info(description) - success, err := docker.Exec(cli, container, []string{"/bin/bash", "-c", cmd}, os.Stdout) - if err != nil { - return fmt.Errorf("%s failed: %v", description, err) - } else if !success { - return fmt.Errorf("%s failed", cmd) - } - return nil -} - -func performPhase(cli *client.Client, container string, script string, envVars string) error { - err := 
_cmd(cli, container, fmt.Sprintf("test -f %s", script), "checking provision scripts") - if err != nil { - return err - } - - return _cmd(cli, container, - fmt.Sprintf("ssh.sh sudo %s /bin/bash < %s", envVars, script), - fmt.Sprintf("provisioning the node (%s)", script)) -} diff --git a/cluster-provision/gocli/providers/base_provider.go b/cluster-provision/gocli/providers/base_provider.go index b627f57838..6a62f80c52 100644 --- a/cluster-provision/gocli/providers/base_provider.go +++ b/cluster-provision/gocli/providers/base_provider.go @@ -6,6 +6,7 @@ import ( "context" "encoding/json" "fmt" + "io" "os" "os/signal" "path/filepath" @@ -18,7 +19,9 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/volume" "github.com/docker/docker/client" + "github.com/docker/docker/pkg/archive" "github.com/docker/go-connections/nat" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/resource" @@ -32,6 +35,7 @@ import ( dockerproxy "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/docker-proxy" etcd "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/etcd" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/istio" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/k8sprovision" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/ksm" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/labelnodes" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/multus" @@ -39,6 +43,7 @@ import ( "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/node01" nodesprovision "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/nodes" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/prometheus" + provisionopt "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/provision" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/psa" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/realtime" "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/rookceph" @@ 
-200,7 +205,7 @@ func (kp *KubevirtProvider) Start(ctx context.Context, cancel context.CancelFunc } containers <- node.ID - if err := kp.Docker.ContainerStart(ctx, node.ID, types.ContainerStartOptions{}); err != nil { + if err := kp.Docker.ContainerStart(ctx, node.ID, container.StartOptions{}); err != nil { return err } @@ -239,12 +244,17 @@ func (kp *KubevirtProvider) Start(ctx context.Context, cancel context.CancelFunc return err } - err = sshClient.CopyRemoteFile("/etc/kubernetes/admin.conf", ".kubeconfig") + kubeConf, err := os.Create(".kubeconfig") if err != nil { return err } - config, err := k8s.InitConfig(".kubeconfig", kp.APIServerPort) + err = sshClient.CopyRemoteFile("/etc/kubernetes/admin.conf", kubeConf) + if err != nil { + return err + } + + config, err := k8s.NewConfig(".kubeconfig", kp.APIServerPort) if err != nil { return err } @@ -267,6 +277,196 @@ func (kp *KubevirtProvider) Start(ctx context.Context, cancel context.CancelFunc return nil } +func (kp *KubevirtProvider) Provision(ctx context.Context, cancel context.CancelFunc, portMap nat.PortMap, k8sVersion string) (retErr error) { + prefix := fmt.Sprintf("k8s-%s-provision", kp.Version) + target := fmt.Sprintf("quay.io/kubevirtci/k8s-%s", kp.Version) + if kp.Phases == "linux" { + target = kp.Image + "-base" + } + kp.Version = prefix + + stop := make(chan error, 10) + containers, volumes, done := docker.NewCleanupHandler(kp.Docker, stop, os.Stdout, true) + + defer func() { + stop <- retErr + <-done + }() + + go kp.handleInterrupt(cancel, stop) + + err := docker.ImagePull(kp.Docker, ctx, kp.Image, types.ImagePullOptions{}) + if err != nil { + return err + } + + dnsmasq, err := kp.runDNSMasq(ctx, portMap) + if err != nil { + return err + } + + kp.DNSMasq = dnsmasq + containers <- dnsmasq + + dm, err := kp.Docker.ContainerInspect(context.Background(), dnsmasq) + if err != nil { + return err + } + + sshPort, err := utils.GetPublicPort(utils.PortSSH, dm.NetworkSettings.Ports) + if err != nil { + return 
err + } + + nodeName := kp.nodeNameFromIndex(1) + nodeNum := fmt.Sprintf("%02d", 1) + + vol, err := kp.Docker.VolumeCreate(ctx, volume.CreateOptions{ + Name: fmt.Sprintf("%s-%s", prefix, nodeName), + }) + if err != nil { + return err + } + volumes <- vol.Name + registryVol, err := kp.Docker.VolumeCreate(ctx, volume.CreateOptions{ + Name: fmt.Sprintf("%s-%s", prefix, "registry"), + }) + if err != nil { + return err + } + + node, err := kp.Docker.ContainerCreate(ctx, &container.Config{ + Image: kp.Image, + Env: []string{ + fmt.Sprintf("NODE_NUM=%s", nodeNum), + }, + Volumes: map[string]struct{}{ + "/var/run/disk": {}, + "/var/lib/registry": {}, + }, + Cmd: []string{"/bin/bash", "-c", fmt.Sprintf("/vm.sh --memory %s --cpu %s %s", kp.Memory, strconv.Itoa(int(kp.CPU)), kp.QemuArgs)}, + }, &container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: "volume", + Source: vol.Name, + Target: "/var/run/disk", + }, + { + Type: "volume", + Source: registryVol.Name, + Target: "/var/lib/registry", + }, + }, + Privileged: true, + NetworkMode: container.NetworkMode("container:" + kp.DNSMasq), + }, nil, nil, kp.nodeContainer(kp.Version, nodeName)) + if err != nil { + return err + } + containers <- node.ID + if err := kp.Docker.ContainerStart(ctx, node.ID, container.StartOptions{}); err != nil { + return err + } + + // Wait for ssh.sh script to exist + _, err = docker.Exec(kp.Docker, kp.nodeContainer(kp.Version, nodeName), []string{"bin/bash", "-c", "while [ ! 
-f /ssh_ready ] ; do sleep 1; done", "checking for ssh.sh script"}, os.Stdout) + if err != nil { + return err + } + + sshClient, err := libssh.NewSSHClient(sshPort, 1, false) + if err != nil { + return err + } + + rootkey := rootkey.NewRootKey(sshClient) + if err = rootkey.Exec(); err != nil { + fmt.Println(err) + } + + sshClient, err = libssh.NewSSHClient(sshPort, 1, true) + if err != nil { + return err + } + + if strings.Contains(kp.Phases, "linux") { + provisionOpt := provisionopt.NewLinuxProvisioner(sshClient) + if err = provisionOpt.Exec(); err != nil { + return err + } + } + + if strings.Contains(kp.Phases, "k8s") { + // copy provider scripts + if err = sshClient.Command("mkdir -p /tmp/ceph /tmp/cnao /tmp/nfs-csi /tmp/nodeports /tmp/prometheus /tmp/whereabouts /tmp/kwok"); err != nil { + return err + } + // Copy manifests to the VM + success, err := docker.Exec(kp.Docker, kp.nodeContainer(kp.Version, nodeName), []string{"/bin/bash", "-c", "scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i vagrant.key -P 22 /scripts/manifests/* root@192.168.66.101:/tmp"}, os.Stdout) + if err != nil { + return err + } + + if !success { + return fmt.Errorf("error copying manifests to node") + } + + provisionK8sOpt := k8sprovision.NewK8sProvisioner(sshClient, k8sVersion, kp.Slim) + if err = provisionK8sOpt.Exec(); err != nil { + return err + } + } + + if err = sshClient.Command("sudo shutdown now -h"); err != nil { + return err + } + + _, err = docker.Exec(kp.Docker, kp.nodeContainer(kp.Version, nodeName), []string{"rm", "/ssh_ready"}, io.Discard) + if err != nil { + return err + } + + logrus.Info("waiting for the node to stop") + okChan, errChan := kp.Docker.ContainerWait(ctx, kp.nodeContainer(kp.Version, nodeName), container.WaitConditionNotRunning) + select { + case <-okChan: + case err := <-errChan: + if err != nil { + return fmt.Errorf("waiting for the node to stop failed: %v", err) + } + } + + if len(kp.AdditionalKernelArgs) > 0 { + dir, err := 
os.MkdirTemp("", "gocli") + if err != nil { + return fmt.Errorf("failed creating a temporary directory: %v", err) + } + defer os.RemoveAll(dir) + if err := os.WriteFile(filepath.Join(dir, "additional.kernel.args"), []byte(shellescape.QuoteCommand(kp.AdditionalKernelArgs)), 0666); err != nil { + return fmt.Errorf("failed creating additional.kernel.args file: %v", err) + } + if err := kp.copyDirectory(ctx, kp.Docker, node.ID, dir, "/"); err != nil { + return fmt.Errorf("failed copying additional kernel arguments into the container: %v", err) + } + } + + logrus.Infof("Commiting the node as %s", target) + _, err = kp.Docker.ContainerCommit(ctx, node.ID, container.CommitOptions{ + Reference: target, + Comment: "PROVISION SUCCEEDED", + Author: "gocli", + Changes: nil, + Pause: false, + Config: nil, + }) + if err != nil { + return fmt.Errorf("commiting the node failed: %v", err) + } + + return nil +} + func (kp *KubevirtProvider) runDNSMasq(ctx context.Context, portMap nat.PortMap) (string, error) { dnsmasqMounts := []mount.Mount{} _, err := os.Stat("/lib/modules") @@ -312,7 +512,7 @@ func (kp *KubevirtProvider) runDNSMasq(ctx context.Context, portMap nat.PortMap) Mounts: dnsmasqMounts, }, nil, nil, kp.Version+"-dnsmasq") - if err := kp.Docker.ContainerStart(ctx, dnsmasq.ID, types.ContainerStartOptions{}); err != nil { + if err := kp.Docker.ContainerStart(ctx, dnsmasq.ID, container.StartOptions{}); err != nil { return "", err } return dnsmasq.ID, nil @@ -333,7 +533,7 @@ func (kp *KubevirtProvider) runRegistry(ctx context.Context) (string, error) { return "", err } - if err := kp.Docker.ContainerStart(ctx, registry.ID, types.ContainerStartOptions{}); err != nil { + if err := kp.Docker.ContainerStart(ctx, registry.ID, container.StartOptions{}); err != nil { return "", err } @@ -368,7 +568,7 @@ func (kp *KubevirtProvider) runNFSGanesha(ctx context.Context) (string, error) { return "", err } - if err := kp.Docker.ContainerStart(ctx, nfsGanesha.ID, 
types.ContainerStartOptions{}); err != nil { + if err := kp.Docker.ContainerStart(ctx, nfsGanesha.ID, container.StartOptions{}); err != nil { return "", err } return nfsGanesha.ID, nil @@ -380,7 +580,7 @@ func (kp *KubevirtProvider) provisionNode(sshClient libssh.Client, nodeIdx int) if kp.EnableFIPS { for _, cmd := range []string{"sudo fips-mode-setup --enable", "sudo reboot"} { - if _, err := sshClient.Command(cmd, true); err != nil { + if err := sshClient.Command(cmd); err != nil { return fmt.Errorf("Starting fips mode failed: %s", err) } } @@ -413,14 +613,8 @@ func (kp *KubevirtProvider) provisionNode(sshClient libssh.Client, nodeIdx int) opts = append(opts, bvfio) } - if kp.SingleStack { - if _, err := sshClient.Command("touch /home/vagrant/single_stack", false); err != nil { - return fmt.Errorf("provisioning node %d failed (setting singleStack phase): %s", nodeIdx, err) - } - } - if kp.EnableAudit { - if _, err := sshClient.Command("touch /home/vagrant/enable_audit", false); err != nil { + if err := sshClient.Command("touch /home/vagrant/enable_audit"); err != nil { return fmt.Errorf("provisioning node %d failed (setting enableAudit phase): %s", nodeIdx, err) } } @@ -431,7 +625,7 @@ func (kp *KubevirtProvider) provisionNode(sshClient libssh.Client, nodeIdx int) } if nodeIdx == 1 { - n := node01.NewNode01Provisioner(sshClient) + n := node01.NewNode01Provisioner(sshClient, kp.SingleStack) opts = append(opts, n) } else { @@ -444,7 +638,7 @@ func (kp *KubevirtProvider) provisionNode(sshClient libssh.Client, nodeIdx int) bindVfioOpt := bindvfio.NewBindVfioOpt(sshClient, gpuDeviceID) opts = append(opts, bindVfioOpt) } - n := nodesprovision.NewNodesProvisioner(sshClient) + n := nodesprovision.NewNodesProvisioner(sshClient, kp.SingleStack) opts = append(opts, n) } @@ -476,7 +670,7 @@ func (kp *KubevirtProvider) provisionK8sOpts(sshClient libssh.Client) error { opts = append(opts, labelnodes.NewNodeLabler(sshClient, labelSelector)) if kp.CDI { - opts = append(opts, 
cdi.NewCdiOpt(kp.Client, kp.CDIVersion)) + opts = append(opts, cdi.NewCdiOpt(kp.Client, sshClient, kp.CDIVersion)) } if kp.AAQ { @@ -673,6 +867,33 @@ func (kp *KubevirtProvider) getPCIDeviceIOMMUGroup(address string) (string, erro return iommuGroup, nil } +func (kp *KubevirtProvider) copyDirectory(ctx context.Context, cli *client.Client, containerID string, sourceDirectory string, targetDirectory string) error { + srcInfo, err := archive.CopyInfoSourcePath(sourceDirectory, false) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + dstInfo := archive.CopyInfo{Path: targetDirectory} + + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + err = cli.CopyToContainer(ctx, containerID, dstDir, preparedArchive, types.CopyToContainerOptions{AllowOverwriteDirWithFile: false}) + if err != nil { + return err + } + return nil +} + func (kp *KubevirtProvider) handleInterrupt(cancel context.CancelFunc, stop chan error) { interrupt := make(chan os.Signal, 1) signal.Notify(interrupt, os.Interrupt) From 86c176fe50df4d3c0d263f163f8e7ae0e042fed2 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 17 Aug 2024 15:42:15 +0300 Subject: [PATCH 7/8] feat: Introduce retries in ssh connection in the ssh client Signed-off-by: aerosouund --- cluster-provision/gocli/pkg/libssh/ssh.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/cluster-provision/gocli/pkg/libssh/ssh.go b/cluster-provision/gocli/pkg/libssh/ssh.go index e6f8f3d10d..06a57285aa 100644 --- a/cluster-provision/gocli/pkg/libssh/ssh.go +++ b/cluster-provision/gocli/pkg/libssh/ssh.go @@ -12,7 +12,10 @@ import ( "strings" "sync" + "time" + "github.com/bramvdbogaerde/go-scp" + "github.com/cenkalti/backoff/v4" "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" ) @@ -224,9 +227,24 @@ 
func (s *SSHClientImpl) executeCommand(cmd string, outWriter, errWriter io.Write func (s *SSHClientImpl) initClient() error { s.initMutex.Lock() defer s.initMutex.Unlock() - client, err := ssh.Dial("tcp", net.JoinHostPort("127.0.0.1", fmt.Sprint(s.sshPort)), s.config) + + var ( + client *ssh.Client + err error + ) + + operation := func() error { + client, err = ssh.Dial("tcp", net.JoinHostPort("127.0.0.1", fmt.Sprint(s.sshPort)), s.config) + return err + } + + backoffStrategy := backoff.NewExponentialBackOff() + backoffStrategy.InitialInterval = 3 * time.Second + backoffStrategy.MaxElapsedTime = 1 * time.Minute + + err = backoff.Retry(operation, backoffStrategy) if err != nil { - return fmt.Errorf("Failed to connect to SSH server: %v", err) + return err } conn, err := client.Dial("tcp", fmt.Sprintf("192.168.66.10%d:22", s.nodeIdx)) From ea8daed1bf73179f5b2d37c3a1849bdbb24fcb39 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Mon, 26 Aug 2024 18:24:23 +0300 Subject: [PATCH 8/8] fix: Don't handle ssh shutdown error Signed-off-by: aerosouund --- cluster-provision/gocli/providers/base_provider.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cluster-provision/gocli/providers/base_provider.go b/cluster-provision/gocli/providers/base_provider.go index 6a62f80c52..ba18f11bba 100644 --- a/cluster-provision/gocli/providers/base_provider.go +++ b/cluster-provision/gocli/providers/base_provider.go @@ -418,9 +418,7 @@ func (kp *KubevirtProvider) Provision(ctx context.Context, cancel context.Cancel } } - if err = sshClient.Command("sudo shutdown now -h"); err != nil { - return err - } + _ = sshClient.Command("sudo shutdown now -h") _, err = docker.Exec(kp.Docker, kp.nodeContainer(kp.Version, nodeName), []string{"rm", "/ssh_ready"}, io.Discard) if err != nil {