diff --git a/.gitignore b/.gitignore index 1edb61b3..faad6d19 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ values.yaml charts/csi-cloudscale/charts cmd/cloudscale-csi-plugin/cloudscale-csi-plugin +k8test/ diff --git a/.gitmodules b/.gitmodules index 95687114..e69de29b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "deploy/kubespray"] - path = deploy/kubespray - url = https://github.com/kubernetes-incubator/kubespray diff --git a/README.md b/README.md index b9509afe..ddb5664d 100644 --- a/README.md +++ b/README.md @@ -331,6 +331,9 @@ Requirements: Build out the `charts/` directory from the `Chart.lock` file: ``` +$ cd charts/csi-cloudscale/ +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm repo update $ helm dependency build charts/csi-cloudscale ``` diff --git a/deploy/README.md b/deploy/README.md index a2e1041b..92d3699d 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -3,57 +3,49 @@ ## Testing To test csi-cloudscale in conjunction with Kubernetes, a suite of integration tests has been implemented. -To run this test suite, a Kubernetes cluster is required. For this purpose, this setup was prepared using kubespray. +To run this test suite, a Kubernetes cluster is required. For this purpose, this setup was prepared using +[k8test](https://github.com/cloudscale-ch/k8test). -> ⚠️ Running these tests yourself may incur unexpected costs and may result in data loss if run against a production account with live systems. herefore, we strongly advise you to use a separate account for these tests. +> ⚠️ Running these tests yourself may incur unexpected costs and may result in data loss if run against a production account with live systems. Therefore, we strongly advise you to use a separate account for these tests. > The Kubernetes cluster created is not production ready and should not be used for any purpose other than testing. 
+First bootstrap the cluster - # setup all required charts in the local folder, as they will be used by the ansible playbook. - cd charts/csi-cloudscale/ - helm repo add bitnami https://charts.bitnami.com/bitnami - helm repo update - helm dependency build - cd ../../ + # Export your API Token obtained from http://control.cloudscale.ch + export CLOUDSCALE_API_TOKEN="..." - # kubspray is provided as a git submodule - git submodule init - git submodule update - # if you want to test against another Kubernetes version, checkout a differnt tag in the the kubspray folder + # See the script for options, sensible defaults apply + ./helpers/bootstrap-cluster - # setup the python venv - cd deploy - python3 -m venv venv - . venv/bin/activate - # or requirements-{VERSION}.txt, see https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md#ansible-python-compatibility - pip install -r kubespray/requirements.txt - - # setup the cluster - cd kubespray/ - # get a token from the cloudscale.ch Control Panel and set it as CLOUDSCALE_TOKEN envrionment variable - export CLOUDSCALE_TOKEN="foobar" - # running this playbook will install a Kubernetes cluster on cloudscale.ch - ansible-playbook ../integration_test.yml -i inventory/hosts.ini --skip-tags cleanup --skip-tags test - - # get the IP address of server "test-kubernetes-master" from the cloudscale.ch Control Panel - # add the IP in the property "server" in the file "kubeconfig.yml", keep the https prefix and the port - cd ../../ - vi deploy/kubeconfig.yml - - # add the path of this file to the KUBECONFIG env variable - export KUBECONFIG=$(pwd)/deploy/kubeconfig.yml - - # finally, run the integration tests + # Verify cluster setup and access + export KUBECONFIG=$PWD/k8test/cluster/admin.conf + kubectl get nodes -o wide + + +You can **either** install the driver from your working directory + + # Install driver using dev image from working dir + # Prerequisite: ensure that you have run `helm dependency build` as described 
in the main README file. + helm install -g -n kube-system --set controller.image.tag=dev --set node.image.tag=dev --set controller.image.pullPolicy=Always --set node.image.pullPolicy=Always ./charts/csi-cloudscale + +**Or** you can install a released version: + + # List all released versions + helm search repo csi-cloudscale/csi-cloudscale --versions + # Install a specific Chart version or latest if --version is omitted + helm install -n kube-system -g csi-cloudscale/csi-cloudscale [ --version v1.0.0 ] + +Then execute the test suite: + make test-integration -*Command line options for the playbook:* -- If you just want to provision a cluster, you can use an additional `--skip-tags cleanup --skip-tags test`. If not, the VMs will be removed again. -- If you want to a test release other than `dev`, you can use an - additional `-e version=v1.0.0`. Caution: This does only inject the docker image tag in to helm, but uses the chart from the current working directory. +To get rid of the cluster: + + ./helpers/clean-up ## Debugging -If the playbook does not pass, there are a good number of ways to debug. +If the suite does not pass, there are a good number of ways to debug. 
You can just redeploy all csi pods and push a new version to docker hub: VERSION=dev make publish @@ -127,11 +119,3 @@ Using etcdctl: > kubectl get nodes \# get nodes > name kubectl get \--raw /k8s/clusters/{}/api/v1/nodes/{}/proxy/metrics > \| grep kubelet_vol - -Ansible: - - # Keep cluster after test run - CLOUDSCALE_TOKEN="foobar" ansible-playbook integration_test.yml -i inventory/hosts.ini --skip-tags cleanup - - # Just run tests - ansible-playbook -i inventory/hosts.ini integration_test.yml --tags test diff --git a/deploy/ansible.cfg b/deploy/ansible.cfg deleted file mode 100644 index 6534e7af..00000000 --- a/deploy/ansible.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[defaults] -inventory=inventory -host_key_checking = False diff --git a/deploy/integration_test.yml b/deploy/integration_test.yml deleted file mode 100644 index 226de9c6..00000000 --- a/deploy/integration_test.yml +++ /dev/null @@ -1,144 +0,0 @@ ---- -- hosts: localhost - tags: [install-csi] - vars: - ssh_key_files: "{{ lookup('fileglob', '~/.ssh/id*.pub', wantlist=True) }}" - ssh_keys: "{{ [lookup('file', ssh_key_files[0])] }}" - base_server_name: 'test-kubernetes-' - servers: - - "master" - - "node1" - - "node2" - cloudscale_api_token: "{{ lookup('env','CLOUDSCALE_TOKEN') }}" - - tasks: - - debug: - msg: "Starting servers with keys found in ~/.ssh/id*.pub': {{ ssh_keys }}" - - - name: Start the cloudscale.ch servers - cloudscale_server: - name: "{{ base_server_name }}{{ item }}" - flavor: plus-8-4 - image: fedora-35 - ssh_keys: '{{ ssh_keys }}' - api_token: '{{ cloudscale_api_token }}' - loop: "{{ servers }}" - register: created_servers - - - name: Waiting for servers to come up - pause: - seconds: 30 - - - name: Register master in inventory - add_host: - hostname: "{{ created_servers.results[0].name }}" - ansible_ssh_host: "{{ created_servers.results[0].interfaces[0].addresses[0].address }}" - groups: - - kube_control_plane - - etcd - - k8s_cluster - ansible_user: 'root' - - - name: Register nodes in 
inventory - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.interfaces[0].addresses[0].address }}" - groups: - - kube_node - - k8s_cluster - ansible_user: 'root' - loop: ["{{ created_servers.results[1] }}", "{{ created_servers.results[2] }}"] - -# this is not needed for the plugin or the integration tests to work, but helps if you want -# to take a closer look at the LUKS volumes with SSH on a node -- hosts: k8s_cluster - tags: [install-cryptsetup] - tasks: - - name: "Install cryptsetup on nodes" - yum: - name: cryptsetup - -- name: Include the play that installs kubernetes - tags: [install-kubernetes] - import_playbook: kubespray/cluster.yml - -- hosts: kube_control_plane - vars: - cloudscale_api_token: "{{ lookup('env','CLOUDSCALE_TOKEN') }}" - secret_file: 'secret.yml' - config_file: 'csi-config.yml' - version: "{{ lookup('env', 'version')|default('dev', true) }}" - tags: [install-csi] - tasks: - - name: Create secrets in cluster - template: - src: templates/secret.yml.j2 - dest: "{{ secret_file }}" - - - name: Delete secret if it exists from a previous run - shell: "kubectl -n kube-system delete secret cloudscale || true" - - - name: Create secrets in cluster - shell: "kubectl create -f {{ secret_file }}" - - - name: Store show secrets - shell: kubectl -n kube-system get secrets - register: secret_output - - - name: Stored secrets in kubernetes - debug: - msg: "{{ secret_output.stdout_lines }}" - - # It would be nice to install helm using the tasks/roles in kubepsray, - # but I could not get it working. 
- - name: "Install git on nodes" - yum: - name: git - - - name: Get Helm installer - ansible.builtin.get_url: - url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 - dest: "./get_helm.sh" - mode: '0700' - - - name: Install Helm - shell: "./get_helm.sh" - - - name: Copy chart - copy: - src: "../charts/csi-cloudscale" - dest: "charts" - - - name: Install csi-driver - shell: "helm install -g -n kube-system --set controller.image.tag={{ version }} --set node.image.tag={{ version }} ./charts/csi-cloudscale" - - - name: Copy kubernetes config to localhost - fetch: - src: /root/.kube/config - dest: "kubeconfig.yml" - flat: true - - -- hosts: localhost - tags: [test] - tasks: - - name: "Run the tests" - shell: "KUBECONFIG=$(pwd)/deploy/kubeconfig.yml chdir=../ make test-integration" - -- hosts: localhost - tags: [cleanup] - vars: - base_server_name: 'test-kubernetes-' - servers: - - "master" - - "node1" - - "node2" - cloudscale_api_token: "{{ lookup('env','CLOUDSCALE_TOKEN') }}" - - tasks: - - name: Delete the cloudscale.ch servers - cloudscale_server: - name: "{{ base_server_name }}{{ item }}" - api_token: '{{ cloudscale_api_token }}' - state: absent - loop: "{{ servers }}" diff --git a/deploy/inventory/hosts.ini b/deploy/inventory/hosts.ini deleted file mode 100644 index 38416399..00000000 --- a/deploy/inventory/hosts.ini +++ /dev/null @@ -1,19 +0,0 @@ -[kube_control_plane] -# node1 -# node2 - -[etcd] -# node1 -# node2 -# node3 - -[kube_node] -# node2 -# node3 -# node4 -# node5 -# node6 - -[k8s_cluster:children] -kube_node -kube_control_plane diff --git a/deploy/kubespray b/deploy/kubespray deleted file mode 160000 index 2cf23e31..00000000 --- a/deploy/kubespray +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 2cf23e3104f9b8b20ca1aefd36e3e89be26fd090 diff --git a/deploy/templates/secret.yml.j2 b/deploy/templates/secret.yml.j2 deleted file mode 100644 index 7dbe1c1f..00000000 --- a/deploy/templates/secret.yml.j2 +++ /dev/null @@ -1,7 +0,0 @@ 
-apiVersion: v1 -kind: Secret -metadata: - name: cloudscale - namespace: kube-system -stringData: - access-token: "{{ cloudscale_api_token }}" diff --git a/helpers/bootstrap-cluster b/helpers/bootstrap-cluster new file mode 100755 index 00000000..071277b5 --- /dev/null +++ b/helpers/bootstrap-cluster @@ -0,0 +1,130 @@ +#!/usr/bin/env bash +# +# Ensures that a Kubernetes test cluster is present and updated +# +set -euo pipefail + +# Default values +RANDOM_NUMBER=$((RANDOM % 8193)) +K8TEST_SHA="cefd4ab" +ZONE="lpg1" +CLUSTER_PREFIX="csi-test-$RANDOM_NUMBER" +KUBERNETES="latest" +FLAVOR="plus-8-4" +CONTROL_COUNT=1 +WORKER_COUNT=3 +IMAGE="ubuntu-22.04" +VOLUME_SIZE_GB="25" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + key="$1" + case $key in + --k8test-sha) + K8TEST_SHA="$2" + shift + shift + ;; + --zone) + ZONE="$2" + shift + shift + ;; + --cluster-prefix) + CLUSTER_PREFIX="$2" + shift + shift + ;; + --kubernetes) + KUBERNETES="$2" + shift + shift + ;; + --control-count) + CONTROL_COUNT="$2" + shift + shift + ;; + --worker-count) + WORKER_COUNT="$2" + shift + shift + ;; + --image) + IMAGE="$2" + shift + shift + ;; + --flavor) + FLAVOR="$2" + shift + shift + ;; + --volume-size-gb) + VOLUME_SIZE_GB="$2" + shift + shift + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac +done + +# Prepares k8test with an existing virtual env, or a newly created one +function ensure-k8test() { + if ! test -d k8test; then + git clone git@github.com:cloudscale-ch/k8test.git + git -C k8test checkout "$K8TEST_SHA" + fi + + if [[ "${VIRTUAL_ENV:-}" == "" ]]; then + + if ! test -d k8test/venv; then + python3 -m venv k8test/venv + fi + + # shellcheck source=/dev/null + source k8test/venv/bin/activate + fi + + if ! command -v poetry > /dev/null; then + pip install poetry + fi + + if ! command -v ansible > /dev/null; then + poetry install --directory k8test + fi +} + +# Launches the test cluster, if there's no inventory yet +function ensure-inventory() { + if ! 
test -d k8test/cluster; then + mkdir k8test/cluster + fi + + if ! test -f k8test/cluster/ssh.pub; then + ssh-keygen -t ed25519 -N '' -f k8test/cluster/ssh + fi + + if ! test -f k8test/cluster/inventory.yml; then + k8test/playbooks/create-cluster.yml \ + -e ssh_key=k8test/cluster/ssh.pub \ + -e zone="$ZONE" \ + -e cluster_prefix="$CLUSTER_PREFIX" \ + -e kubernetes="$KUBERNETES" \ + -e control_count="$CONTROL_COUNT" \ + -e worker_count="$WORKER_COUNT" \ + -e image="$IMAGE" \ + -e flavor="$FLAVOR" \ + -e volume_size_gb="$VOLUME_SIZE_GB" + + # Those won't really change between runs, so update them during install + k8test/playbooks/update-secrets.yml \ + -i k8test/cluster/inventory.yml + fi +} + +ensure-k8test +ensure-inventory diff --git a/helpers/clean-up b/helpers/clean-up new file mode 100755 index 00000000..5605e086 --- /dev/null +++ b/helpers/clean-up @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# +# Ensures that any Kubernetes cluster is cleaned up +# +set -euo pipefail + +if test -f k8test/cluster/inventory.yml; then + source k8test/venv/bin/activate + k8test/playbooks/destroy-cluster.yml -i k8test/cluster/inventory.yml +fi + +if test -d k8test; then + rm -rf k8test +fi