name: AKS (BYOCNI)

# Any change in triggers needs to be reflected in the concurrency group.
on:
  ### FOR TESTING PURPOSES
  # This workflow runs in the context of `main`, and ignores changes to
  # workflow files in PRs. For testing changes to this workflow from a PR:
  # - Make sure the PR uses a branch from the base repository (requires write
  #   privileges). It will not work with a branch from a fork (missing secrets).
  # - Uncomment the `pull_request` event below, commit separately with a `DO
  #   NOT MERGE` message, and push to the PR. As long as the commit is present,
  #   any push to the PR will trigger this workflow.
  # - Don't forget to remove the `DO NOT MERGE` commit once satisfied. The run
  #   will disappear from the PR checks: please provide a direct link to the
  #   successful workflow run (can be found from the Actions tab) in a comment.
  #
  # pull_request: {}
  ###
  pull_request_target: {}
  # Run every 6 hours
  schedule:
    - cron: '0 0/6 * * *'
# When any scope is listed explicitly, all scopes that are not listed are
# set to 'none'.
permissions:
  # To be able to access the repository with actions/checkout
  contents: read
  # To allow retrieving information from the PR API
  pull-requests: read
  # Required to generate OIDC tokens for `az` authentication
  id-token: write

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || 'scheduled' }}
  cancel-in-progress: true
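
# The concurrency group mirrors the triggers above: PR-triggered runs are
# keyed by PR number, so a new push cancels the in-flight run for that PR,
# while all scheduled runs share the single 'scheduled' slot.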
env:
  location: westus2
  cost_reduction: --node-vm-size Standard_B2s --node-osdisk-size 30
  # renovate: datasource=github-releases depName=cilium/cilium
  cilium_version: v1.16.1
  kubectl_version: v1.23.6
jobs:
  installation-and-connectivity:
    name: AKS BYOCNI Installation and Connectivity Test
    if: ${{ github.repository == 'cilium/cilium-cli' }}
    runs-on: ubuntu-22.04
    timeout-minutes: 60
    steps:
      - name: Checkout
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      - name: Install kubectl
        run: |
          curl -sLO "https://dl.k8s.io/release/${{ env.kubectl_version }}/bin/linux/amd64/kubectl"
          curl -sLO "https://dl.k8s.io/release/${{ env.kubectl_version }}/bin/linux/amd64/kubectl.sha256"
          echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
          sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
          kubectl version --client
      - name: Login to Azure
        uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0
        with:
          client-id: ${{ secrets.AZURE_PR_CLIENT_ID }}
          tenant-id: ${{ secrets.AZURE_PR_TENANT_ID }}
          subscription-id: ${{ secrets.AZURE_PR_SUBSCRIPTION_ID }}
      # Temporary workaround due to the issue: https://github.com/Azure/azure-cli/issues/28708
      - name: Fetch OIDC token every 4 mins
        shell: bash
        run: |
          while true; do
            token=$(curl -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" "${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=api://AzureADTokenExchange" | jq -r .value)
            az login --service-principal -u ${{ secrets.AZURE_PR_CLIENT_ID }} -t ${{ secrets.AZURE_PR_TENANT_ID }} --federated-token "$token" --output none
            # Sleep for 4 minutes
            sleep 240
          done &
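      # Note: the loop above backgrounds itself (`&`), so this step returns
      # immediately and keeps re-running `az login` for the lifetime of the
      # job: the GitHub OIDC token is short-lived and, per the issue linked
      # above, `az` does not refresh the federated credential on its own.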
      - name: Install aks-preview CLI extension
        run: |
          az extension add --name aks-preview
          az extension update --name aks-preview
          az version
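      # (The aks-preview extension is presumably kept around for access to
      # preview AKS features; BYOCNI via `--network-plugin none` originally
      # shipped behind it.)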
      - name: Set up job variables
        id: vars
        run: |
          if [ ${{ github.event.issue.pull_request || github.event.pull_request }} ]; then
            PR_API_JSON=$(curl \
              -H "Accept: application/vnd.github.v3+json" \
              -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
              ${{ github.event.issue.pull_request.url || github.event.pull_request.url }})
            SHA=$(echo "$PR_API_JSON" | jq -r ".head.sha")
            OWNER=$(echo "$PR_API_JSON" | jq -r ".number")
          else
            SHA=${{ github.sha }}
            OWNER=${{ github.sha }}
          fi
          echo "sha=${SHA}" >> $GITHUB_OUTPUT
          echo "owner=${OWNER}" >> $GITHUB_OUTPUT
          echo "name=${{ github.event.repository.name }}-${{ github.run_id }}-${{ github.run_attempt }}" >> $GITHUB_OUTPUT
      - name: Create AKS cluster
        run: |
          # Create group
          az group create \
            --name ${{ steps.vars.outputs.name }} \
            --location ${{ env.location }} \
            --tags usage=${{ github.repository_owner }}-${{ github.event.repository.name }} owner=${{ steps.vars.outputs.owner }}

          # Create AKS cluster
          az aks create \
            --resource-group ${{ steps.vars.outputs.name }} \
            --name ${{ steps.vars.outputs.name }} \
            --location ${{ env.location }} \
            --network-plugin none \
            --node-count 2 \
            ${{ env.cost_reduction }} \
            --generate-ssh-keys
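      # `--network-plugin none` provisions the cluster without any CNI plugin
      # (BYOCNI): nodes stay NotReady until Cilium is installed further below.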
      - name: Get cluster credentials
        run: |
          az aks get-credentials \
            --resource-group ${{ steps.vars.outputs.name }} \
            --name ${{ steps.vars.outputs.name }}
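      # Merges the new cluster's credentials into the default kubeconfig so
      # that the kubectl and cilium invocations below target this cluster.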
      - name: Install Cilium CLI
        uses: ./
        with:
          skip-build: 'true'
          image-tag: ${{ steps.vars.outputs.sha }}
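      # `uses: ./` runs the action defined at the root of this repository
      # (checked out above) to install the Cilium CLI used by the next steps.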
      - name: Run test
        run: |
          cilium install \
            --version "${{ env.cilium_version }}" \
            --datapath-mode=aks-byocni \
            --wait=false \
            --set loadBalancer.l7.backend=envoy \
            --set tls.secretsBackend=k8s \
            --set bpf.monitorAggregation=none \
            --set ipam.operator.clusterPoolIPv4PodCIDRList=192.168.0.0/16 # To avoid clashing with the default Service CIDR of AKS (10.0.0.0/16)

          # Wait for Cilium to be ready
          # NB: necessary to work around occasional flakes due to https://github.com/cilium/cilium-cli/issues/918
          cilium status --wait

          # Run connectivity test
          cilium connectivity test --test-concurrency=5 --collect-sysdump-on-failure --external-target bing.com.
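          # Remove the namespaces created by the connectivity test before the
          # perf run (matched via the app.kubernetes.io/name=cilium-cli label)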
          kubectl delete namespace -l "app.kubernetes.io/name=cilium-cli"

          # Run performance test
          cilium connectivity perf --duration 1s

          # Retrieve Cilium status
          cilium status
      - name: Post-test information gathering
        if: ${{ !success() }}
        run: |
          echo "=== Retrieve cluster state ==="
          kubectl get pods --all-namespaces -o wide
          cilium status
          cilium sysdump --output-filename cilium-sysdump-out
        shell: bash {0} # Disable default fail-fast behaviour so that all commands run independently
      - name: Clean up AKS
        if: ${{ always() }}
        run: |
          az group delete --name ${{ steps.vars.outputs.name }} --yes --no-wait
        shell: bash {0} # Disable default fail-fast behaviour so that all commands run independently
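      # `--no-wait` makes the group deletion asynchronous, so the job does
      # not block while Azure tears down the cluster resources.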
      - name: Upload artifacts
        if: ${{ !success() }}
        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
        with:
          name: cilium-sysdump-out.zip
          path: cilium-sysdump-out.zip
          retention-days: 5