Skip to content

Commit

Permalink
feat: all flows test
Browse files Browse the repository at this point in the history
Add a test to trigger accesstokens, userinfo, and introspection all together.
  • Loading branch information
jlangy committed Oct 5, 2023
1 parent 7301481 commit 6ae4896
Show file tree
Hide file tree
Showing 7 changed files with 157 additions and 15 deletions.
15 changes: 14 additions & 1 deletion k6/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ This folder contains load tests for our sso application

- `podman-compose up`

**Note**: _You will need to have installed podman and podman-compose_
**Note**: _You will need to have installed podman and podman-compose. Alternatively, you can use the same commands with docker compose, just specify the file with the -f flag._

This will start our custom keycloak image on localhost:8080, you can login with credentials username=admin, password=admin. To stop the image, you can ctrl+c out (alternatively, add the -d flag to run detached), and run `podman-compose down`. To clear out the volumes, with the image stopped, run `podman volume prune` (or specify the volumes if you have additional ones to keep). The image is currently set to use `ghcr.io/bcgov/sso:7.6.25-build.1`, this can be updated as later builds come up.

Expand Down Expand Up @@ -65,3 +65,16 @@ The test run can be configured with the following variables at the top of the fi
**CONCURRENT_LOOPS**: The number of loops to run concurrently. Increasing this number will allow the test to fire more requests at the same time. E.g running 3 concurrent loops would send 3 requests to the user info endpoint at once, and then wait the **LOOP_DELAY**, then fire all three again in the next realm.
**ITERATIONS_PER_LOOP**: The number of times each loop will run. Each loop will hit the user info endpoint this number of times, waiting a small delay between requests set by the **LOOP_DELAY** variable.
**LOOP_DELAY**: The amount of time to wait between requests in each loop, in seconds. e.g 0.1 is 100ms. Set to 0 to fire as soon as possible.

### [Constant Rate all Flows](./constantRateAllFlows.js)

Run this test to simulate fetching an access token, grabbing user info, and introspecting the token all together. This test has two scenarios, `peakProfile` and `stress`. The peak profile test is used to imitate our peak traffic running against the application for a two hour period. The stress test will ramp up traffic linearly over a 1 hour period until API requests start to fail, and then abort.

When stress testing, the application may get saturated with requests which prevents the teardown logic from succeeding, since it depends on the keycloak API being able to receive and act on requests. In this case, the test realms will not delete properly. These realms are all prefixed with "newrealm" and will need to be deleted manually.

The test run can be configured with the following variables at the top of the file:

**TOTAL_REALMS**: The number of realms to create.
**MAX_ALLOWED_FAILURE_RATE**: The percentage of requests to allow to fail before counting the test as failed. Enter as a string of a decimal number, e.g `'0.01'` is 1%.
**OFFLINE**: Set to true to request offline_access tokens.
**BASELINE_RATE**: If running the peakProfile scenario, this is the peak rate per minute of requests to use. It will also determine the start rate of the stress test.
2 changes: 1 addition & 1 deletion k6/activeSessions.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ export const options = {
executor: 'per-vu-iterations',
vus: CONCURRENT_LOOPS,
iterations: ITERATIONS_PER_LOOP,
}
},
},
thresholds: {
http_req_failed: [
Expand Down
90 changes: 90 additions & 0 deletions k6/constantRateAllFlows.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
import { sleep } from 'k6';
import { createRealm, deleteRealm, createUser, generateRealms, getAccessToken, hitIntrospectionRoute, hitUserInfoRoute, createClient } from './helpers.js';
import { user, client } from './constants.js';
import { username, password, clientId } from './env.js';

// Alter configuration to run separate tests. See this test in the readme for configuration details.
const TOTAL_REALMS = 1;
// This essentially just means no dropped requests allowed since we don't get to 10000 on the peak profile.
const MAX_ALLOWED_FAILURE_RATE = '0.0001';
// Set true to request offline_access tokens.
const OFFLINE = false;

// Peak requests per minute we've seen on the system. Drives the peakProfile
// rate and the stress scenario's starting rate.
const BASELINE_RATE = 34;

export const options = {
  scenarios: {
    peakProfile: {
      executor: 'constant-arrival-rate',
      duration: '2h',
      timeUnit: '1m',
      // Use the configured baseline so the scenario tracks BASELINE_RATE
      // (was hard-coded to 34, silently ignoring the setting above).
      rate: BASELINE_RATE,
      preAllocatedVUs: 5,
    },
    // stress: {
    //   executor: 'ramping-arrival-rate', //Assure load increase if the system slows
    //   startRate: BASELINE_RATE,
    //   timeUnit: '1m',
    //   preAllocatedVUs: 20000,
    //   stages: [
    //     { duration: '1m', target: BASELINE_RATE }, // just slowly ramp-up to a HUGE load
    //     // just slowly ramp-up to an EPIC load.
    //     { duration: '1h', target: 20000 },
    //   ],
    // }
  },
  thresholds: {
    http_req_failed: [
      {
        threshold: `rate<${MAX_ALLOWED_FAILURE_RATE}`,
        // Leave this in! Don't keep hammering the poor server after its failing, requests will queue
        abortOnFail: true,
      },
    ],
    // Requests tend to drop after 60 second timeout. Can use below to fail earlier
    // http_req_duration: [
    //   {
    //     threshold: `p(95)<15000`,
    //     abortOnFail: true,
    //   },
    // ]
  },
};

/**
 * k6 setup: provisions TOTAL_REALMS realms, each with a per-realm user
 * (username suffixed with the realm's index) and a confidential client
 * so the introspection endpoint can be exercised against that realm.
 * Returns the realm definitions for the VU and teardown stages.
 */
export function setup() {
  const adminToken = getAccessToken({ username, password, clientId, confidential: true });
  const realms = generateRealms(TOTAL_REALMS);
  for (const [index, realm] of realms.entries()) {
    createRealm(realm, adminToken);
    const realmUser = { ...user, username: `${user.username}_${index}` };
    createUser(realmUser, realm.realm, adminToken);
    // Confidential client is required to call the introspection endpoint in this realm.
    createClient(realm.realm, adminToken);
  }
  return realms;
}

/**
 * VU iteration: for every realm created in setup, run the full flow —
 * password-grant token fetch, userinfo lookup, then token introspection.
 */
export default function (realms) {
  for (const [index, realm] of realms.entries()) {
    const token = getAccessToken({
      username: `${user.username}_${index}`,
      password: user.credentials[0].value,
      clientId,
      confidential: true,
      realm: realm.realm,
      offline: OFFLINE,
    });
    hitUserInfoRoute(token, realm.realm);
    hitIntrospectionRoute(token, realm.realm, client.clientId, client.secret);
  }
}

/**
 * k6 teardown: deletes every realm created in setup.
 * If the stress scenario saturated the server, these deletes can still fail;
 * leftover realms are prefixed "newrealm" and must be removed manually (see README).
 */
export function teardown(realms) {
  // When stress testing, the enqueued requests can block teardown api requests
  // from succeeding. Adding in a sleep to let the system recover a bit before
  // trying to clean up.
  sleep(45)
  console.log('tearing down...')
  const accessToken = getAccessToken({ username, password, clientId, confidential: true });
  realms.forEach((realm, i) => {
    deleteRealm(realm.realm, accessToken);
  });
}
8 changes: 8 additions & 0 deletions k6/constants.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,15 @@ const user = {
credentials: [{ type: 'password', value: 'password', temporary: false }],
};

// Confidential test client shared by the load-test scripts: direct access
// grants allow password-grant token fetches, and service accounts plus the
// secret let tests call the introspection endpoint with this client's
// credentials.
const client = {
  clientId: 'test_privateClient',
  secret: 'secret',
  directAccessGrantsEnabled: true,
  serviceAccountsEnabled: true,
};

// Shared fixtures consumed by the k6 test scripts.
module.exports = {
  user,
  realm,
  client,
}
25 changes: 21 additions & 4 deletions k6/helpers.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import http, { head } from 'k6/http';
import { baseUrl, clientId, clientSecret } from './env.js';
import { realm } from './constants.js';
import { realm, client } from './constants.js';
import encoding from 'k6/encoding';

const getHeaders = (accessToken) => ({
Expand All @@ -18,15 +18,30 @@ function deleteRealm(realm, accessToken) {
return http.del(`${baseUrl}/admin/realms/${realm}`, {}, { headers });
}

function getAccessToken({username, password, clientId, confidential, realm = 'master', offline = false}) {
/**
 * Creates the shared confidential test client (the `client` fixture from
 * constants.js) in the given realm via the Keycloak admin API.
 * @param {string} realm - Realm name to create the client in.
 * @param {string} accessToken - Admin bearer token.
 * @returns {string} The client's internal (server-generated) id, parsed from
 *   the response's Location header.
 */
function createClient(realm, accessToken) {
  const headers = getHeaders(accessToken);
  const result = http.post(`${baseUrl}/admin/realms/${realm}/clients`, JSON.stringify(client), { headers });

  // Client internal id is returned in the Location header as the trailing piece of the URL.
  // NOTE(review): assumes the POST succeeded and a Location header is present —
  // a failed create (e.g. duplicate clientId) would throw here; confirm that's acceptable.
  const locationURIParts = result.headers.Location.split('/')
  const clientInternalId = locationURIParts[locationURIParts.length - 1]
  return clientInternalId
}

/**
 * Deletes a client (addressed by its internal id) from the given realm
 * via the Keycloak admin API.
 * @param {string} realm - Realm name the client lives in.
 * @param {string} clientId - The client's internal (server-generated) id.
 * @param {string} accessToken - Admin bearer token.
 */
function deleteClient(realm, clientId, accessToken) {
  http.del(`${baseUrl}/admin/realms/${realm}/clients/${clientId}`, {}, { headers: getHeaders(accessToken) });
}

function getAccessToken({username, password, clientId, confidential, realm = 'master', offline = false, secret = clientSecret}) {
const body = {
grant_type: 'password',
client_id: clientId,
username,
password,
}
if (confidential) {
body['client_secret'] = clientSecret
body['client_secret'] = secret
}
if (offline) {
body['scope'] = 'email profile offline_access'
Expand Down Expand Up @@ -77,7 +92,7 @@ function hitUserInfoRoute(accessToken, realmName) {
const result = http.get(url, { headers });
}

export function hitIntrospectionRoute(accessToken, realmName) {
function hitIntrospectionRoute(accessToken, realmName, clientId, clientSecret) {
const base64Credentials = encoding.b64encode(`${clientId}:${clientSecret}`)
const url = `${baseUrl}/realms/${realmName}/protocol/openid-connect/token/introspect`;
const headers = { Authorization: `Basic ${base64Credentials}` }
Expand All @@ -94,4 +109,6 @@ module.exports = {
generateRealms,
hitUserInfoRoute,
hitIntrospectionRoute,
createClient,
deleteClient,
};
13 changes: 9 additions & 4 deletions k6/local_setup/podman-grapher/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,14 @@ app.whenReady().then(() => {

// Handle data from the child process and send it to the renderer process
stats.stdout.on('data', (data) => {
  // podman stats output is a header line followed by a CSV data line of
  // "cpu%,mem%"; a short/partial chunk would make the parse throw, so it is
  // guarded. (Removed a stale unguarded copy of the same parsing that ran
  // before the try block and could crash on malformed chunks.)
  try {
    const dataLine = data.toString().split('\n')[1]
    const [cpuPercent, memoryPercent] = dataLine.split(',')
    // Send data to the renderer process for graphing
    win.webContents.send('podmanData', { cpuPercent, memoryPercent });
  } catch (e) {
    console.error('Error while parsing podman stats', e);
  }
});
})
19 changes: 14 additions & 5 deletions k6/tokenIntrospection.js
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import { sleep } from 'k6';
import { hitIntrospectionRoute, getAccessToken } from './helpers.js';
import { hitIntrospectionRoute, getAccessToken, createClient, deleteClient } from './helpers.js';
import { username, password, clientId } from './env.js';
import { client } from './constants.js';

// Number of loops (VUs) firing requests concurrently.
const CONCURRENT_LOOPS = 1;
// Requests each loop sends to the introspection endpoint.
// (Removed a stale duplicate declaration of this constant — a leftover
// pre-change line that made the file a SyntaxError.)
const ITERATIONS_PER_LOOP = 10;
// Delay between requests within a loop, in seconds (0.01 = 10ms).
const LOOP_DELAY = 0.01;
// Failure rate (as a decimal string) above which the run is marked failed.
const MAX_ALLOWED_FAILURE_RATE = '0.01'

Expand All @@ -28,11 +29,19 @@ export const options = {

/**
 * k6 setup: fetches an admin token and creates the confidential test client
 * in the master realm so the VU stage can call the introspection endpoint.
 * Returns both so teardown can delete the client by its internal id.
 * (Removed a stale `return accessToken;` — leftover pre-change line that made
 * the client creation unreachable and broke teardown's destructuring.)
 */
export function setup() {
  const accessToken = getAccessToken({ username, password, clientId, confidential: true });
  const clientInternalId = createClient('master', accessToken)
  return { accessToken, clientInternalId };
}

// VU code: one delayed introspection request per iteration.
// (Removed a stale duplicate function header and a stale two-argument
// introspection call — leftover pre-change lines that made this a
// SyntaxError / double request.)
export default function ({ accessToken }) {
  sleep(LOOP_DELAY)
  // If running longer than the token lifetime this will be inactive. But should
  // be similar load. Fetching a fresh token would change this test to also
  // include the load of requesting access tokens.
  hitIntrospectionRoute(accessToken, 'master', client.clientId, client.secret)
}

/**
 * k6 teardown: deletes the test client created in setup from the master realm.
 */
export function teardown({clientInternalId}) {
  // NOTE(review): the cleanup token is requested through the test client itself
  // (client.clientId / client.secret) using the admin user's credentials —
  // confirm the resulting token carries the role needed to delete clients.
  const accessToken = getAccessToken({ username, password, clientId: client.clientId, confidential: true, secret: client.secret });
  deleteClient('master', clientInternalId, accessToken)
}

0 comments on commit 6ae4896

Please sign in to comment.