diff --git a/docs/cloud/android.md b/docs/cloud/android.md deleted file mode 100644 index db2c07775..000000000 --- a/docs/cloud/android.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Overview" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -To execute tests on Android devices marathon needs Android SDK -installed. Devices are expected to be connected to local machine by any means -supported by the adb (local usb connection, local emulator or TCP/IP). - -:::tip - -You can connect remote devices using `adb connect IP:port`. Marathon will be able to use them just like -any other Android devices - -::: diff --git a/docs/cloud/android/configure.md b/docs/cloud/android/configure.md deleted file mode 100644 index 3b671c0b4..000000000 --- a/docs/cloud/android/configure.md +++ /dev/null @@ -1,1097 +0,0 @@ ---- -title: "Configuration" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Required CLI configuration - -When using CLI you have to specify the *type* of vendor configuration in the root of the **Marathonfile** configuration as following: - -```yaml -... -vendorConfiguration: - type: "Android" - additional_option1: ... - additional_option2: ... -``` - -## Required options - -### Android SDK path - -:::info - -This option is automatically detected if: - -1. You're using the Gradle Plugin -2. You're using the CLI and you have an **ANDROID_SDK_ROOT** or **ANDROID_HOME** environment variable - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - androidSdk: "/usr/share/local/android" -``` - - - - -### APK paths - -#### Single module testing - -##### Application APK path - -:::info - -This option is automatically detected if you're using Gradle Plugin - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - applicationApk: "dist/app-debug.apk" -``` - - - - -#### Test application APK path - -:::info - -This option is automatically detected if you're using Gradle Plugin - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - testApplicationApk: "dist/app-debug.apk" -``` - - - - -#### Multi module testing - -:::danger - -This mode is not supported by Gradle Plugin - -::: - -Marathon supports testing multiple modules at the same time (e.g. your tests are across more than one module): - - - - -```yaml -vendorConfiguration: - type: "Android" - outputs: - - application: "dist/app-debug.apk" - testApplication: "dist/app-debug-androidTest.apk" - - testApplication: "dist/library-debug-androidTest.apk" -``` - - - - -Each entry consists of `testApplication` in case of library testing and `application` + `testApplication` for application testing. - -#### Split apk testing (Dynamic Feature) - -:::danger - -This mode is not supported by Gradle Plugin - -This mode is also not available for Android devices with version less Android 5. - -::: -Marathon supports testing dynamic feature modules: - - - - -```yaml -vendorConfiguration: - type: "Android" - applicationApk: "dist/base.apk" - splitApks: - - "dist/dynamic-feature1-debug.apk" - - "dist/dynamic-feature2-debug.apk" -``` - - - - -## Optional - -### Automatic granting of permissions - -This option will grant all runtime permissions during the installation of the -application. This works like the option ```-g``` for [```adb install```][2] command. By default, it's set to **false**. 
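For reference, the manual equivalent of this option is passing `-g` to `adb install` yourself (the APK path here is illustrative):

```shell
# Install the APK and grant all runtime permissions declared in its manifest (API 23+)
adb install -g dist/app-debug.apk
```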
- - - - -```yaml -vendorConfiguration: - type: "Android" - autoGrantPermission: true -``` - - - - -```kotlin -marathon { - autoGrantPermission = true -} -``` - - - - -```groovy -marathon { - autoGrantPermission = true -} -``` - - - - -### ADB initialisation timeout - -This option allows you to increase/decrease the default adb init timeout of 30 -seconds. - - - - -```yaml -vendorConfiguration: - type: "Android" - adbInitTimeoutMillis: 60000 -``` - - - - -```kotlin -marathon { - adbInitTimeout = 100000 -} -``` - - - - -```groovy -marathon { - adbInitTimeout = 100000 -} -``` - - - - -### Device serial number assignment - -This option allows to customise how marathon assigns a serial number to devices. -Possible values are: - -* ```automatic``` -* ```marathon_property``` -* ```boot_property``` -* ```hostname``` -* ```ddms``` - - - - -```yaml -vendorConfiguration: - type: "Android" - serialStrategy: "automatic" -``` - - - - -```kotlin -marathon { - serialStrategy = SerialStrategy.AUTOMATIC -} -``` - - - - -```groovy -marathon { - serialStrategy = SerialStrategy.AUTOMATIC -} -``` - - - - -Notes on the source of serial number: - -```marathon_property``` - Property name `marathon.serialno` - -```boot_property``` - Property name `ro.boot.serialno` - -```hostname``` - Property name `net.hostname` - -```ddms``` - Adb serial number(same as you see with `adb devices` command) - -```automatic``` - Sequantially checks all available options for first non empty value. - -Priority order: - -Before 0.6: ```marathon_property``` -> ```boot_property``` -> ```hostname``` -> ```ddms``` -> UUID - -After 0.6: ```marathon_property``` -> ```ddms``` -> ```boot_property``` -> ```hostname``` -> UUID - -### Install options - -By default, these will be ```-g -r``` (```-r``` prior to marshmallow). You can specify additional options to append to the default ones. - - - - - -```yaml -vendorConfiguration: - type: "Android" - installOptions: "-d" -``` - - - - -```kotlin -marathon { - installOptions = "-d" -} -``` - - - - -```groovy -marathon { - installOptions = "-d" -} -``` - - - - -### Screen recorder configuration - -By default, device will record a 1280x720 1Mbps video of up to 180 seconds if it is supported. 
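Under the hood marathon relies on Android's `screenrecord` tool, so the default recording roughly corresponds to the following manual invocation (the output path is illustrative):

```shell
# Record at 1280x720 and 1 Mbps for at most 180 seconds
adb shell screenrecord --size 1280x720 --bit-rate 1000000 --time-limit 180 /sdcard/marathon-sample.mp4
```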
If on the other hand you want to force -screenshots or configure the recording parameters you can specify this as follows: - -:::tip - -Android's `screenrecorder` doesn't support videos longer than 180 seconds - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - screenRecordConfiguration: - preferableRecorderType: "screenshot" - videoConfiguration: - enabled: false - width: 1080 - height: 1920 - bitrateMbps: 2 - timeLimit: 300 - screenshotConfiguration: - enabled: false - width: 1080 - height: 1920 - delayMs: 200 -``` - - - - -```kotlin -marathon { - screenRecordConfiguration = ScreenRecordConfiguration( - RecorderType.SCREENSHOT, - VideoConfiguration( - false, //enabled - 1080, //width - 1920, //height - 2, //Bitrate in Mbps - 300 //Max duration in seconds - ), - ScreenshotConfiguration( - false, //enabled - 1080, //width - 1920, //height - 200 //Delay between taking screenshots - ) - ) -} -``` - - - - -```groovy -marathon { - screenRecordConfiguration = ScreenRecordConfiguration( - RecorderType.SCREENSHOT, - VideoConfiguration( - false, //enabled - 1080, //width - 1920, //height - 2, //Bitrate in Mbps - 300 //Max duration in seconds - ), - ScreenshotConfiguration( - false, //enabled - 1080, //width - 1920, //height - 200 //Delay between taking screenshots - ) - ) -} -``` - - - - -### Clear state between test batch executions - -By default, marathon does not clear state between test batch executions. To mitigate potential test side effects, one could add an option to -clear the package data between test runs. Keep in mind that test side effects might be present. -If you want to isolate tests even further, then you should consider reducing the batch size. - -:::caution - -Since `pm clear` resets the permissions of the package, the granting of permissions during installation is essentially overridden. Marathon -doesn't grant the permissions again. -If you need permissions to be granted, and you need to clear the state, consider alternatives -like [GrantPermissionRule](https://developer.android.com/reference/androidx/test/rule/GrantPermissionRule) - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - applicationPmClear: true - testApplicationPmClear: true -``` - - - - -```kotlin -marathon { - applicationPmClear = true - testApplicationPmClear = true -} -``` - - - - -```groovy -marathon { - applicationPmClear = true - testApplicationPmClear = true -} -``` - - - - -### Instrumentation arguments - -If you want to pass additional arguments to the `am instrument` command executed on the device: - - - - -```yaml -vendorConfiguration: - type: "Android" - instrumentationArgs: - size: small -``` - - - - -```kotlin -marathon { - instrumentationArgs { - set("size", "small") - } -} -``` - - - - -```groovy -marathon { - instrumentationArgs { - set("size", "small") - } -} -``` - - - - -### [Allure-kotlin][3] support - -This option enables collection of allure's data from devices. -Configuration below works out of the box for allure-kotlin 2.3.0+. 
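To produce this data, the test application itself has to be instrumented with allure-kotlin. As a sketch, the dependency declaration could look like this (artifact coordinates are an assumption based on allure-kotlin 2.3.0 — verify them against allure's documentation):

```kotlin
dependencies {
    // Assumed allure-kotlin artifacts; check the allure-kotlin docs for exact coordinates
    androidTestImplementation("io.qameta.allure:allure-kotlin-android:2.3.0")
    androidTestImplementation("io.qameta.allure:allure-kotlin-junit4:2.3.0")
}
```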
- - - - -```yaml -vendorConfiguration: - type: "Android" - allureConfiguration: - enabled: true -``` - - - - -```kotlin -marathon { - allureConfiguration { - enabled = true - } -} -``` - - - - -```groovy -marathon { - allureConfiguration { - enabled = true - } -} -``` - - - - -Additional configuration parameters include **pathRoot** which has two options: - -* `EXTERNAL_STORAGE` that is usually the `/sdcard/` on most of the devices -* `APP_DATA` which is usually `/data/data/$appPackage/` - -Besides the expected path root, you might need to provide the **relativeResultsDirectory**: this is the relative path to `pathRoot`. The -default path for allure-kotlin as of 2.3.0 is `/data/data/$appPackage/allure-results`. - - - - -```yaml -vendorConfiguration: - type: "Android" - allureConfiguration: - enabled: true - relativeResultsDirectory: "relative/path/to/allure-results" -``` - - - - -```kotlin -marathon { - allureConfiguration { - enabled = true - relativeResultsDirectory = "relative/path/to/allure-results" - } -} -``` - - - - -```groovy -marathon { - allureConfiguration { - enabled = true - relativeResultsDirectory = "relative/path/to/allure-results" - } -} -``` - - - - -Please refer to [allure's documentation][3] on the usage of allure. - -:::tip - -Starting with allure 2.3.0 your test application no longer needs MANAGE_EXTERNAL_STORAGE permission to write allure's output, so there is no -need to add any special permissions. - -::: - -Enabling this option effectively creates two allure reports for each test run: - -* one from the point of view of the marathon test runner -* one from the point of view of on-device test execution - -The on-device report gives you more flexibility and allows you to: - -* Take screenshots whenever you want -* Divide large tests into steps and visualise them in the report -* Capture window hierarchy - and more. - -All allure output from devices will be collected under `$output/device-files/allure-results` folder. - -### Timeout configuration - -With the introduction of [adam][4] we can precisely control the timeout of individual requests. Here is how you can use it: - - - - - -```yaml -vendorConfiguration: - type: "Android" - timeoutConfiguration: - # ISO_8601 duration - shell: "PT30S" - listFiles: "PT1M" - pushFile: "PT1H" - pushFolder: "PT1H" - pullFile: "P1D" - uninstall: "PT1S" - install: "P1DT12H30M5S" - screenrecorder: "PT1H" - screencapturer: "PT1S" - boot: "PT30S" -``` - - - - -```kotlin -marathon { - timeoutConfiguration { - shell = Duration.ofSeconds(30) - } -} -``` - - - - -```groovy -marathon { - timeoutConfiguration { - shell = Duration.ofSeconds(30) - } -} -``` - - - - -### Sync/pull files from device after test run - -Sometimes you need to pull some folders from each device after the test execution. It may be screenshots or logs or other debug information. -To help with this marathon supports pulling files from devices at the end of the test batch execution. 
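For example, a test might produce debug artifacts like this (a minimal sketch; the folder name matches the `relativePath` entries configured below):

```kotlin
import android.os.Environment
import java.io.File

// Called from a test: writes a debug artifact to /sdcard/my-device-folder1/
// so that marathon can pull it after the batch finishes
fun dumpDebugLog(content: String) {
    val dir = File(Environment.getExternalStorageDirectory(), "my-device-folder1").apply { mkdirs() }
    File(dir, "debug-log.txt").writeText(content)
}
```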
Here is how you can configure it: - - - - -```yaml -vendorConfiguration: - type: "Android" - fileSyncConfiguration: - pull: - - relativePath: "my-device-folder1" - aggregationMode: TEST_RUN - pathRoot: EXTERNAL_STORAGE - - relativePath: "my-device-folder2" - aggregationMode: DEVICE - pathRoot: EXTERNAL_STORAGE - - relativePath: "my-device-folder3" - aggregationMode: DEVICE_AND_POOL - pathRoot: EXTERNAL_STORAGE - - relativePath: "my-device-folder4" - aggregationMode: POOL - pathRoot: EXTERNAL_STORAGE -``` - - - - -```kotlin -marathon { - fileSyncConfiguration { - pull.add( - FileSyncEntry( - "my-device-folder1", - AggregationMode.TEST_RUN, - PathRoot.EXTERNAL_STORAGE - ) - ) - pull.add( - FileSyncEntry( - "my-device-folder2", - AggregationMode.DEVICE, - PathRoot.EXTERNAL_STORAGE - ) - ) - pull.add( - FileSyncEntry( - "my-device-folder3", - AggregationMode.DEVICE_AND_POOL, - PathRoot.EXTERNAL_STORAGE - ) - ) - pull.add( - FileSyncEntry( - "my-device-folder4", - AggregationMode.POOL, - PathRoot.EXTERNAL_STORAGE - ) - ) - } -} -``` - - - - -```groovy -marathon { - fileSyncConfiguration { - pull.add( - new FileSyncEntry( - "my-device-folder1", - AggregationMode.TEST_RUN, - PathRoot.EXTERNAL_STORAGE - ) - ) - pull.add( - FileSyncEntry( - "my-device-folder2", - AggregationMode.DEVICE, - PathRoot.EXTERNAL_STORAGE - ) - ) - pull.add( - FileSyncEntry( - "my-device-folder3", - AggregationMode.DEVICE_AND_POOL, - PathRoot.EXTERNAL_STORAGE - ) - ) - pull.add( - FileSyncEntry( - "my-device-folder4", - AggregationMode.POOL, - PathRoot.EXTERNAL_STORAGE - ) - ) - } -} -``` - - - - -:::caution - -Please pay attention to the path on the device: if path root is `EXTERNAL_STORAGE` (which is the default value if you don't specify -anything), -then `relativePath` is relative to the `Environment.getExternalStorageDirectory()` or -the `EXTERNAL_STORAGE` envvar. In practice this means that if you have a folder like `/sdcard/my-folder` you should specify `/my-folder` as -a relative path. - -::: - -Starting with Android 11 your test application will require **MANAGE_EXTERNAL_STORAGE** permission: - -```xml - - - - ... - -``` - -Marathon will automatically grant this permission before executing tests if you pull/push files from devices with `EXTERNAL_STORAGE` path -root. - -If you don't want to add any permissions to your test application, you can also use the path root `APP_DATA`. This will automatically -transfer the files from your application's private folder, e.g. `/data/data/com.example/my-folder`. - -### Push files to device before each batch execution - -Sometimes you need to push some files/folders to each device before the test execution. Here is how you can configure it: - -:::tip - -By default, pushing will be done to `LOCAL_TMP` path root which refers to the `/data/local/tmp`. -You can also push files to `EXTERNAL_STORAGE`. Currently, pushing to `APP_DATA` is not supported. 
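Tests can then read the pushed files from the matching absolute path on the device, e.g. a file pushed as `testfile.txt` ends up at `/data/local/tmp/testfile.txt`.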
- -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - fileSyncConfiguration: - push: - - path: "/home/user/folder" - - path: "/home/user/testfile.txt" -``` - - - - -```kotlin -marathon { - fileSyncConfiguration { - push.add(FilePushEntry("/home/user/folder")) - push.add(FilePushEntry("/home/user/testfile.txt")) - } -} -``` - - - - -```groovy -marathon { - fileSyncConfiguration { - push.add(new FilePushEntry("/home/user/folder")) - push.add(new FilePushEntry("/home/user/testfile.txt")) - } -} -``` - - - - -### Test parser - -:::tip - -If you need to parallelize the execution of parameterized tests or have complex runtime test generation -(custom test runners, e.g. cucumber) - remote parser is your choice. - -::: - -Test parsing (collecting a list of tests expected to execute) can be done using either a local test parser, which uses byte code analysis, -or a remote test parser that uses an Android device to collect a list of tests expected to run. Both have pros and cons listed below: - -| YAML type | Gradle class | Pros | Const | -|-----------|:------------------:|------------------------------------------------------------------------------------------------------------------:|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| -| "local" | `LocalTestParser` | Doesn't require a booted Android device | Doesn't support runtime-generated tests, e.g. named parameterized tests. Doesn't support parallelising parameterized tests | -| "remote" | `RemoteTestParser` | Supports any runtime-generated tests, including parameterized, and allows marathon to parallelise their execution | Requires a booted Android device for parsing. If you need to use annotations for filtering purposes, requires test apk changes as well as attaching a test run listener for parser | - -Default test parser is local. - -For annotations parsing using remote test parser test run is triggered without running tests (using `-e log true` option). 
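In adb terms this parsing pass resembles a dry-run instrumentation call such as the following (package and runner names are placeholders):

```shell
# List tests without executing them; marathon parses the instrumentation output
adb shell am instrument -r -w -e log true com.example.test/androidx.test.runner.AndroidJUnitRunner
```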
Annotations are -expected to be reported as test metrics, e.g.: - -```text -INSTRUMENTATION_STATUS_CODE: 0 -INSTRUMENTATION_STATUS: class=com.example.FailedAssumptionTest -INSTRUMENTATION_STATUS: current=4 -INSTRUMENTATION_STATUS: id=AndroidJUnitRunner -INSTRUMENTATION_STATUS: numtests=39 -INSTRUMENTATION_STATUS: stream= -com.example.FailedAssumptionTest: -INSTRUMENTATION_STATUS: test=ignoreTest -INSTRUMENTATION_STATUS_CODE: 1 -INSTRUMENTATION_STATUS: com.malinskiy.adam.junit4.android.listener.TestAnnotationProducer.v2=[androidx.test.filters.SmallTest(), io.qameta.allure.kotlin.Severity(value=critical), io.qameta.allure.kotlin.Story(value=Slow), org.junit.Test(expected=class org.junit.Test$None:timeout=0), io.qameta.allure.kotlin.Owner(value=user2), io.qameta.allure.kotlin.Feature(value=Text on main screen), io.qameta.allure.kotlin.Epic(value=General), org.junit.runner.RunWith(value=class io.qameta.allure.android.runners.AllureAndroidJUnit4), kotlin.Metadata(bytecodeVersion=[I@bdf6b25:data1=[Ljava.lang.String;@46414fa:data2=[Ljava.lang.String;@5d4aab:extraInt=0:extraString=:kind=1:metadataVersion=[I@fbb1508:packageName=), io.qameta.allure.kotlin.Severity(value=critical), io.qameta.allure.kotlin.Story(value=Slow)] -INSTRUMENTATION_STATUS_CODE: 2 -INSTRUMENTATION_STATUS: class=com.example.FailedAssumptionTest -INSTRUMENTATION_STATUS: current=4 -INSTRUMENTATION_STATUS: id=AndroidJUnitRunner -INSTRUMENTATION_STATUS: numtests=39 -INSTRUMENTATION_STATUS: stream=. -INSTRUMENTATION_STATUS: test=ignoreTest -``` - -To generate the above metrics you need to add a JUnit 4 listener to your dependencies: - -```groovy -dependecies { - androidTestImplementation("com.malinskiy.adam:android-junit4-test-annotation-producer:${LATEST_VERSION}") -} -``` - -Then you need to attach it to the execution. One way to attach the listener is using `am instrument` parameters, e.g. -`-e listener com.malinskiy.adam.junit4.android.listener.TestAnnotationProducer`. Below you will find an example for configuring a remote -test -parser: - -:::caution - -Keep in mind that `instrumentationArgs` should include a listener only for the test parser. During the actual execution there is no need -to produce test annotations. - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - testParserConfiguration: - type: "remote" - instrumentationArgs: - listener: "com.malinskiy.adam.junit4.android.listener.TestAnnotationProducer" -``` - - - - -```kotlin -marathon { - testParserConfiguration = TestParserConfiguration.RemoteTestParserConfiguration( - mapOf( - "listener" to "com.malinskiy.adam.junit4.android.listener.TestAnnotationProducer" - ) - ) -} -``` - - - - -### Test access configuration -:::info - -This is power-user feature of marathon that allows setting up GPS location on the emulator, simulating calls, SMS and more thanks to the -access to device-under-test from the test itself. - -::: - -Marathon supports adam's junit extensions which allow tests to gain access to adb on all devices and emulator's control + gRPC port. See the -[docs](https://malinskiy.github.io/adam/extensions/1-android-junit/) as well as the [PR](https://github.com/Malinskiy/adam/pull/30) for -description on how this works. 
- - - - -```yaml -vendorConfiguration: - type: "Android" - testAccessConfiguration: - adb: true - grpc: true - console: true - consoleToken: "cantFoolMe" -``` - - - - -```kotlin -marathon { - testAccessConfiguration = TestAccessConfiguration( - adb = true, - grpc = true, - console = true, - consoleToken = "cantFoolMe" - ) -} -``` - - - - -### Multiple adb servers - -Default configuration of marathon assumes that adb server is started locally and is available at `127.0.0.1:5037`. In some cases it may be -desirable to connect multiple adb servers instead of connecting devices to a single adb server. An example of this is distributed execution -of tests using test access (calling adb commands from tests). For such scenario all emulators should be connected via a local (in relation -to the emulator) adb server. Default port for each host is 5037. - -:::tip - -Adb server started on another machine should be exposed to external traffic, e.g. using option `-a`. For example, if you want to -expose the adb server and start it in foreground explicitly on port 5037: `adb nodaemon server -a -P 5037`. - -::: - - - - - -```yaml -vendorConfiguration: - type: "Android" - adbServers: - - host: 127.0.0.1 - - host: 10.0.0.2 - port: 5037 -``` - - - - -```kotlin -marathon { - adbServers = listOf( - AdbEndpoint(host = "127.0.0.1"), - AdbEndpoint(host = "10.0.0.2", port = 5037) - ) -} -``` - - - - -### Extra applications APK path - -Install extra apk before running the tests if required, e.g. test-butler.apk - -:::caution - -For Gradle Plugin users, the `extraApplications` parameter will affect all the testing apk configurations in a single module. - -::: - - - - -```yaml -vendorConfiguration: - type: "Android" - extraApplicationsApk: - - "dist/extra.apk" - - "dist/extra_two.apk" -``` - - - - -```kotlin -marathon { - extraApplications = listOf( - File(project.rootDir, "test-butler-app-2.2.1.apk") - File ("/home/user/other-apk-with-absolute-path.apk"), - ) -} -``` - - - - -### AndroidX ScreenCapture API - -Marathon supports automatic pull of screenshots taken -via [ScreenCapture API](https://developer.android.com/reference/androidx/test/runner/screenshot/package-summary) - -To enable marathon to pull screenshots you need to use a -custom [ScreenCaptureProcessor](https://developer.android.com/reference/androidx/test/runner/screenshot/ScreenCaptureProcessor) called -`AdamScreenCaptureProcessor`. - -Firstly, add `com.malinskiy.adam:android-junit4-test-annotation-producer:${LATEST_VERSION}` to your test code. - -Secondly, enable AdamScreenCaptureProcessor in your tests. You can do this manually: - -```kotlin -Screenshot.addScreenCaptureProcessors(setOf(AdamScreenCaptureProcessor())) -``` - -or using JUnit4 rule `AdamScreenCaptureRule`: - -```kotlin -class ScreenshotTest { - @get:Rule - val screencaptureRule = AdamScreenCaptureRule() - - @Test - fun testScreencapture() { - Screenshot.capture().process() - } -} - -``` - -That's it, you're done. No need for custom configuration on Marathon's side: everything should be picked up automatically. - -More information on this custom ScreenCaptureProcessor can be -found [here](https://malinskiy.github.io/adam/extensions/2-android-event-producers/#adamscreencaptureprocessor). - -### Enable window animations - -By default, marathon uses `--no-window-animation` flag. 
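This flag is passed to the instrumentation command and disables window animations for the duration of the run; the effect is similar to zeroing the animation scales manually:

```shell
adb shell settings put global window_animation_scale 0
adb shell settings put global transition_animation_scale 0
adb shell settings put global animator_duration_scale 0
```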
Use the following option if you want to enable window animations: - - - - -```yaml -vendorConfiguration: - type: "Android" - disableWindowAnimation: false -``` - - - - -```kotlin -marathon { - disableWindowAnimation = false -} -``` - - - - -[1]: https://developer.android.com/studio/ - -[2]: https://developer.android.com/studio/command-line/adb#issuingcommands - -[3]: https://github.com/allure-framework/allure-kotlin - -[4]: https://github.com/Malinskiy/adam diff --git a/docs/cloud/android/examples.md b/docs/cloud/android/examples.md deleted file mode 100644 index 48f949cf8..000000000 --- a/docs/cloud/android/examples.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Examples" ---- - -All of the sample apps are located [here][1] - -## Android application -You can find a sample of gradle based android application with one Activity and EditText with examples of flaky, failing, skipped and slow -espresso tests [here][2]. Test execution can be done using CLI and marathon gradle plugin. - -## Android library - -You can find a sample of gradle based android library with one instrumented test -[here][3]. Test execution can be done using CLI and marathon gradle plugin. - -[1]: https://github.com/MarathonLabs/marathon/tree/develop/sample - -[2]: https://github.com/MarathonLabs/marathon/tree/develop/sample/android-app - -[3]: https://github.com/MarathonLabs/marathon/tree/develop/sample/android-library diff --git a/docs/cloud/android/install.md b/docs/cloud/android/install.md deleted file mode 100644 index 5b5a3bdc3..000000000 --- a/docs/cloud/android/install.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: "Gradle Plugin" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -:::caution - -Gradle plugin is a just thin wrapper around CLI. It bundles the CLI and installs it on-the-fly. -You **should** try using [CLI][2] first - -::: - -# Tradeoffs using Gradle Plugin - -| Pros | Cons | -|------------------------------------------|-------------------------------------------------------------------------------------------| -| Configuration using Gradle syntax | Requires project sync before testing starts | -| No installation of marathon CLI required | Less flexibility in choosing AGP+Gradle versions. CLI is independent of your Gradle setup | -| | Easier to manage when you have more than 1 test run configuration | -| | Missing features, e.g. multi-module testing | - -## Install - -Marathon gradle plugin is published to [plugins.gradle.org][1]. -To apply the plugin: - - - - -```kotlin -plugins { - id("com.malinskiy.marathon") version "X.X.X" -} -``` - - - - -```groovy -plugins { - id 'com.malinskiy.marathon' version 'X.X.X' -} -``` - - - - -All the test tasks will start with **marathon** prefix, for example **marathonDebugAndroidTest**. - -[1]: https://plugins.gradle.org -[2]: /intro/install.md - -## Configure - -Configuration for Gradle Plugin can only be done via Gradle DSL, i.e. you can't use Marathonfile as configuration when running tests using Gradle Plugin. 
- -Here is an example of gradle config using Kotlin DSL: - -```kotlin -marathon { - name = "sample-app tests" - baseOutputDir = "./marathon" - outputConfiguration { - maxPath = 1024 - } - analytics { - influx { - url = "http://influx.svc.cluster.local:8086" - user = "root" - password = "root" - dbName = "marathon" - } - } - poolingStrategy { - operatingSystem = true - } - shardingStrategy { - countSharding { - count = 5 - } - } - sortingStrategy { - executionTime { - percentile = 90.0 - executionTime = Instant.now().minus(3, ChronoUnit.DAYS) - } - } - batchingStrategy { - fixedSize { - size = 10 - } - } - flakinessStrategy { - probabilityBased { - minSuccessRate = 0.8 - maxCount = 3 - timeLimit = Instant.now().minus(30, ChronoUnit.DAYS) - } - } - retryStrategy { - fixedQuota { - totalAllowedRetryQuota = 200 - retryPerTestQuota = 3 - } - } - filteringConfiguration { - allowlist { - add(SimpleClassnameFilterConfiguration(".*".toRegex())) - } - blocklist { - add(SimpleClassnameFilterConfiguration("$^".toRegex())) - } - } - includeSerialRegexes = emptyList() - excludeSerialRegexes = emptyList() - uncompletedTestRetryQuota = 100 - ignoreFailures = false - isCodeCoverageEnabled = false - fallbackToScreenshots = false - testOutputTimeoutMillis = 30_000 - debug = true - autoGrantPermission = true -} -``` - -## Execute - -Executing your tests via gradle is done via calling generated marathon gradle task, for example *marathonDebugAndroidTest*. -These tasks will be created for all testing flavors including multi-dimension setup. - -```shell-session -foo@bar $ gradle :app:marathonDebugAndroidTest -``` diff --git a/docs/cloud/cli/cicd.md b/docs/cloud/cli/cicd.md new file mode 100644 index 000000000..003247724 --- /dev/null +++ b/docs/cloud/cli/cicd.md @@ -0,0 +1,28 @@ +--- +title: "CI/CD" +--- + +### +Marathon Cloud offers several ways to run tests within your CI/CD platform: +1. Platform-Specific Steps +2. Docker Image +3. Marathon CLI + +### Platform steps +We prepared step for common platform so you could use it for your setup. +- [Github Action](https://github.com/MarathonLabs/action-test) +- [CircleCI Orb](https://circleci.com/developer/orbs/orb/marathonlabs/marathon-cloud-orb) +- [Bitrise](https://bitrise.io/integrations/steps/run-tests-using-marathon-cloud) + +### Docker image +Certain platforms provide the option to employ the [Marathon Docker Image](https://hub.docker.com/r/marathonlabs/marathon-cloud) +for each step. We recommend using it for Gitlab CI and Jenkins environments. + +### Marathon CLI +If the previously mentioned solutions are not applicable, +you have the alternative of installing the [Marathon CLI](./installation) and executing tests using it. + +### Other +If you have problems using all of the previous solutions feel free to [contact us](email:sy@marathonlabs.io). + + diff --git a/docs/cloud/cli/installation.md b/docs/cloud/cli/installation.md new file mode 100644 index 000000000..77887c40e --- /dev/null +++ b/docs/cloud/cli/installation.md @@ -0,0 +1,19 @@ +--- +title: "Installation" +--- + +### +This section of the documentation explains how to install the Marathon CLI on your local machine. +For running tests within your Continuous Integration and Delivery (CI/CD) system, please refer to the [CI/CD section](./cicd). +The installation can be performed using [Homebrew](https://brew.sh/). 
Here’s how to add the MarathonLabs repository: +```shell +brew tap malinskiy/tap +``` +Next, install the Marathon Cloud CLI: + +```shell +brew install malinskiy/tap/marathon-cloud +``` +Alternatively, you can download prebuild binaries for Windows, Linux, +or MacOS from [the Release page](https://github.com/MarathonLabs/marathon-cloud-cli/releases). + diff --git a/docs/cloud/cli/parameters.md b/docs/cloud/cli/parameters.md new file mode 100644 index 000000000..15b443d34 --- /dev/null +++ b/docs/cloud/cli/parameters.md @@ -0,0 +1,36 @@ +--- +title: "Parameters" +--- + +### +You can find all the available marathon-cli parameters by running the command "marathon-cloud --help." +Below, you'll find a list of the parameters you can set. + +```bash +marathon-cloud --help + -app string + application filepath. Required + android example: /home/user/workspace/sample.apk + ios example: /home/user/workspace/sample.zip + -testapp string + test apk file path. Required + android example: /home/user/workspace/testSample.apk + ios example: /home/user/workspace/sampleUITests-Runner.zip + -platform string + testing platform. Required + possible values: "Android" or "iOS" + -api-key string + api-key for client. Required + -os-version string + Android or iOS OS version + -link string + link to commit + -name string + name for run, for example it could be description of commit + -o string + allure raw results output folder + -system-image string + OS-specific system image. For Android one of [default,google_apis]. For iOS only [default] + -isolated bool + Run each test using isolated execution. Default is false. +``` diff --git a/docs/cloud/configuration/analytics.md b/docs/cloud/configuration/analytics.md deleted file mode 100644 index bd93e3fb8..000000000 --- a/docs/cloud/configuration/analytics.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: "Analytics" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Configuration of analytics backend to be used for storing and retrieving test metrics. This plays a major part in optimising performance and -mitigating flakiness. - -### Disable analytics - -By default, no analytics backend is expected which means that each test will be treated as a completely new test. - -### [InfluxDB v2][3] - -Assuming you've done the setup for InfluxDB v2 you need to provide: - -- url -- token - Token for authentication -- organization - Organization is the name of the organization you wish to write/read from -- bucket - Destination bucket to write/read from -- retention policy - -Bucket is quite useful in case you have multiple configurations of tests/devices and you don't want metrics from one configuration to -affect the other one, e.g. regular and end-to-end tests. - - - - -```yaml -analyticsConfiguration: - type: "influxdb2" - url: "http://influx2.svc.cluster.local:8086" - token: "my-super-secret-token" - organization: "starlabs" - bucket: "marathon" - retentionPolicyConfiguration: - everySeconds: 604800 # Duration in seconds for how long data will be kept in the database. 0 means infinite. 
minimum: 0 - shardGroupDurationSeconds: 0 # Shard duration measured in seconds -``` - - - - -```kotlin -marathon { - analytics { - influx { - url = "http://influx2.svc.cluster.local:8086" - token = "my-super-secret-token" - organization = "starlabs" - bucket = "marathon" - } - } -} -``` - - - - -### [InfluxDB][1] - -Assuming you've done the setup for InfluxDB you need to provide: - -- url -- username -- password -- database name -- retention policy - -Database name is quite useful in case you have multiple configurations of tests/devices and you don't want metrics from one configuration to -affect the other one, e.g. regular and end-to-end tests. - - - - -```yaml -analyticsConfiguration: - type: "influxdb" - url: "http://influx.svc.cluster.local:8086" - user: "root" - password: "root" - dbName: "marathon" - retentionPolicyConfiguration: - name: "rpMarathonTest" - duration: "90d" - shardDuration: "1h" - replicationFactor: 5 - isDefault: false -``` - - - - -```kotlin -marathon { - analytics { - influx { - url = "http://influx.svc.cluster.local:8086" - user = "root" - password = "root" - dbName = "marathon" - } - } -} -``` - - - - -```groovy -marathon { - analytics { - influx { - url = "http://influx.svc.cluster.local:8086" - user = "root" - password = "root" - dbName = "marathon" - } - } -} -``` - - - - -### [Graphite][2] - -Graphite can be used as an alternative to InfluxDB. It uses the following parameters: - -- host -- port (optional) - the default is 2003 -- prefix (optional) - no metrics prefix will be used if not specified - - - - -```yaml -analyticsConfiguration: - type: "graphite" - host: "influx.svc.cluster.local" - port: "8080" - prefix: "prf" -``` - - - - -```kotlin -marathon { - analytics { - graphite { - host = "influx.svc.cluster.local" - port = "8080" - prefix = "prf" - } - } -} -``` - - - - -```groovy -marathon { - analytics { - graphite { - host = "influx.svc.cluster.local" - port = "8080" - prefix = "prf" - } - } -} -``` - - - - -[1]: https://www.influxdata.com/ -[2]: https://graphiteapp.org/ -[3]: https://docs.influxdata.com/influxdb/v2.0/ diff --git a/docs/cloud/configuration/batching.md b/docs/cloud/configuration/batching.md deleted file mode 100644 index 9dd54b16f..000000000 --- a/docs/cloud/configuration/batching.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: "Batching" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Batching mechanism allows you to trade off stability for performance. A sorted group of tests without duplicates, executed using a single command is called a **batch**. -Most of the time, this means that between tests in the same batch you're sharing the device state and code state increasing the risk of side-effect because there is no external clean-up. -On the other hand you gain performance improvements since the execution command is usually quite slow (up to 10 seconds for some platforms). Most importantly, batching allows tests to be horizontally parallelized. - -### Isolate batching - -Each test is executed using separate command execution, so performance is sacrificed for stability. -This is the default mode. - - - - -```yaml -batchingStrategy: - type: "isolate" -``` - - - - -```kotlin -marathon { - batchingStrategy {} -} -``` - - - - -```groovy -marathon { - batchingStrategy {} -} -``` - - - - -### Fixed size batching - -Each batch is created based on the **size** parameter which is required. When a new batch of tests is needed the queue is dequeued for at -most **size** tests. 
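For example, with 100 tests in the queue and `size: 10`, marathon will produce 10 batches of 10 tests; a queue of 95 tests would yield 9 full batches plus a final batch of 5.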
- -Optionally if you want to limit the batch duration you have to specify the **timeLimit** for the test metrics time window and the ** -durationMillis**. For each test the analytics backend is accessed and **percentile** of it's duration is queried. If the sum of durations is -more than the **durationMillis** then no more tests are added to the batch. - -This is useful if you have extremely long tests and you use batching, e.g. you batch by size 10 and your test run duration is roughly 10 -minutes, but you have tests that are expected to run 2 minutes each. If you batch all of them together then at least one device will be -finishing it's execution in 20 minutes while all other devices might already finish. To mitigate this just specify the time limit for the -batch using **durationMillis**. - -Another optional parameter for this strategy is the **lastMileLength**. At the end of execution batching tests actually hurts the -performance so for the last tests it's much better to execute them in parallel in separate batches. This works only if you execute on -multiple devices. You can specify when this optimisation kicks in using the **lastMileLength** parameter, the last **lastMileLength** tests -will use this optimisation. - - - - -```yaml -batchingStrategy: - type: "fixed-size" - size: 5 - durationMillis: 100000 - percentile: 80.0 - timeLimit: "-PT1H" - lastMileLength: 10 -``` - - - - -```kotlin -marathon { - batchingStrategy { - fixedSize { - size = 5 - durationMillis = 100000 - percentile = 80.0 - timeLimit = Instant.now().minus(Duration.parse("PT1H")) - lastMileLength = 10 - } - } -} -``` - - - - -```groovy -marathon { - batchingStrategy { - fixedSize { - size = 5 - durationMillis = 100000 - percentile = 80.0 - timeLimit = Instant.now().minus(Duration.parse("PT1H")) - lastMileLength = 10 - } - } -} -``` - - - - - -### Test class batching - -Each batch will be based on test class size. We can advice to use this configuration wisely to avoid cross runs side effects. - - - - -```yaml -batchingStrategy: - type: "class-name" -``` - - - - -```kotlin -marathon { - batchingStrategy { - className {} - } -} -``` - - - - -```groovy -marathon { - batchingStrategy { - className {} - } -} -``` - - - diff --git a/docs/cloud/configuration/dynamic-configuration.md b/docs/cloud/configuration/dynamic-configuration.md deleted file mode 100644 index f7108b60e..000000000 --- a/docs/cloud/configuration/dynamic-configuration.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Dynamic configuration" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Marathon allows you to pass dynamic variables to your marathon configuration, e.g. a list of tests or url for analytics backend. - -## CLI - -Marathonfile support environment variable interpolation in the Marathonfile. 
Every occurance of `${X}` in the Marathonfile will be replaced -with the value of envvar `X` For example, if you want to dynamically pass the index of the test run to the fragmentation filter: - -```yaml -filteringConfiguration: - allowlist: - - type: "fragmentation" - index: ${MARATHON_FRAGMENT_INDEX} - count: 10 -``` - -and then execute the testing as following: - -```shell-session -foo@bar:~$ MARATHON_FRAGMENT_INDEX=0 marathon -``` - -## Gradle - -To pass a parameter to the Gradle's build script: - -```shell-session -foo@bar:~$ gradle -PmarathonOutputDir=reports -``` - - - - -```kotlin -marathon { - output = property("marathonOutputDir") -} -``` - - - - -```groovy -marathon { - output = property('marathonOutputDir') -} -``` - - - - -Note that dynamic properties must be named differently from fields they set. - -You can use `findProperty()` for optional properties: - -```groovy -marathon { - filteringConfiguration { - allowlist { - annotationFilter = findProperty('marathonAnnotations')?.split(',') ?: [] - } - } -} -``` - -For more info refer to the [Gradle's dynamic project properties](https://docs.gradle.org/current/javadoc/org/gradle/api/Project.html#properties) diff --git a/docs/cloud/configuration/filtering.md b/docs/cloud/configuration/filtering.md deleted file mode 100644 index 22b200ac3..000000000 --- a/docs/cloud/configuration/filtering.md +++ /dev/null @@ -1,490 +0,0 @@ ---- -title: "Filtering" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Test filtering in marathon is done using **allowlist** and **blocklist** pattern. - -:::tip - -Test filtering works after a list of tests expected to run has been determined using [test class regular expression][1]. -If your tests are not showing up after applying filtering logic - double-check the above parameter is not filtering it out. -::: - -## Filtering logic - -First allowlist is applied, then the blocklist. Each accepts a collection of **TestFilter** declarations and their parameters. - -All the filters can be used in allowlist and in blocklist block as well, for example the following will run only smoke tests: - - - - -```yaml -filteringConfiguration: - allowlist: - - type: "annotation" - values: - - "com.example.SmokeTest" -``` - - - - -```kotlin -marathon { - filteringConfiguration { - allowlist { - add(AnnotationFilterConfiguration(values = listOf("com.example.SmokeTest"))) - } - } -} -``` - - - - -```groovy -marathon { - filteringConfiguration { - allowlist { - annotationFilter = ['com.example.SmokeTest'] - } - } -} -``` - - - - -And the next snippet will execute everything, but the smoke tests: - - - - -```yaml -filteringConfiguration: - blocklist: - - type: "annotation" - values: - - "com.example.SmokeTest" -``` - - - - -```kotlin -marathon { - filteringConfiguration { - blocklist { - add(AnnotationFilterConfiguration(values = listOf("com.example.SmokeTest"))) - } - } -} -``` - - - - -```groovy -marathon { - filteringConfiguration { - blocklist { - annotationFilter = ['com.example.SmokeTest'] - } - } -} -``` - - - - -### "fully-qualified-test-name" -Filters tests by their FQTN which is `$package.$class#$method`. The `#` sign is important! - -### "fully-qualified-class-name" -Filters tests by their FQCN which is `$package.$class` | - -### "simple-class-name" -Filters tests by using only test class name, e.g. `MyTest` - -### "package" -Filters tests by using only test package, e.g. `com.example` - -### "method" -Filters tests by using only test method, e.g. 
`myComplicatedTest` - -### "annotation" -Filters tests by using only test annotation name, e.g. `androidx.test.filters.LargeTest` - -### "allure" -Filters tests by using allure-test-filter, [source][2] - -### Gradle plugin mapping - -| YAML type | Gradle class | -| --------------------------------- |:-----------------------------------------------:| -| "fully-qualified-test-name" | `FullyQualifiedTestnameFilterConfiguration` | -| "fully-qualified-class-name" | `FullyQualifiedClassnameFilterConfiguration` | -| "simple-class-name" | `SimpleClassnameFilterConfiguration` | -| "package" | `TestPackageFilterConfiguration` | -| "method" | `TestMethodFilterConfiguration` | -| "annotation" | `AnnotationFilterConfiguration` | -| "allure" | `AllureFilterConfiguration` | - -:::caution - -Gradle will require you to import the filtering classes just as any Groovy/Kotlin code would. -Using Kotlin DSL will make it a bit simpler though. - -::: - -## Filter parameters - -Each of the above filters expects **only one** of the following parameters: - -- A `regex` for matching -- An array of `values` -- A `file` that contains each value on a separate line (empty lines will be ignored) - -### Regex filtering - -An example of `regex` filtering is executing any test for a particular package, e.g. for package: `com.example` and it's subpackages: - -```yaml -filteringConfiguration: - allowlist: - - type: "package" - regex: "com\.example.*" -``` - -### Values filtering - -You could also specify each package separately via values: - -```yaml -filteringConfiguration: - allowlist: - - type: "package" - values: - - "com.example" - - "com.example.subpackage" -``` - -### Values file filtering - -Or you can supply these packages via a file (be careful with the relative paths: they will be relative to the workdir of the process): - -```yaml -filteringConfiguration: - allowlist: - - type: "package" - file: "testing/myfilterfile" -``` - -Inside the `testing/myfilterfile` you should supply the values, each on a separate line: - -```text -com.example -com.example.subpackage -``` - -### Groovy filtering - -With almost every filtering configuration (except for `annotationDataFilter`) it is possible to have `regex` and `values`. - -Providing one value will be mapped to `regex` in the generated Marathonfile, more than one will end up in `values` - -```groovy -marathon { - filteringConfiguration { - allowlist { - annotationFilter = ['com.example.SmokeTest']//mapped to regex - testMethodFilter = ['testMethod', 'testSomethingElse']//mapped to values - } - } -} -``` - -## Common examples - -### Running only specific tests - -A common scenario is to execute a list of tests. 
You can do this via the FQTN filter: - - - - -```yaml -filteringConfiguration: - allowlist: - - type: "fully-qualified-test-name" - values: - - "com.example.ElaborateTest#testMethod" - - "com.example.subpackage.OtherTest#testSomethingElse" -``` - - - - -```kotlin -marathon { - filteringConfiguration { - allowlist { - add( - FullyQualifiedTestnameFilterConfiguration( - values = listOf( - "com.example.ElaborateTest#testMethod", - "com.example.subpackage.OtherTest#testSomethingElse", - ) - ) - ) - } - } -} -``` - - - - -```groovy -marathon { - filteringConfiguration { - allowlist { - fullyQualifiedTestnameFilter = ['com.example.ElaborateTest#testMethod', 'com.example.subpackage.OtherTest#testSomethingElse'] - } - } -} -``` - - - - -### Running only specific test classes - -If you want to execute tests `ScaryTest` and `FlakyTest` for any package using the *class name* filter: - -```yaml -filteringConfiguration: - allowlist: - - type: "simple-class-name" - values: - - "ScaryTest" - - "FlakyTest" -``` - -### Extracting filtering values into a separate file -In case you want to separate the filtering configuration from the *Marathonfile* you can supply a reference to an external file: - -```yaml -filteringConfiguration: - allowlist: - - type: "simple-class-name" - file: testing/myfilterfile -``` - -:::tip - -This extraction approach works for any test filter that supports **values**. - -::: - -Inside the `testing/myfilterfile` you should supply the same values, each on a separate line, e.g. *fully qualified class name* filter: - -``` -com.example.ScaryTest -com.example.subpackage.FlakyTest -``` - -### Allure platform test filter - - - -```yaml -filteringConfiguration: - allowlist: - - type: "allure" -``` - - - - -```kotlin -marathon { - filteringConfiguration { - allowlist { - add(TestFilterConfiguration.AllureFilterConfiguration) - } - } -} -``` - - - - -```groovy -marathon { - filteringConfiguration { - allowlist { - allureTestFilter = true - } - } -} -``` - - - - -### Composition filtering - -Marathon supports filtering using multiple test filters at the same time using a *composition* filter. It accepts a list of base filters and -also an operation such as **UNION**, **INTERSECTION** or **SUBTRACT**. You can create complex filters such as get all the tests starting -with *E2E* but get only methods from there ending with *Test*. Composition filter is not supported by groovy gradle scripts, but is -supported if you use gradle kts. - -An important thing to mention is that by default platform specific ignore options are not taken into account. This is because a -cross-platform test runner cannot account for all the possible test frameworks out there. However, each framework's ignore option can still -be "explained" to marathon, e.g. JUnit's **org.junit.Ignore** annotation can be specified in the filtering configuration. - - - - -```yaml -filteringConfiguration: - allowlist: - - type: "simple-class-name" - regex: ".*" - - type: "fully-qualified-class-name" - values: - - "com.example.MyTest" - - "com.example.MyOtherTest" - - type: "fully-qualified-class-name" - file: "testing/mytestfilter" - - type: "method" - regex: "." 
- - type: "composition" - filters: - - type: "package" - regex: ".*" - - type: "method" - regex: ".*" - op: "UNION" - blocklist: - - type: "package" - regex: ".*" - - type: "annotation" - regex: ".*" -``` - - - - -```kotlin -marathon { - filteringConfiguration { - allowlist = mutableListOf( - SimpleClassnameFilterConfiguration(".*".toRegex()), - FullyQualifiedClassnameFilterConfiguration(".*".toRegex()), - TestMethodFilterConfiguration(".*".toRegex()), - CompositionFilterConfiguration( - listOf( - TestPackageFilterConfiguration(".*".toRegex()), - TestMethodFilterConfiguration(".*".toRegex()) - ), - CompositionFilterConfiguration.OPERATION.UNION - ) - ) - blocklist = mutableListOf( - TestPackageFilterConfiguration(".*".toRegex()), - AnnotationFilterConfiguration(".*".toRegex()) - ) - } -} -``` - - - - -```groovy -//With Groovy configuration, specifying only one value per configuration will result in regex -marathon { - filteringConfiguration { - allowlist { - simpleClassNameFilter = [".*"] - fullyQualifiedClassnameFilter = [".*"] - testMethodFilter = [".*"] - } - blocklist { - testPackageFilter = [".*"] - annotationFilter = [".*"] - } - } -} -``` - - - - -## Fragmented execution of tests - -This is a test filter similar to sharded test execution that [AOSP provides][3]. - -It is intended to be used in situations where it is not possible to connect multiple execution devices to a single test run, e.g. CI setup -that can schedule parallel jobs each containing a single execution device. There are two parameters for using fragmentation: - -* **count** - the number of overall fragments (e.g. 10 parallel execution) -* **index** - current execution index (in our case of 10 executions valid indexes are 0..9) - -This is a dynamic programming technique, hence the results will be sub-optimal compared to connecting multiple devices to the same test run - - - - -```yaml -filteringConfiguration: - allowlist: - - type: "fragmentation" - index: 0 - count: 10 -``` - - - - -```kotlin -marathon { - filteringConfiguration { - allowlist = mutableListOf( - FragmentationFilterConfiguration(index = 0, count = 10) - ) - } -} -``` - - - - -If you want to dynamically pass the index of the test run you can use yaml envvar interpolation, e.g.: - -```yaml -filteringConfiguration: - allowlist: - - type: "fragmentation" - index: ${MARATHON_FRAGMENT_INDEX} - count: 10 -``` - -and then execute the testing as following: - -```bash -$ MARATHON_FRAGMENT_INDEX=0 marathon -``` - -To pass the fragment index in gradle refer to -the [Gradle's dynamic project properties](https://docs.gradle.org/current/javadoc/org/gradle/api/Project.html#properties) - -[1]: /intro/configure.md#test-class-regular-expression -[2]: https://github.com/allure-framework/allure-java/tree/master/allure-test-filter -[3]: https://source.android.com/devices/tech/test_infra/tradefed/architecture/advanced/sharding diff --git a/docs/cloud/configuration/flakiness.md b/docs/cloud/configuration/flakiness.md deleted file mode 100644 index 124c51c04..000000000 --- a/docs/cloud/configuration/flakiness.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "Flakiness" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -This is the code of the prediction logic for marathon. Using the analytics backend we can understand the success rate and hence queue preventive -retries to mitigate the flakiness of the tests and test environment. - -### Ignore flakiness - -Nothing is done preventatively in this mode. This is the default behaviour. 
- - - - -```yaml -flakinessStrategy: - type: "ignore" -``` - - - - -```kotlin -marathon { - flakinessStrategy {} -} -``` - - - - -```groovy -marathon { - flakinessStrategy {} -} -``` - - - - -### Probability based flakiness strategy - -The main idea is that flakiness strategy anticipates the flakiness of the test based on the probability of test passing and tries to -maximise the probability of passing when executed multiple times. - -For example the probability of test A passing is 0.5 and configuration has -probability of 0.8 requested, then the flakiness strategy multiplies the test A to be executed 3 times (0.5 x 0.5 x 0.5 = 0.125 is the -probability of all tests failing, so with probability 0.875 > 0.8 at least one of tests will pass). - -$$ -P_{passing-with-retries} = 1 - P_{passing}^N = 1 - 0.5^3 = 0.875 -$$ - -The minimal probability that you're comfortable with is specified using **minSuccessRate** during the time window controlled by the **timeLimit**. -Additionally, if you specify too high **minSuccessRate** you'll have too many retries, so the upper bound for this is controlled by the -**maxCount** parameter so that this strategy will calculate the required number of retries according to the **minSuccessRate** but if it's -higher than the **maxCount** it will choose **maxCount**. - - - - -```yaml -flakinessStrategy: - type: "probability" - minSuccessRate: 0.7 - maxCount: 3 - timeLimit: "2015-03-14T09:26:53.590Z" -``` - - - - -```kotlin -marathon { - flakinessStrategy { - probabilityBased { - minSuccessRate = 0.7 - maxCount = 3 - timeLimit = Instant.now().minus(Duration.parse("PT1H")) - } - } -} -``` - - - - -```groovy -marathon { - flakinessStrategy { - probabilityBased { - minSuccessRate = 0.7 - maxCount = 3 - timeLimit = Instant.now().minus(Duration.parse("PT1H")) - } - } -} -``` - - - diff --git a/docs/cloud/configuration/pooling.md b/docs/cloud/configuration/pooling.md deleted file mode 100644 index 8b6441cc8..000000000 --- a/docs/cloud/configuration/pooling.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: "Pooling" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Pooling strategy defines how devices are grouped together. - -## Omni a.k.a. one huge pool - -All connected devices are merged into one group. **This is the default mode**. - - - - -```yaml -poolingStrategy: - type: "omni" -``` - - - - -```kotlin -marathon { - poolingStrategy {} -} -``` - - - - -```groovy -marathon { - poolingStrategy {} -} -``` - - - - -## By abi - -Devices are grouped by their ABI, e.g. *x86* and *mips*. - - - - -```yaml -poolingStrategy: - type: "abi" -``` - - - - -```kotlin -marathon { - poolingStrategy { - abi = true - } -} -``` - - - - -```groovy -marathon { - poolingStrategy { - abi = true - } -} -``` - - - - -## By manufacturer - -Devices are grouped by manufacturer, e.g. *Samsung* and *Yota*. - - - - -```yaml -poolingStrategy: - type: "manufacturer" -``` - - - - -```kotlin -marathon { - poolingStrategy { - manufacturer = true - } -} -``` - - - - -```groovy -marathon { - poolingStrategy { - manufacturer = true - } -} -``` - - - - -## By device model - -Devices are grouped by model name, e.g. *LG-D855* and *SM-N950F*. - - - - -```yaml -poolingStrategy: - type: "device-model" -``` - - - - -```kotlin -marathon { - poolingStrategy { - model = true - } -} -``` - - - - -```groovy -marathon { - poolingStrategy { - model = true - } -} -``` - - - - -## By OS version - -Devices are grouped by OS version, e.g. *24* and *25*. 
- - - - -```yaml -poolingStrategy: - type: "os-version" -``` - - - - -```kotlin -marathon { - poolingStrategy { - operatingSystem = true - } -} -``` - - - - -```groovy -marathon { - poolingStrategy { - operatingSystem = true - } -} -``` - - - diff --git a/docs/cloud/configuration/retries.md b/docs/cloud/configuration/retries.md deleted file mode 100644 index 3d46ce966..000000000 --- a/docs/cloud/configuration/retries.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "Retries" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -This is the logic that kicks in if preventive retries facilitated by flakiness configuration have failed. - -:::caution - -Retries from this configuration are added in-flight, i.e. after the tests were actually executed, hence -there is no way to parallelize test run. This will significantly affect the performance - -::: - -### No retries - -As the name implies, no retries are done. This is the default mode. - - - - -```yaml -retryStrategy: - type: "no-retry" -``` - - - - -```kotlin -marathon { - retryStrategy {} -} -``` - - - - -```groovy -marathon { - retryStrategy {} -} -``` - - - - -### Fixed quota retry strategy - -Parameter **totalAllowedRetryQuota** below specifies how many retries at all (for all the tests is total) are allowed. - -Parameter **retryPerTestQuota** controls how many retries can be done for each test individually. - - - - -```yaml -retryStrategy: - type: "fixed-quota" - totalAllowedRetryQuota: 100 - retryPerTestQuota: 3 -``` - - - - -```kotlin -marathon { - retryStrategy { - fixedQuota { - retryPerTestQuota = 3 - totalAllowedRetryQuota = 100 - } - } -} -``` - - - - -```groovy -marathon { - retryStrategy { - fixedQuota { - retryPerTestQuota = 3 - totalAllowedRetryQuota = 100 - } - } -} -``` - - - diff --git a/docs/cloud/configuration/sharding.md b/docs/cloud/configuration/sharding.md deleted file mode 100644 index 51eb71212..000000000 --- a/docs/cloud/configuration/sharding.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: "Sharding" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Sharding is a mechanism that allows the marathon to affect the tests scheduled for execution inside each pool. - -:::caution - -Sharding in marathon is NOT related to splitting the tests into parallel runs. -If you're looking for parallelization of marathon runs - check out the [fragmentation filter][1] - -::: - -### Parallel sharding - -Executes each test using available devices. This is the default behaviour. - - - - -```yaml -shardingStrategy: - type: "parallel" -``` - - - - -```kotlin -marathon { - shardingStrategy {} -} -``` - - - - -```groovy -marathon { - shardingStrategy {} -} -``` - - - - -### Count sharding - -Executes each test **count** times inside each pool. For example you want to test the flakiness of a specific test hence you need to execute -this test a lot of times. Instead of running the build X times just use this sharding strategy and the test will be executed X times. 
-
-```yaml
-shardingStrategy:
-  type: "count"
-  count: 5
-```
-
-```kotlin
-marathon {
-  shardingStrategy {
-    countSharding {
-      count = 5
-    }
-  }
-}
-```
-
-```groovy
-marathon {
-  shardingStrategy {
-    countSharding {
-      count = 5
-    }
-  }
-}
-```
-
-[1]: filtering.md#fragmented-execution-of-tests
diff --git a/docs/cloud/configuration/sorting.md b/docs/cloud/configuration/sorting.md
deleted file mode 100644
index f7fe0c63a..000000000
--- a/docs/cloud/configuration/sorting.md
+++ /dev/null
@@ -1,182 +0,0 @@
----
-title: "Sorting"
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Sorting strategy
-
-Prioritising the performance of test execution requires tests to be sorted. Sorting is possible only when an analytics backend is available.
-
-Sorting can be done based on test duration and success/failure rate.
-
-### No sorting
-
-No sorting of tests is done at all. This is the default behaviour.
-
-```yaml
-sortingStrategy:
-  type: "no-sorting"
-```
-
-```kotlin
-marathon {
-  sortingStrategy {}
-}
-```
-
-```groovy
-marathon {
-  sortingStrategy {}
-}
-```
-
-### Success rate sorting
-
-For each test, the analytics storage provides the success rate for a time window specified by the **timeLimit** parameter. All the tests are
-then sorted by success rate in increasing order, that is, failing tests go first and successful tests go last. If you want to reverse
-the order, set `ascending` to `true`.
-
-```yaml
-sortingStrategy:
-  type: "success-rate"
-  timeLimit: "2015-03-14T09:26:53.590Z"
-  ascending: false
-```
-
-```kotlin
-marathon {
-  sortingStrategy {
-    successRate {
-      limit = Instant.now().minus(Duration.parse("PT1H"))
-      ascending = false
-    }
-  }
-}
-```
-
-```groovy
-marathon {
-  sortingStrategy {
-    successRate {
-      limit = Instant.now().minus(Duration.parse("PT1H"))
-      ascending = false
-    }
-  }
-}
-```
-
-### Execution time sorting
-
-For each test, the analytics storage provides the X percentile duration for a time window specified by the **timeLimit** parameter. Apart
-from an absolute date/time, it can also be an ISO 8601 formatted duration.
-
-The percentile is configurable via the **percentile** parameter.
-
-All the tests are sorted so that long tests go first and short tests are executed last. This allows marathon to minimise the balancing
-error towards the end of the run.
-
-```yaml
-sortingStrategy:
-  type: "execution-time"
-  percentile: 80.0
-  timeLimit: "-PT1H"
-```
-
-```kotlin
-marathon {
-  sortingStrategy {
-    executionTime {
-      percentile = 80.0
-      timeLimit = Instant.now().minus(Duration.parse("PT1H"))
-    }
-  }
-}
-```
-
-```groovy
-marathon {
-  sortingStrategy {
-    executionTime {
-      percentile = 80.0
-      timeLimit = Instant.now().minus(Duration.parse("PT1H"))
-    }
-  }
-}
-```
-
-### Random order sorting
-
-Sorts tests in random order. This strategy may be useful for detecting hidden dependencies between tests.
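As an illustration of the kind of hidden coupling random ordering can expose, consider this contrived JUnit-style Kotlin sketch (not from the marathon codebase): a test that leaks shared state can make a later test pass only when the two happen to run in that order.

```kotlin
import org.junit.Assert.assertEquals
import org.junit.Test

// Shared mutable state that silently couples otherwise unrelated tests
object Session {
    var user: String? = null
}

class LoginTest {
    @Test
    fun login() {
        Session.user = "john" // side effect visible to any test that runs afterwards
        assertEquals("john", Session.user)
    }
}

class ProfileTest {
    @Test
    fun showsUserName() {
        // Hidden dependency: this passes only if LoginTest.login has already run.
        // With random order sorting the suite fails fast instead of passing by luck.
        assertEquals("john", Session.user)
    }
}
```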
-
-```yaml
-sortingStrategy:
-  type: "random-order"
-```
-
-```kotlin
-marathon {
-  sortingStrategy {
-    randomOrder {
-    }
-  }
-}
-```
-
-```groovy
-marathon {
-  sortingStrategy {
-    randomOrder {
-    }
-  }
-}
-```
diff --git a/docs/cloud/index.md b/docs/cloud/index.md
index 9e457baee..123551d0c 100644
--- a/docs/cloud/index.md
+++ b/docs/cloud/index.md
@@ -1,145 +1,79 @@
----
-title: "Getting started"
----
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';

-## Prepare bundles for testing
+# Quick start

-### Android
+### Install
+The installation can be performed using [Homebrew](https://brew.sh/). Here's how to add the MarathonLabs repository:
+```shell
+brew tap malinskiy/tap
+```
+Next, install the Marathon Cloud CLI:
+
+```shell
+brew install malinskiy/tap/marathon-cloud
+```
+Alternatively, you can download prebuilt binaries for Windows, Linux, or macOS from [the Release page](https://github.com/MarathonLabs/marathon-cloud-cli/releases).
+
+### API Key
+
+Token creation and management are available at the [Tokens page](https://cloud.marathonlabs.io/tokens). Generate a token and save it somewhere safe for the next step.
+
+### Samples (optional)
+
+To showcase the advantages of Marathon Cloud compared to other solutions, we've prepared a sample app with 300 tests, out of which 15% are flaky. During the initial run, our platform will gather information about the tests. During the second run, it will optimize the execution to fit within 15 minutes.
+
+Download the prebuilt iOS Application:
+
+```shell
+curl https://cloud.marathonlabs.io/samples/ios/sample.zip -o sample.zip
+```
+Download the prebuilt iOS Test Application:
-Marathon Cloud supports tests written with **UIAutomator, Cucumber, Espresso, and [Kaspresso](https://github.com/KasperskyLab/Kaspresso) frameworks**.
+```shell
+curl https://cloud.marathonlabs.io/samples/ios/sampleUITests-Runner.zip -o sampleUITests-Runner.zip
+```
-Before initiating the testing process for your application, you'll require two APK files: one for the application that's being tested, and another for the tests themselves. Typically, `debug` variants are utilized for this purpose.
+Download the prebuilt Android Application:
-If the primary application resides under the `app/` subproject, you can execute the following command to build both the app and test APK:
+```shell
+curl https://cloud.marathonlabs.io/samples/android/app.apk -o app.apk
+```
+Download the prebuilt Android Test Application:
 ```shell
-# file structure
-# |
-# |--home
-#    |--john
-#       |--project <== you are here
-#          |--app <== it's your primary application
-#          ...
-#          |--build.gradle
-#          |--settings.gradle
-./gradlew :app:assembleDebug :app:assembleDebugAndroidTest
+curl https://cloud.marathonlabs.io/samples/android/appTest.apk -o appTest.apk
 ```
-Be sure to note the relative paths of the test APK and the app APK, as they will be required for running the tests. In the context of our example, involving the `app` project and the `debug` build, these files can be located at the following paths:
+To use your own applications, please read the Documentation.

-- App APK: `/home/john/project/app/build/outputs/apk/debug/app-debug.apk`
-- Test APK: `/home/john/project/app/build/outputs/apk/androidTest/debug/app-debug-androidTest.apk`

-### iOS

-Marathon Cloud supports tests written with **XCTest and XCUITest frameworks**.
-Both the application and the tests must be built for the **ARM architecture**.
+### Execution -Before initiating the testing process for your iOS application, you’ll need to create two `.app` bundles: one for the application that's being tested, and another for the tests themselves. Typically, `debug` variants are utilized for this purpose. +Now you can start running your tests. Use the following command to execute the CLI with the necessary parameters: -Let's say our project is called "Sample". The code snippet below shows how to build the .app bundle: + + ```shell -# file structure -# | -# |--home -# |--john -# |--sample <== you are here -# |--sample <== it's your application -# ... -# |--sample.xcodeproj - -xcodebuild build-for-testing \ - -project sample.xcodeproj \ - -scheme sample \ - -destination 'platform=iOS Simulator,name=iPhone 14,OS=16.1' \ - -derivedDataPath ./build - ``` - - Note the relative paths of applications, as they will be required for running the tests. In the context of our example and `debug` build, these files can be located at the following paths: - - - Application: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sample.app` - - Test APK: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sampleUITests-Runner.app` - - One important thing to note is that `*.app` files are actually folders in disguise. To transfer them, it's necessary to convert these bundles into `.ipa` format or standard `zip` archives: - - ```shell -# file structure -# | -# |--home -# |--john -# |--sample <== you are here -# |--build <== derivedData folder -# |--sample <== it's your application -# ... -# |--sample.xcodeproj - cd build/Build/Products/Debug-iphonesimulator -# convert to zip archive in this example - zip -r sample.zip sample.app - zip -r sampleUITests-Runner.zip sampleUITests-runner.app - ``` - - Further, we will use these files: - - - Application: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sample.zip` - - Test APK: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sampleUITests-Runner.zip` - - -## Samples (optional) - - To showcase the advantages of Marathon Cloud compared to other solutions, we've prepared a sample app with 300 tests, of which 15% are flaky. During the initial run, our platform will gather information about the tests. During the second run, it will optimize it to fit within 15 minutes. - - - - ```shell -# Download the prebuilt iOS Application - curl https://cloud.marathonlabs.io/samples/ios/sample.zip -o sample.zip - -# Download the prebuilt iOS Test Application - curl https://cloud.marathonlabs.io/samples/ios/sampleUITests-Runner.zip -o sampleUITests-Runner.zip - ``` - - - - - ```shell -# Download the prebuilt Android Application - curl https://cloud.marathonlabs.io/samples/android/app.apk -o app.apk - -# Download the prebuilt Android Test Application - curl https://cloud.marathonlabs.io/samples/android/appTest.apk -o appTest.apk - ``` - - - - -## Execution - - Now you can start running your tests. Use the following command to execute the CLI with the necessary parameters: - - - - - ```shell - marathon-cloud \ - -api-key api_key \ - -app sample.zip \ - -testapp sampleUITests-Runner.zip \ - -platform iOS - ``` - - - - - ```shell - marathon-cloud \ - -api-key api_key \ - -app app.apk \ - -testapk appTest.apk \ - -platform Android - ``` - - - - - For additional parameters, refer to the [marathon-cloud-cli README](https://github.com/MarathonLabs/marathon-cloud-cli/#installation). 
+marathon-cloud -api_key generated_api_key -apk sample.zip -testapk sampleUITests-Runner.zip -platform iOS +``` + + + +```shell +marathon-cloud -api_key api_key -apk app.apk -testapk appTest.apk -platform Android +``` + + + +For additional parameters, refer to the [marathon-cloud-cli README](https://github.com/MarathonLabs/marathon-cloud-cli/#installation) + diff --git a/docs/cloud/intro/android.md b/docs/cloud/intro/android.md new file mode 100644 index 000000000..807c2fbc0 --- /dev/null +++ b/docs/cloud/intro/android.md @@ -0,0 +1,35 @@ +--- +title: "Android" +--- + +### Supported frameworks +Marathon Cloud supports tests written with +UIAutomator, Cucumber, Espresso, and [Kaspresso](https://github.com/KasperskyLab/Kaspresso) frameworks. +It is also supports tests written for Flutter apps with [Patrol](https://patrol.leancode.co/) framework. + +### Application and Test Application + +Before initiating the testing process for your application, you’ll require two APK files: +one for the application that’s being tested, and another for the tests themselves. +Typically, debug variants are utilized for this purpose. + +If the primary application resides under the app/ subproject, +you can execute the following command to build both the app and test APK: + +```shell +# file structure +# | +# |--home +# |--john +# |--project <== you are here +# |--app <== it's your primary application +# ... +# |--build.gragle +# |--settings.gradle +./gradlew :app:assembleDebug :app:assembleDebugAndroidTest +``` +Be sure to note the relative paths of the test APK and the app APK, as they will be required for running the tests. +In the context of our example, involving the `app` project and the `debug` build, these files can be located at the following paths: + +- Application APK: `/home/john/project/app/build/outputs/apk/debug/app-debug.apk` +- Test Application APK: `/home/john/project/app/build/outputs/apk/androidTest/debug/app-debug-androidTest.apk` diff --git a/docs/cloud/intro/configure.md b/docs/cloud/intro/configure.md deleted file mode 100644 index 6e4aa54d7..000000000 --- a/docs/cloud/intro/configure.md +++ /dev/null @@ -1,622 +0,0 @@ ---- -title: "Configuration" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Configuration of marathon is done using a YAML formatted configuration usually placed in the root of the project and named -**Marathonfile**. - -Below is a very simple example of Marathonfile (without the platform configuration): - -```yaml -name: "My awesome tests" -outputDir: "marathon" -debug: false -``` - -There are _a lot_ of options in marathon. This can be overwhelming especially when you're just starting out. We will split the options into -general options below, complex options that you can find as subsections in the menu on the left and platform-specific options under each -platform section. - -If you're unsure how to properly format your options in Marathonfile take a look at the samples or take a look at the [deserialisation logic][1] in the *configuration* module of the project. -Each option might use a default deserializer from yaml or a custom one. Usually the custom deserializer expects the _type_ option for polymorphic types to -understand which specific object we need to instantiate. - -## Important notes -### File-system path handling - -When specifying **relative host file** paths in the configuration they will be resolved relative to the directory of the Marathonfile, e.g. 
if -you have `/home/user/app/Marathonfile` with `baseOutputDir = "./output"` then the actual path to the output directory will -be `/home/user/app/output`. - -## Required -Below you will find a list of currently supported configuration parameters and examples of how to set them up. Keep in mind that not all of the -additional parameters are supported by each platform. If you find that something doesn't work - please submit an issue for a -platform at fault. - -### Test run configuration name - -This string specifies the name of this test run configuration. It is used mainly in the generated test reports. - - - - -```yaml -name: "My test run for sample app" -``` - - - - -```kotlin -marathon { - name = "My test run for sample app" -} -``` - - - - -```groovy -marathon { - name = "My test run for sample app" -} -``` - - - - -### Output directory - -Directory path to use as the root folder for all the runner output (logs, reports, etc). - -For gradle, the output path will automatically be set to a `marathon` folder in your reports folder unless it's overridden. - - - - -```yaml -outputDir: "build/reports/marathon" -``` - - - - -```kotlin -marathon { - baseOutputDir = "some-path" -} -``` - - - - -```groovy -marathon { - baseOutputDir = "some-path" -} -``` - - - - -### Platform-specific options -Marathon requires you to specify the platform for each run, for example: -```yaml -vendorConfiguration: - type: "Android" -``` - -Refer to platform configuration for additional options inside the `vendorConfiguration` block. - -## Optional - -### Ignore failures -By default, the build fails if some tests failed. If you want to the build to succeed even if some tests failed use *true*. - - - - -```yaml -ignoreFailures: true -``` - - - - -```kotlin -marathon { - ignoreFailures = true -} -``` - - - - -```groovy -marathon { - ignoreFailures = true -} -``` - - - - -### Code coverage -Depending on the vendor implementation code coverage may not be supported. By default, code coverage is disabled. If this option is enabled, -code coverage will be collected and marathon assumes that code coverage generation will be setup by user (e.g. proper build flags, jacoco -jar added to classpath, etc). - - - - -```yaml -isCodeCoverageEnabled: true -``` - - - - -```kotlin -marathon { - isCodeCoverageEnabled = true -} -``` - - - - -```groovy -marathon { - codeCoverageEnabled = true -} -``` - - - - -### Test output timeout -This parameter specifies the behaviour for the underlying test executor to timeout if there is no output. By default, this is set to 5 -minutes. - - - - -```yaml -testOutputTimeoutMillis: 30000 -``` - - - - -```kotlin -marathon { - testOutputTimeoutMillis = 30000 -} -``` - - - - -```groovy -marathon { - testOutputTimeoutMillis = 30000 -} -``` - - - - -### Test batch timeout - -This parameter specifies the behaviour for the underlying test executor to timeout if the batch execution exceeded some duration. By -default, this is set to 30 minutes. - - - - -```yaml -testBatchTimeoutMillis: 900000 -``` - - - - -```kotlin -marathon { - testBatchTimeoutMillis = 900000 -} -``` - - - - -```groovy -marathon { - testBatchTimeoutMillis = 900000 -} -``` - - - - -### Device provider init timeout - -When the test run starts device provider is expected to provide some devices. This should not take more than 3 minutes by default. 
If your -setup requires this to be changed please override as following: - - - - -```yaml -deviceInitializationTimeoutMillis: 300000 -``` - - - - -```kotlin -marathon { - deviceInitializationTimeoutMillis = 300000 -} -``` - - - - -```groovy -marathon { - deviceInitializationTimeoutMillis = 300000 -} -``` - - - - -### Analytics tracking - -To better understand the use-cases that marathon is used for we're asking you to provide us with anonymised information about your usage. By -default, this is enabled. Use **false** to disable. - - - - -```yaml -analyticsTracking: false -``` - - - - -```kotlin -marathon { - analyticsTracking = false -} -``` - - - - -```groovy -marathon { - analyticsTracking = false -} -``` - - - - -:::note - -analyticsTracking can also be enabled (default value) / disabled directly from the CLI. It is disabled if it's set to be disabled in either the config or the CLI. - -::: - - -### BugSnag reporting - -To better understand crashes, we report crashes with anonymised info. By default, this is enabled. Use **false** to disable. - - - - -```yaml -bugsnagReporting: false -``` - - - - -```kotlin -marathon { - bugsnagReporting = false -} -``` - - - - -```groovy -marathon { - bugsnagReporting = false -} -``` - - - - -:::note - -bugsnagReporting can also be enabled (default value) / disabled directly from the CLI. It is disabled if it's set to be disabled in either the config or the CLI. - -::: - - -### Uncompleted test retry quota -By default, tests that don't have any status reported after execution (for example a device disconnected during the execution) retry -indefinitely. You can limit the number of total execution for such cases using this option. - - - - -```yaml -uncompletedTestRetryQuota: 100 -``` - - - - -```kotlin -marathon { - uncompletedTestRetryQuota = 100 -} -``` - - - - -```groovy -marathon { - uncompletedTestRetryQuota = 100 -} -``` - - - - -### Execution strategy -When executing tests with retries there are multiple trade-offs to be made. Two execution strategies are supported: any success or all success. -By default, `ANY_SUCCESS` strategy is used with fast execution i.e. if one of the test retries succeeds then the test is considered successfully -executed and all non-started retries are removed. - -#### Any success -Test passes if any of its executions are passing. This mode works only if there is no complex sharding strategy applied. This is the default. - - - - -```yaml -executionStrategy: - mode: ANY_SUCCESS -``` - - - - -```kotlin -marathon { - executionStrategy = ExecutionStrategyConfiguration(ExecutionMode.ANY_SUCCESS) -} -``` - - - - -```groovy -marathon { - executionStrategy = ExecutionStrategyConfiguration(ExecutionMode.ANY_SUCCESS) -} -``` - - - - -:::info - -Complex sharding with `ANY_SUCCESS` mode doesn't make sense when user asks for N number of tests to run explicitly, and we pass on the first one. - -::: - -#### All success -Test passes if and only if all its executions are passing. This mode works only if there are no retries, i.e. no complex flakiness strategy, no retry strategy. - - - - -```yaml -executionStrategy: - mode: ALL_SUCCESS -``` - - - - -```kotlin -marathon { - executionStrategy = ExecutionStrategyConfiguration(ExecutionMode.ALL_SUCCESS) -} -``` - - - - -```groovy -marathon { - executionStrategy = ExecutionStrategyConfiguration(ExecutionMode.ALL_SUCCESS) -} -``` - - - - -:::info - -Adding retries with retry/flakiness strategies means users wants to trade off cost for reliability, i.e. 
add more retries and pass if one -of test retries passes, so retries only make sense for `ANY_SUCCESS` mode. - -When we use `ALL_SUCCESS` mode it means the user want to verify each test with a number of tries (they are not retries per se) and pass only if -all of them succeed. This is the case when fixing a flaky test or adding a new test, and we want to have a signal that the test is fixed/not flaky. - -::: - -#### Fast execution mode -When the test reaches a state where a decision about its state can be made, marathon can remove additional in-progress retries. -This decision point is different depending on the execution mode used. Let's walk through two examples. - -Assume `ANY_SUCCESS` strategy is used and 100 retries are scheduled for a test A via flakiness strategy. Let's say first 3 failed and the 4th attempt succeeded. At -this point the test should already be considered passed since `ANY_SUCCESS` out of all retries leads to the result by definition of `ANY_SUCCESS` -execution strategy. To save cost one can remove additional non-started retries by using fast execution mode (this is the default behavior for -`ANY_SUCCESS` strategy). On the other hand one could disable fast execution and get much more accurate statistics about this test by executing -more retries and calculating the probability of passing as a measure of flakiness for test A. - -Assume `ALL_SUCCESS` strategy is used and 100 retries are scheduled using sharding strategy. Let's say first 3 passed and the 4th attempt failed. -At this point the test should already be considered failed since any failure out of all retries leads to the result by definition of `ALL_SUCCESS` -execution strategy. You can save cost by removing additional non-started retries by using fast execution mode (this is the default behaviour for -`ALL_SUCCESS` strategy). On the other hand one could disable fast execution and verify the flakiness rate with a defined precision, in this case -there are 100 retries, so you would get precision up to 1% for test A. - - - - -```yaml -executionStrategy: - fast: true -``` - - - - -```kotlin -marathon { - executionStrategy = ExecutionStrategyConfiguration(fast = true) -} -``` - - - - -```groovy -marathon { - executionStrategy = ExecutionStrategyConfiguration(ExecutionMode.ALL_SUCCESS, true) -} -``` - - - - -### Debug mode -Enabled very verbose logging to stdout of all the marathon components. Very useful for debugging. - - - - -```yaml -debug: true -``` - - - - -```kotlin -marathon { - debug = true -} -``` - - - - -```groovy -marathon { - debug = true -} -``` - - - - -### Screen recording policy -By default, screen recording will only be pulled for tests that failed (**ON_FAILURE** option). This is to save space and also to reduce the -test duration time since we're not pulling additional files. If you need to save screen recording regardless of the test pass/failure please -use the **ON_ANY** option: - - - - -```yaml -screenRecordingPolicy: "ON_ANY" -``` - - - - -```kotlin -marathon { - screenRecordingPolicy = ScreenRecordingPolicy.ON_ANY -} -``` - - - - -```groovy -marathon { - screenRecordingPolicy = ScreenRecordingPolicy.ON_ANY -} -``` - - - - -### Output configuration -#### Max file path -By default, the max file path for any output file is capped at 255 characters due to some of OSs limitations. This is the reason why some -test runs have lots of "File path length cannot exceed" messages in the log. 
Since there is currently no API to programmatically -establish this limit it's user's responsibility to set it up to larger value if OS supports this and the user desires it. - - - - -```yaml -outputConfiguration: - maxPath: 1024 -``` - - - - -```kotlin -marathon { - outputConfiguration { - maxPath = 1024 - } -} -``` - - - - -```groovy -marathon { - outputConfiguration { - maxPath = 1024 - } -} -``` - - - - -[1]: https://github.com/MarathonLabs/marathon/blob/develop/configuration/src/main/kotlin/com/malinskiy/marathon/config/serialization/ConfigurationFactory.kt diff --git a/docs/cloud/intro/contribute.md b/docs/cloud/intro/contribute.md deleted file mode 100644 index 6f0837497..000000000 --- a/docs/cloud/intro/contribute.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docs -title: "Contribute" -category: dev -date: 2018-11-19 16:55:00 -order: 2 ---- - -Contributions to Marathon's source are welcome! Here is a quick guide on where to start. - -# Setup - -* Install your flavour of Java Development Kit, e.g. [Eclipse Temurin][1] -* Install an IDE of choice that supports Kotlin and Gradle, e.g. [IntelliJ IDEA CE][2] -* Install a git client to check out the repository, e.g. [GitHub Desktop][3] - -Checkout the marathon repository and open the project in your IDE. - -```shell-session -foo@bar $ git clone https://github.com/MarathonLabs/marathon.git -``` - -## Contribution workflow - -All issues are tracked in GitHub, so check the [issue tracker][4] and [project board][5] for a list of something to work on. -Alternatively, you can submit an issue and then work on it. Before you do that, we highly encourage you to check if this issue may be a -duplicate. If it is new - chat with us using our [Slack channel][6]. - -Once you pick your issue please assign yourself to work on it. - -All the issues relevant to a specific version are assigned to a GitHub milestone so if the issue you're working on is required to fixed for -the next version we'll add it to the current milestone. - -## Getting the source code - -You can get the source code of marathon by cloning the repo. - -```bash -git clone https://github.com/MarathonLabs/marathon.git -``` - -If you plan to submit changes to the repo then please fork the project in GitHub. If you commit frequently then we can add you to the main -repository also. - -## Included run configurations for IntelliJ IDEA - -Marathon's project has built-in run configurations for executing included sample projects: - -![html report home page](/img/idea-run-configurations.png "IntelliJ Run Configurations") - -You can use this setup to quickly debug something on a sample app, or you can change the workdir of the configuration and debug your own -codebase. Default configurations for IntelliJ use CLI version of marathon. - -## Building the project - -### CLI - -While working on an issue it's much faster to use CLI distribution which you can build using the ```:cli:installDist``` task in gradle: - -```shell-session -foo@bar $ ./gradlew :cli:installDist -``` - -This task builds marathon binary with all of its dependencies. -The output binary can be found at ```cli/build/install/marathon/bin/marathon```. - -If you use this output frequently consider changing your path to use this custom version: - -```shell-session -foo@bar $ export PATH=$PATH:$MARATHON_CHECKOUT_DIR/cli/build/install/marathon/bin -``` - -### marathon-gradle-plugin - -To test gradle plugin changes we install all modules into a maven structured folder. 
- -To install all packages into *build/repository* folder you need to execute - -```shell-session -./gradlew publishDefaultPublicationToLocalRepository -PreleaseMode=SNAPSHOT -``` - -After that you need to sync your project and point it to your local repository. Alternatively you can publish to maven local. - -By default, all sample projects depend on the local folder and pick up marathon from there. -If it's not working, check you've actually built everything related to the plugin and the version that you specified in sample project and -the one that's published do indeed match. - -## Creating a custom distribution - -If you want to create a distributable zip or a tarball: - -```shell-session -foo@bar $ ./gradlew :cli:distZip -foo@bar $ ls cli/build/distributions -marathon-X.X.X-SNAPSHOT.zip -``` - -## Testing changes - -Before trying to execute real tests try executing unit and integration tests via ```./gradlew clean test jacocoTestReport integrationTest``` -command. Assuming everything passes check relevant sample project where you can test your changes. If your change is related to the core -part then you must check that both android and ios vendor implementations will not be affected. - -## Linting - -Before pushing your changes please check if our linter (*detekt*) passes via ```./gradlew clean detektCheck``` command. - -## General overview of modules - -### core - -This is the main logic part of the runner. - -### marathon-gradle-plugin - -This is a gradle plugin implementation for Android testing - -### vendor - -This is custom vendor implementation related to specific platform. One specific implementation that is important is -vendor-test, this is a fake implementation that we're using for integration testing - -### report - -This is a group of modules which implement various reports that marathon generates after the build - -### cli - -This is the command-line interface wrapper for Marathon - -### analytics:usage - -This is an analytics implementation that we're using for tracking anonymized usage of marathon. - -# Development chat - -We're available for any questions or proposals on [Slack][6] or [Telegram][7] if you prefer to just chat. Feel free to join! - -[1]: https://projects.eclipse.org/projects/adoptium.temurin - -[2]: https://www.jetbrains.com/idea/download/ - -[3]: https://desktop.github.com/ - -[4]: https://github.com/MarathonLabs/marathon/issues - -[5]: https://github.com/MarathonLabs/marathon/projects/1 - -[6]: https://bit.ly/2LLghaW - -[7]: https://t.me/marathontestrunner diff --git a/docs/cloud/intro/execute.md b/docs/cloud/intro/execute.md deleted file mode 100644 index 6eea384f6..000000000 --- a/docs/cloud/intro/execute.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Execute" ---- - -Executing the CLI version of marathon requires you to provide all the options through the *Marathonfile*. -By default, it will be searched in the working directory by the name **Marathonfile**. - -If you need a custom file path you can specify it via the options. - -```shell-session -foo@bar $ marathon -h -usage: marathon [-h] [--marathonfile MARATHONFILE] - -optional arguments: - -h, --help show this help message and exit - - --marathonfile MARATHONFILE, marathonfile file path - -m MARATHONFILE -``` - -Marathon CLI supports the following commands: run, parse and version. 
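For example, a typical invocation that points at a non-default configuration file looks like this (the path is illustrative):

```shell-session
foo@bar $ marathon run -m config/Marathonfile
```

Each command and its options are described below.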
- -## Run command -```shell-session -foo@bar $ marathon run -h -Usage: marathon run [OPTIONS] - - Run Marathon to execute tests - -Options: - -m, --marathonfile PATH Marathonfile file path - --analyticsTracking VALUE Enable/Disable anonymous analytics tracking - --bugsnag VALUE Enable/Disable anonymous crash reporting. Enabled by default - -h, --help Show this message and exit -``` - -## Parse command -```shell-session -foo@bar $ marathon parse -h -Usage: marathon parse [OPTIONS] - - Print the list of tests without executing them - -Options: - -m, --marathonfile PATH Marathonfile file path - -o, --output TEXT Output file name in yaml format - -h, --help Show this message and exit -``` - -## Version command -```shell-session -foo@bar $ marathon version -h -Usage: marathon version [OPTIONS] - - Print version and exit - -Options: - -h, --help Show this message and exit -``` - -## Default command -Default command is the run command, so the old CLI syntax works the same way: -```shell-session -foo@bar $ marathon -m MARATHONFILE -``` diff --git a/docs/cloud/intro/faq.md b/docs/cloud/intro/faq.md deleted file mode 100644 index 92e1dcc3d..000000000 --- a/docs/cloud/intro/faq.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "FAQ" ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Below is a list of frequently asked questions - -## How to investigate device-specific problems? -Sometimes the execution device is incorrectly configured or doesn't work as expected. You have several options to identify such problems: -* grafana dashboard and see the distribution of test failures by device -* Timeline report -* Allure's timeline report - -## How to check overall test flakiness? -It's quite easy to check the overall status of your tests if you navigate to the grafana dashboard based on the InfluxDB data and check - test pass probability distribution. Ideally you should have a lot of tests either at the 1 bucket and 0. Everything in-between affects the - test performance drastically especially the buckets closer to the 1 like *[0.9 - 1.0)* so it's much more desirable to have tests which have - low probability of passing instead of the tests which have 0.9999 probability of passing (*sometimes* fails). - -## How to check for common underlying problems in tests? -Assuming your common problem throws the same message in the exception in different tests you can quickly understand this if you - navigate to **Categories** tab in the Allure report and check the number of occurrences on the right for each problem. - -## How to check quality by specific feature/team/epic? -Marathon supports tests that have meta-information such as Epic, Team or Feature from allure. This makes it possible to understand, for - example, if a particular test corresponds to a specific feature. Allure report visualises this information and makes it possible to - navigate all of them using the **Behaviors** page. - -## How to check the load distribution of test run? -Sometimes your devices might not be utilised during all of the execution time. To check this you need to have a timeline. This is available - either in timeline report or allure. Keep in mind that allure report doesn't contain the retries whereas the marathon's timeline report - does. - -## How to check the retry count of a test? -Retries can be checked in the allure report. Test pass probability on the other hand is the reason why we do retries so grafana dashboard is - another option you can check for the expected and observed retries. 
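If you prefer the command line, the raw JSON report described on the Reports page (`$output/test_result/raw.json`) can answer this as well; a sketch using `jq`, assuming the default schema where each execution of a test is a separate array entry with `class` and `method` fields:

```shell-session
foo@bar $ jq -r 'group_by(.class + "#" + .method)[] | "\(length)\t\(.[0].class)#\(.[0].method)"' raw.json | sort -rn | head
```

Any test that appears more than once was retried.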
- -## My logs/screenshots are missing from the html report -If you're opening your reports with `file://` schema then it's a known issue: modern browsers reject our requests to additional files such - as logs or videos for security reasons. CI systems usually have webserver to host these files, but local environment can also start a - webserver for example using IntelliJ based IDEs by right clicking on the html file -> open in browser. Or doing something like - `python3 -m http.server 8000` in the directory of the report and navigating to `localhost`. - -## Can I run my Android tests if my emulator(s) are on a remote instance? -Of course, you can do this by connecting your remote Android device (emulator or real device) by executing `adb connect $IP_OF_DEVICE`. - Assuming you have enabled adb over TCP/IP properly you should be able to execute your test on a remote Android device (or a hundred of - devices depending on how many you connect) - -## How to execute a single test case? -This is possible and will depend on the distribution you're using. - - - - -```yaml -filteringConfiguration: - allowlist: - - type: "fully-qualified-class-name" - regex: "com\.example\.MySingleTest" -``` - - - - -```kotlin -filteringConfiguration { - allowlist { - add(com.malinskiy.marathon.execution.FullyQualifiedClassnameFilter("com\\.example\\.MySingleTest".toRegex())) - } -} -``` - - - - -## Some of my test artifacts (videos, screenshots, etc) are missing! -Due to the nature of marathon to device connection being unreliable, we can not guarantee that every single artifact is going to be there. - Marathon tries best to pull necessary information from the device, but sometimes the device is unresponsive or just stopped working at all. - If the test passed before it died marathon considers the test to be passing and resumes the execution. - -## My test execution show 100%+ (e.g. 110%) progress. What's wrong? -This is an edge case with runtime-generated tests, e.g. Parameterized tests on Android, and how they're identified, executed and reported. -It is possible to use remote test parsing using real devices on some platforms that support it, but in general there is no solution unless -the platform supports proper test discovery. diff --git a/docs/cloud/intro/install.md b/docs/cloud/intro/install.md deleted file mode 100644 index e03ebd972..000000000 --- a/docs/cloud/intro/install.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "Install" ---- - -Marathon runs on a JVM, so you can run it on any system that JVM can be installed to: Linux, macOS, FreeBSD, etc. - -## macOS -Grab the latest release with [homebrew][5]: - -```bash -brew tap malinskiy/tap -brew install malinskiy/tap/marathon -``` - -## Other OS - -Grab the latest release from [GitHub Releases][1] page. Extract the archive into your apps folder and add the binary to your path using -local terminal session or using your profile file (.bashrc or equivalent), e.g. - -```bash -unzip -d $DESTINATION marathon-X.X.X.zip -export PATH=$PATH:$DESTINATION/marathon-X.X.X/bin -``` - -[1]: https://github.com/MarathonLabs/marathon/releases -[2]: https://search.maven.org/ -[4]: https://github.com/MarathonLabs/marathon/releases/latest -[5]: https://brew.sh/ diff --git a/docs/cloud/intro/ios.md b/docs/cloud/intro/ios.md new file mode 100644 index 000000000..fa8cc0457 --- /dev/null +++ b/docs/cloud/intro/ios.md @@ -0,0 +1,58 @@ +--- +title: "iOS" +--- + +### Supported frameworks +Marathon Cloud supports tests written with **XCTest and XCUITest frameworks**. 
+Both the application and the tests must be built for the **ARM architecture**.
+
+### Application and Test Application
+
+Before initiating the testing process for your iOS application, you'll need to create two `.app` bundles: one for the application that's being tested, and another for the tests themselves. Typically, `debug` variants are utilized for this purpose.
+Let's say our project is called "Sample". The code snippet below shows how to build the .app bundle:
+
+```shell
+# file structure
+# |
+# |--home
+#    |--john
+#       |--sample <== you are here
+#          |--sample <== it's your application
+#          ...
+#          |--sample.xcodeproj
+
+xcodebuild build-for-testing \
+  -project sample.xcodeproj \
+  -scheme sample \
+  -destination 'platform=iOS Simulator,name=iPhone 14,OS=16.1' \
+  -derivedDataPath ./build
+```
+
+Note the relative paths of the applications, as they will be required for running the tests. In the context of our example and the `debug` build, these files can be located at the following paths:
+
+- Application: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sample.app`
+- Test Application: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sampleUITests-Runner.app`
+
+One important thing to note is that `*.app` files are actually folders in disguise. To transfer them, it's necessary to convert these bundles into `.ipa` format or standard `zip` archives:
+
+```shell
+# file structure
+# |
+# |--home
+#    |--john
+#       |--sample <== you are here
+#          |--build <== derivedData folder
+#          |--sample <== it's your application
+#          ...
+#          |--sample.xcodeproj
+cd build/Build/Products/Debug-iphonesimulator
+# convert to zip archive in this example
+zip -r sample.zip sample.app
+zip -r sampleUITests-Runner.zip sampleUITests-Runner.app
+```
+
+Further, we will use these files:
+
+- Application: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sample.zip`
+- Test Application: `/home/john/sample/build/Build/Products/Debug-iphonesimulator/sampleUITests-Runner.zip`
diff --git a/docs/cloud/intro/overview.md b/docs/cloud/intro/overview.md
index 41d11ff68..0ea224b72 100644
--- a/docs/cloud/intro/overview.md
+++ b/docs/cloud/intro/overview.md
@@ -1,140 +1,45 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+---
+title: "Overview"
+---

-# Overview
+[Marathon Cloud](https://cloud.marathonlabs.io) is a cloud testing infrastructure built on top of the Marathon test runner.
+It automatically provisions virtual devices to accommodate your tests within 15 minutes.
+The test execution is then delegated to the Marathon test runner, which handles tasks such as batching, sorting, preventive retries, and post-factum retries.
+This ensures an even distribution of tests across the provisioned devices.

-Marathon is a fast, platform-independent test runner focused on performance and stability. It offers easy-to-use platform implementations for Android and iOS as well as an API for use with custom hardware farms.
+### How it works
+Whenever you submit an application and test application to Marathon Cloud, the following steps are taken:
+- Calculation of the necessary number of devices
+- Provisioning of the devices
+- Distribution and execution of the tests
+- Control of the tests and real-time load balancing
+- Generation of reports
+- Uploading of artifacts and the generated report

-Marathon implements multiple key concepts of test execution such as test **batching**, **device pools**, test **sharding**, test **sorting**, **preventive retries** as well as **post-factum retries**.
By default, most of these are set to conservative defaults but custom configurations are encouraged for those who want to optimize performance and/or stability. -Marathon's primary focus is on **full control over the balance between stability of test execution and the overall test run performance**. +### Device provisioning -# Performance -Marathon takes into account two key aspects of test execution: -* The duration of the test -* The probability of the test passing +When you run tests with Marathon Cloud for the first time, we begin storing the test history in our database. +The next time you run these tests, we already have information on the average time and the probability of a successful execution for each test. +Using this data, we calculate the necessary number of devices to ensure that your tests will complete within 15 minutes. +We also monitor the progress of test executions and adjust the distribution of tests across devices as needed. +While the tests are running, our service can dynamically increase the number of devices to expedite the execution process. -Test run can only finish as quickly as possible if we plan the execution of tests with regard to the expected duration of the test. On the other hand, we need to address the flakiness of the environment and of the test itself. One key indicator of flakiness is the *probability* of the test passing. +### Batching -Marathon takes a number of steps to ensure that each test run is as balanced as possible: -* The flakiness strategy queues up preventive retries for tests which are expected to fail during the test run according to the current real-time statistical data -* The sorting strategy forces long tests to be executed first so that if an unexpected retry attempt occurs it doesn't affect the test run significantly (e.g. at the end of execution) -* If all else fail we revert back to post-factum retries, but we try to limit their impact on the run with retry quotas +Balancing speed and stability is one of the primary challenges for Marathon Cloud. +In order to maintain fast test execution, we employ a batching strategy where we group 5 tests together in a single batch. +This approach involves executing 5 tests consecutively, and afterward, we reset the device to a clean state. +If you prefer, you can enable the "isolated" parameter to manage device cleaning yourself, +but please note that this may lead to an increase in the number of devices required and the overall time of devices taken for testing. +However, the total execution time will still be 15 minutes. -## Configuration -Create a basic **Marathonfile** in the root of your project with the following content: - - -```yaml -name: "My application" -outputDir: "build/reports/marathon" -vendorConfiguration: - type: "Android" - applicationApk: "dist/app-debug.apk" - testApplicationApk: "dist/app-debug-androidTest.apk" -``` - - -```yaml -name: "My application" -outputDir: "derived-data/Marathon" -vendorConfiguration: - type: "iOS" - bundle: - application: "sample.app" - testApplication: "sampleUITests.xctest" - testType: xcuitest -``` - - -Vendor section describes platform specific details. 
-Since iOS doesn't have any way to discover remote execution devices you have to provide your remote simulators using the **Marathondevices** file: - -```yaml -workers: - - transport: - type: local - devices: - - type: simulator - udid: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" - - type: simulatorProfile - deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini -``` - -This **Marathondevices** file specifies a list of macOS instances and simulators for use. Marathon can use pre-provisioned simulators, but it can also provision new ones if needed. - -Example above uses the local instance where marathon is executed, but you can connect many more instance via SSH. - -:::tip - -The instance where you run marathon is not limited to macOS: if you're using remote macOS instances then -you can easily start your marathon run from Linux for example. - -::: - -You can find more information on providing devices in the [workers documentation][1] - -The file structure for testing should look something like this: - - - - -```shell-session -foo@bar $ tree . -. -├── Marathondevices -├── Marathonfile -├── dist -│   ├── app-debug.apk -│   ├── app-debug-androidTest.apk -``` - - - - -```shell-session -foo@bar $ tree . -. -├── Marathondevices -├── Marathonfile -├── build -│   ├── my.app -│   ├── my.xctest - -``` - - - - -## Execution - -Start the test runner in the root of your project -```bash -$ marathon -XXX [main] INFO com.malinskiy.marathon.cli.ApplicationView - Starting marathon -XXX [main] INFO com.malinskiy.marathon.cli.config.ConfigFactory - Checking Marathonfile config -... -``` - -# Getting Started -Start by visiting the [Download and Setup][2] page to learn how to integrate Marathon into your project. - -Then take a look at [Configuration][3] page to learn the basics of configuration. - -For more help and examples continue through the rest of the Documentation section, or take a look at one of our [sample apps][4]. - -# Requirements -Marathon requires Java Runtime Environment 8 or higher. - -[1]: /ios/workers.md -[2]: /intro/install.md -[3]: /intro/configure.md -[4]: https://github.com/MarathonLabs/marathon/tree/develop/sample diff --git a/docs/cloud/intro/reports.md b/docs/cloud/intro/reports.md deleted file mode 100644 index 1117c481b..000000000 --- a/docs/cloud/intro/reports.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -title: "Reports" ---- - -There are multiple outputs available in marathon for inspection after the test run. Let's check all of them. - -## Logs -### Stdout log -The first and easiest one to is the stdout log that is printed during the execution of the test run. Enable debug mode via configuration -to have more information to inspect. - -```shell-session -foo@bar $ marathon -... - -Device emulator-5554 connected. Healthy = true -Device Unknown booted! 
-Installing application output to emulator-5554 -Installing application output to emulator-5556 -Uninstalling com.example from emulator-5554 - -Run instrumentation tests /Users/xxx/Development/marathon/sample/android-app/app/build/outputs/apk/androidTest/debug/app-debug-androidTest.apk for app /Users/xxx/Development/marathon/sample/android-app/app/build/outputs/apk/debug/app-debug.apk -Output: /Users/xxx/Development/marathon/sample/android-app/app/build/reports/marathon/debugAndroidTest -Ignore failures: false -Scheduling 28 tests -com.example.ClassIgnoredTest#testAlwaysIgnored, com.example.FailedAssumptionTest#failedAssumptionTest, com.example.FailedAssumptionTest#ignoreTest, com.example.MainActivityFlakyTest#testTextFlaky, com.example.MainActivityFlakyTest#testTextFlaky1, com.example.MainActivityFlakyTest#testTextFlaky2, com.example.MainActivityFlakyTest#testTextFlaky3, com.example.MainActivityFlakyTest#testTextFlaky4, com.example.MainActivityFlakyTest#testTextFlaky5, com.example.MainActivityFlakyTest#testTextFlaky6, com.example.MainActivityFlakyTest#testTextFlaky7, com.example.MainActivityFlakyTest#testTextFlaky8, com.example.MainActivitySlowTest#testTextSlow, com.example.MainActivitySlowTest#testTextSlow1, com.example.MainActivitySlowTest#testTextSlow2, com.example.MainActivitySlowTest#testTextSlow3, com.example.MainActivitySlowTest#testTextSlow4, com.example.MainActivityTest#testText, com.example.MainActivityTest#testText1, com.example.MainActivityTest#testText2, com.example.MainActivityTest#testText3, com.example.MainActivityTest#testText4, com.example.MainActivityTest#testText5, com.example.MainActivityTest#testText6, com.example.MainActivityTest#testText7, com.example.MainActivityTest#testText8, com.example.MainActivityTest#testText9, com.example.ParameterizedTest#test -device emulator-5554 associated with poolId omni -pool actor omni is being created -add device emulator-5554 -initialize emulator-5554 - -Installing com.example, /Users/xxx/Development/marathon/sample/android-app/app/build/outputs/apk/debug/app-debug.apk to emulator-5554 -Success -Installing instrumentation package to emulator-5554 -Uninstalling com.example.test from emulator-5554 -Installing com.example.test, /Users/xxx/Development/marathon/sample/android-app/app/build/outputs/apk/androidTest/debug/app-debug-androidTest.apk to emulator-5554 -Success -Prepare installation finished for emulator-5554 -tests = [] -Starting recording for ClassIgnoredTest.testAlwaysIgnored - -.... - -terminate emulator-5554 -Allure environment data saved. -Marathon run finished: -Device pool omni: - 23 passed, 2 failed, 3 ignored tests - Flakiness overhead: 0ms - Raw: 23 passed, 2 failed, 3 ignored, 0 incomplete tests -Total time: 0H 0m 57s -``` - -### Raw json log -In case you want to produce a custom report, or you want to push metrics based on the results of the execution raw json is probably your best - option. Each test is serialized as a json object inside an array. Keep in mind that this report shows retries, so you have full access to -what happened during the execution. 
- -This log file can be found at the `$output/test_result/raw.json` - -```json -{ - "package": "com.example", - "class": "SomeTest", - "method": "checkSomethingActuallyWorks", - "deviceSerial": "XXXXXXXXXXXX", - "ignored": false, - "success": true, - "timestamp": 1548142665055, - "duration": 13370 -} -``` - -## Visual Reports - -### Html report -This report can be found at `$output/html/index.html` - -Device pools are separated on the main page of the report: - -![html report home page](/img/screenshot-html-report-1.png "Html report") - -The pool part of the report shows test result (success, failure, ignored), video/screencapture if it's available and also device's log. - Filtering by test name and class is supported: - -![html report home page](/img/screenshot-html-report-2.png "Test list report") - -### Timeline log -This report is now part of html report but if you need to view it separately it can be found at `$output/html/timeline/index.html` - -Timeline helps identify potential misbehaviours visually helping infrastructure teams identify faulty devices and also helping developers - identify tests which affect the whole batch. For example you have a test which doesn't cleanup properly after execution and all the tests - after this one will fail in the same batch. - -![timeline report home page](/img/screenshot-timeline-report-1.png "Timeline") - -:::tip - -Hovering over a test batch will show you the name of the test. - -::: - -## CI reports -### JUnit4 report -Xml files are written according to the Junit4 specification to integrate with existing tools such as Continuous Integration servers or - third-party tools such as [Danger](https://github.com/danger/danger). - -All the reports for respective pools are placed at `$output/tests/${poolName}/*.xml`. - -### Allure report -[allure][1] report helps identify multiple possible problems during test execution. - -:::caution - -Marathon generates only the data files for report generation. -You can generate the actual allure html report via commands line or plugin options [available from allure](https://docs.qameta.io/allure/#_report_generation). - -::: - -![allure report home page](/img/screenshot-allure-report-1.png "Allure") - -One of the best use cases for this report is the grouping of problems which helps to identify if your tests have a specific issue that is - relevant for a large number of tests. Another useful feature is the ability to see all the retries visually and with video/screencapture - (if available). Allure also provides the ability to see flaky tests by storing history of the runs. - -Allure JSONs can be found at `$output/allure-results` - -:::tip - -If you're generating Allure results in your tests, you can pull those with marathon, but keep in mind that those files are not recognized -as inputs for marathon's allure report. They're different views on what happened during testing from the perspective of marathon and from -the perspective of test. - -::: - - -# Dashboards - -Marathon is able to leverage the power of InfluxDB and Graphite to store and fetch the metrics about the test runs. There are sample -dashboards for [InfluxDB][3] and [Graphite][4] that you can use in [Grafana][2] to [visualise][5] this information to better -understand what's going on. 
- -![grafana influxdb dashboard](/img/screenshot-grafana-1.png "Grafana dashboard example") - -[1]: https://github.com/allure-framework/allure2/ - -[2]: https://grafana.com/ - -[3]: https://github.com/MarathonLabs/marathon/blob/develop/assets/influxdb-grafana-dashboard.json - -[4]: https://github.com/MarathonLabs/marathon/blob/develop/assets/graphite-grafana-dashboard.json - -[5]: https://snapshot.raintank.io/dashboard/snapshot/j5rbxzFhfMDG6eKIcB9sLcH16IICyzvW?orgId=2 diff --git a/docs/cloud/intro/special-thanks.md b/docs/cloud/intro/special-thanks.md deleted file mode 100644 index b2f826a95..000000000 --- a/docs/cloud/intro/special-thanks.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "Special thanks" ---- - -# Special thanks - -We're using a lot of tools for the development of marathon. Here are just some of them that we want to thank for support: - - - - diff --git a/docs/cloud/intro/vision.md b/docs/cloud/intro/vision.md deleted file mode 100644 index 212f574e0..000000000 --- a/docs/cloud/intro/vision.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Vision" ---- - -Every project needs a vision to prioritise what needs to be done. - -# Main priorities - -* **Stability of test runs**. This concerns flakiness of tests, flakiness of environment and everything that stands in the way of developers getting proper feedback and solving their problems. This doesn't mean that we hide bad code with retries, in fact quite the opposite: we want to help people find problems that they have in the tests, visualise them, measure and solve. -* **Performance** optimisations for ultra-high parallelization. Our goal is to try to have linear scalability of test execution time in regard to the number of execution units. - -Unfortunately these two are quite intertwined, and we have to always find balance between these two preferably leaving the choice to the user of marathon and not to developers of marathon. - -# Vendor extensibility -It should be easy to extend marathon for additional platforms. We should try our best to support whatever main testing technologies are used -on each platform, but refrain from using platform-specific features which can be reused from platform-agnostic implementations. - -# Infrastructure provisioning -Setting up a testing at scale requires a lot of components, test runner is but a small piece here. - -Marathon is a test runner that doesn't and shouldn't know anything about the provisioning of compute resources since -every setup is running a different orchestration plane (kubernetes, aws, gcp, terraform, etc). It is not practical -to put the responsibility of spinning up compute into the open source version of marathon. - -Putting up proper abstractions for every vendor implementation enables marathon to support any infrastructure, e.g. -connecting to devices via adb on Android is the only way (any other interaction is just a wrapper around adb). - -This doesn't mean we don't want to help setting up infrastructure though, it's just a separate piece of the puzzle. - -[Marathon Cloud][1] is a project that aims to solve testing as a service problem at scale. If marathon seems like -what you need for running your tests but you don't have the capacity to orchestrate the required compute for your -test runs then Marathon Cloud might be a good alternative. Ideally the testing tools should allow your engineers to -work on business problems rather than reinvent yet another device farm solution, support it and scale it. 
-
-[1]: https://marathonlabs.io/
diff --git a/docs/cloud/ios.md b/docs/cloud/ios.md
deleted file mode 100644
index f05890a10..000000000
--- a/docs/cloud/ios.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: "Overview"
----
-
-Executing tests on iOS simulators requires access to Apple hardware capable of running them. This can be a local macOS instance or a
-remote instance accessible via [secure shell][2]. For remote access, file transfers are carried out incrementally using [rsync][3].
-
-The device provider can provision simulators on demand, reuse existing ones if they match the desired configuration, and utilise
-pre-provisioned simulators. See the documentation on [workers][1] for more information on this topic.
-
-Marathon can run both XCUITests and XCTests. The test bundle requires you to specify the application under test as well as the test application.
-After preprocessing, both of these inputs are distilled into an application bundle (e.g. `my.app`) and an xctest bundle (e.g. `my-tests.xctest`).
-You can specify `.ipa` [application archives][4] as well as `.zip` files with the same content as an application archive. They will be searched for the
-application and xctest bundles. If multiple entries match this description, marathon will fail.
-
-:::tip
-
-It is much easier to supply the `.app` application bundle and the `.xctest` bundle directly instead of wasting time on packaging a signed application
-archive and depending on runtime discovery of your bundles.
-
-:::
-
-## Compatibility notes
-### XcodeGen
-For users of [XcodeGen][5], marathon requires the following in your project's spec to make sure an Info.plist is generated for parsing your app's information:
-
-```yaml
-settings:
-  GENERATE_INFOPLIST_FILE: YES
-```
-
-See the [XcodeGen docs][6] for more information.
-
-
-[1]: ios/workers.md
-[2]: https://en.wikipedia.org/wiki/Secure_Shell
-[3]: https://en.wikipedia.org/wiki/Rsync
-[4]: https://en.wikipedia.org/wiki/.ipa
-[5]: https://github.com/yonaskolb/XcodeGen
-[6]: https://github.com/yonaskolb/XcodeGen/blob/a9ed3cec0800ac9a8b4cd5cfb0bb3ee76429a22c/Docs/ProjectSpec.md
diff --git a/docs/cloud/ios/configure.md b/docs/cloud/ios/configure.md
deleted file mode 100644
index 179cf8977..000000000
--- a/docs/cloud/ios/configure.md
+++ /dev/null
@@ -1,511 +0,0 @@
----
-title: "Configuration"
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-To indicate that you're using a vendor config for iOS, you have to specify
-the *type* in the configuration as follows:
-
-```yaml
-vendorConfiguration:
-  type: "iOS"
-  additional_option1: ...
-  additional_option2: ...
-```
-
-:::caution
-
-All the options below should be placed under `vendorConfiguration` with appropriate YAML indentation.
-
-:::
-
-## Required options
-### Test bundle
-Marathon can run both XCUITests and XCTests. The test bundle requires you to specify the application under test as well as the test application.
-After preprocessing, both of these inputs are distilled into an application bundle (e.g. `my.app`) and an xctest bundle (e.g. `my-tests.xctest`).
-You can specify `.ipa` [application archives][4] as well as `.zip` files with the same content as an application archive. They will be searched for the
-application and xctest bundles. If multiple entries match this description, marathon will fail.
-
-#### Raw bundles (.app + .xctest)
-```yaml
-application: "build/my.app"
-testApplication: "build/my.xctest"
-```
-
-#### Archive bundles (.ipa/.zip)
-:::tip
-
-It is much easier to supply the `.app` application bundle and the `.xctest` bundle directly instead of wasting time on packaging a signed application
-archive and depending on marathon's runtime type discovery of your bundles.
-
-:::
-If you want to specify your bundles as `.ipa`/`.zip`:
-
-```yaml
-application: "build/my.ipa"
-testApplication: "build/my.zip"
-```
-
-#### Derived data dir
-```yaml
-derivedDataDir: "derivedDataDir/"
-```
-
-#### Test type
-Marathon will detect whether the specified `.xctest` is an XCUITest bundle or an XCTest bundle. If you want to save some execution time, you
-can specify this explicitly:
-
-
-
-```yaml
-testType: "xcuitest"
-```
-
-
-
-```yaml
-testType: "xctest"
-```
-
-
-
-#### Extra applications
-Marathon can install additional applications that might be required for testing:
-
-```yaml
-extraApplications:
-  - "/path/to/additional.app"
-```
-
-### Devices
-By default, marathon will look for a file named `Marathondevices` in the same folder as the `Marathonfile` for the configuration of workers. You can
-override this location with the following property:
-
-```yaml
-devices: my/devices.yaml
-```
-
-For documentation on the format of this file, refer to the [workers documentation][1].
-
-### SSH
-:::tip
-
-This section is only required if you're using remote workers and want to provide the same ssh configuration for all of those workers.
-You can always specify and/or override this explicitly for each worker.
-
-:::
-
-For each ssh connection you can specify authentication, known-hosts verification and keep-alive:
-
-#### Public Key Authentication
-To authenticate using a private key and a username:
-
-```yaml
-ssh:
-  authentication:
-    type: "publicKey"
-    username: "username"
-    key: "/home/user/.ssh/id_rsa"
-```
-
-#### Password Authentication
-To authenticate using a username and password:
-
-:::caution
-
-`sshpass` is required to allow rsync to pick up username+password credentials.
-
-:::
-
-```yaml
-ssh:
-  authentication:
-    type: "password"
-    username: "username"
-    password: "storing-password-here-is-a-bad-idea"
-```
-
-:::tip
-
-Storing an ssh password directly in a configuration file is a bad idea. Refer to [dynamic configuration][2] and utilise envvar interpolation
-to provide the password for your test runs at runtime.
-
-:::
-
-
-#### Known hosts
-When ssh establishes a connection to a remote host, it tries to verify the identity of the remote host to mitigate a potential man-in-the-middle
-attack. You can specify the `known_hosts` file in the OpenSSH format as follows:
-
-```yaml
-ssh:
-  knownHostsPath: "/home/user/.ssh/known_hosts"
-```
-
-:::caution
-
-If you omit this configuration, marathon will trust any remote host. This is a bad idea for production.
-
-:::
-
-#### Secure Shell debug
-If you are experiencing issues with ssh connections and want more information, use the following debug flag. Caution: **a lot** of
-data is written to stdout when using this flag.
-
-```yaml
-ssh:
-  debug: true
-```
-
-## Optional
-### Collecting xcresult
-By default, marathon will pull the xcresult bundle into the output folder under device files and clean up the remote worker to avoid bloating
-its storage. To change this, override the following:
-
-:::info
-
-As of the time of writing, marathon doesn't support merging xcresult bundles and treats them as just regular file artifacts.
-
-:::
-
-```yaml
-xcresult:
-  pull: true
-  remoteClean: true
-```
-
-#### Attachment lifetime
-Marathon generates the xctestrun file for each batch and can specify a custom attachment lifetime in it. By default, system attachments will be
-deleted on success and user attachments will always be kept in the xcresult, but you can override this:
-
-```yaml
-xcresult:
-  attachments:
-    systemAttachmentLifetime: DELETE_ON_SUCCESS
-    userAttachmentLifetime: KEEP_ALWAYS
-```
-
-Possible values for the lifetime are `KEEP_ALWAYS`, `DELETE_ON_SUCCESS` and `KEEP_NEVER`.
-
-### Screen recorder configuration
-By default, marathon will record an h264-encoded video of the internal display with a black mask if it is supported.
-If you want to force screenshots or configure the recording parameters, you can specify this as follows:
-
-```yaml
-screenRecordConfiguration:
-  preferableRecorderType: "screenshot"
-```
-
-#### Video recorder configuration
-Apple's video recorder can encode videos using the `codec` values `h264` and `hevc`.
-
-:::caution
-
-HEVC-encoded videos are not supported by some web browsers. Such videos might not be playable in the HTML reports that marathon produces.
-
-:::
-
-```yaml
-screenRecordConfiguration:
-  videoConfiguration:
-    enabled: true
-    codec: h264
-    display: internal
-    mask: black
-```
-
-The `display` field can be either `internal` or `external`.
-The `mask` field can be either `black` or `ignored`.
-
-#### Screenshot configuration
-Marathon can resize and combine screenshots from the device into a GIF image:
-
-```yaml
-screenRecordConfiguration:
-  screenshotConfiguration:
-    enabled: true
-    type: jpeg
-    display: internal
-    mask: black
-    width: 720
-    height: 1280
-    # ISO_8601 duration
-    delay: PT1S
-```
-
-The `display` and `mask` fields have the same options as the video recorder.
-The `type` field specifies the format of a single frame, and it is advised not to change it.
-The `delay` field specifies the minimal delay between frames using [ISO 8601][3] notation.
-
-### xctestrun environment variables
-You can specify additional environment variables for your test run:
-```yaml
-xctestrunEnv:
-  MY_ENV_VAR_1: A
-  MY_ENV_VAR_2: B
-```
-
-These will be placed in the generated xctestrun property list file under the `TestingEnvironmentVariables` key.
-
-:::info
-
-Marathon generates the required values for `DYLD_FRAMEWORK_PATH`, `DYLD_LIBRARY_PATH` and `DYLD_INSERT_LIBRARIES` for the test environment.
-If you specify custom ones, your values will be placed as lower-priority path elements at the end of the corresponding variable.
-
-:::
-
-### xcodebuild test-without-building arguments
-You can specify additional arguments to pass to the underlying `xcodebuild test-without-building` invocation:
-```yaml
-xcodebuildTestArgs:
-  "-test-timeouts-enabled": "YES"
-  "-maximum-test-execution-time-allowance": "60"
-```
-
-It is impossible to override the following reserved arguments:
-- `-xctestrun`
-- `-enableCodeCoverage`
-- `-resultBundlePath`
-- `-destination-timeout`
-- `-destination`
-
-### Test run lifecycle
-Marathon provides two lifecycle hooks: `onPrepare` and `onDispose`.
-For each, you can specify one of the following actions: `SHUTDOWN` (shut down the simulator), `ERASE` (erase the simulator) and `TERMINATE` (terminate the simulator).
-
-These can be useful during provisioning of workers; e.g. you might want to erase the existing simulators before using them.
-
-:::warning
-
-If you specify `TERMINATE`, marathon will `kill -SIGKILL` the simulators.
-This usually results in simulators that are unable to boot (black screen), as well as a number of zombie processes, and can only be
-resolved by erasing the simulator state. In most cases `SHUTDOWN` is the recommended action.
-
-:::
-
-:::tip
-
-If you specify `ERASE`, marathon will first shut down the simulator, since it's impossible to erase it otherwise.
-
-:::
-
-An example for a cleaner test run:
-```yaml
-lifecycle:
-  onPrepare:
-    - ERASE
-  onDispose:
-    - SHUTDOWN
-```
-
-:::tip
-
-Booting simulators is an expensive operation: terminating and erasing simulators is advisable only if you can't accept side effects
-from previous test runs or other usage of the simulators.
-
-:::
-
-#### Shutdown unused simulators
-Marathon will automatically detect that some running simulators are not required by the test run and will shut them down. If you want to
-override this behaviour:
-
-```yaml
-lifecycle:
-  shutdownUnused: false
-```
-
-### Permissions
-Marathon can grant permissions to an application by bundle id during device setup, e.g.:
-
-```yaml
-permissions:
-  bundleId: sampleBundle
-  grant:
-    - contacts
-    - photos-add
-```
-
-| Permission       | Description                                           |
-|------------------|-------------------------------------------------------|
-| all              | Apply the action to all services                      |
-| calendar         | Allow access to calendar                              |
-| contacts-limited | Allow access to basic contact info                    |
-| contacts         | Allow access to full contact details                  |
-| location         | Allow access to location services when app is in use  |
-| location-always  | Allow access to location services at all times        |
-| photos-add       | Allow adding photos to the photo library              |
-| photos           | Allow full access to the photo library                |
-| media-library    | Allow access to the media library                     |
-| microphone       | Allow access to audio input                           |
-| motion           | Allow access to motion and fitness data               |
-| reminders        | Allow access to reminders                             |
-| siri             | Allow use of the app with Siri                        |
-
-### Timeouts
-All the timeouts for the test run can be overridden; here is an example configuration with the default values:
-
-```yaml
-timeoutConfiguration:
-  # ISO_8601 duration
-  shell: PT30S
-  shellIdle: PT30S
-  reachability: PT5S
-  screenshot: PT10S
-  video: PT300S
-  erase: PT30S
-  shutdown: PT30S
-  delete: PT30S
-  create: PT30S
-  boot: PT30S
-  install: PT30S
-  uninstall: PT30S
-  testDestination: PT30S
-```
-
-| Name            | Description                                                                                        |
-|-----------------|----------------------------------------------------------------------------------------------------|
-| shell           | Timeout for generic shell commands, unless a more specific action is specified                      |
-| shellIdle       | Idle timeout for generic shell commands; any input from stdout/stderr refreshes the time window     |
-| reachability    | Timeout for considering a remote worker unreachable                                                 |
-| screenshot      | Timeout for taking a screenshot                                                                     |
-| video           | Timeout for recording a video. Should be longer than the duration of your longest test              |
-| create          | Timeout for creating a simulator                                                                    |
-| boot            | Timeout for booting a simulator                                                                     |
-| shutdown        | Timeout for shutting down a simulator                                                               |
-| erase           | Timeout for erasing a simulator                                                                     |
-| delete          | Timeout for deleting a simulator                                                                    |
-| install         | Timeout for installing applications (does not apply to the app bundle or test bundle)               |
-| uninstall       | Timeout for uninstalling applications                                                               |
-| testDestination | Timeout for waiting for the simulator specified to xcodebuild                                       |
-
-### Threading
-Marathon allows you to tweak the number of threads that are used for executing coroutines:
-
-:::tip
-
-This can be important if you're connecting a lot of devices to the test execution, say 100 or 1000.
-The default of 8 threads in the device provider will take a long time to process all of those devices.
-
-:::
-
-```yaml
-threadingConfiguration:
-  deviceProviderThreads: 8
-  deviceThreads: 2
-```
-
-`deviceThreads` is the number of threads allocated for processing each device's coroutines. This includes screenshots, parsing results, etc.
-It is an advanced setting that should not be changed unless you know what you're doing. The minimum value for a stable run is 2.
-
-### Hide xcodebuild output
-By default, marathon will print the xcodebuild output during testing. You can disable it as follows:
-
-```yaml
-hideRunnerOutput: true
-```
-
-### Compact output
-By default, marathon will print the timestamp of each entry on each line.
-
-
-
-```shell-session
-foo@bar $ marathon
-D 23:08:45.855 [main] Initializing AppleDeviceProvider
-D 23:08:45.879 [AppleDeviceProvider-1] Establishing communication with ...
-D 23:08:46.226 [AppleDeviceProvider-2] Available cipher factories: ...
-```
-
-
-
-```shell-session
-foo@bar $ marathon
-D [main] Initializing AppleDeviceProvider
-D [AppleDeviceProvider-1] Establishing communication with ...
-D [AppleDeviceProvider-2] Available cipher factories: ...
-```
-
-
-
-If you want to make the output more compact by removing the timestamps:
-
-```yaml
-compactOutput: true
-```
-
-### Remote rsync configuration
-:::tip
-
-This section is relevant only if you're using remote workers.
-
-:::
-
-Override the rsync binary on the remote worker:
-
-```yaml
-rsync:
-  remotePath: "/usr/bin/rsync-custom"
-```
-
-### Test parser
-
-:::tip
-
-If you need to parallelise the execution of tests generated at runtime
-(e.g. flutter), the xctest parser is your choice.
-
-:::
-
-Test parsing (collecting the list of tests expected to execute) can be done either via binary inspection using nm or by injecting
-marathon's proprietary libxctest-parser blob, which allows marathon to collect the list of tests expected to run without actually running them.
-
-:::note
-
-We don't provide source code for the libxctest-parser module. By using libxctest-parser you're automatically accepting its [EULA][libxctest-parser-license].
-
-:::
-
-| YAML type | Pros                                                                                                                | Cons                                                                                                        |
-|-----------|---------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------|
-| "nm"      | Doesn't require installation of apps onto the device                                                                 | Doesn't support runtime-generated tests, e.g. flutter                                                        |
-| "xctest"  | Supports precise test parsing and any runtime-generated tests, hence allowing marathon to parallelise their execution | Requires a booted iOS device for parsing and a fake test run, including installation of the app under test and the test app |
-
-The default test parser is nm.
-
-
-
-```yaml
-vendorConfiguration:
-  type: "iOS"
-  testParserConfiguration:
-    type: "nm"
-    testClassRegexes:
-      - "^((?!Abstract).)*Test[s]*$"
-```
-
-
-
-```yaml
-vendorConfiguration:
-  type: "iOS"
-  testParserConfiguration:
-    type: "xctest"
-```
-
-
-
-
-[1]: workers.md
-[2]: /configuration/dynamic-configuration.md
-[3]: https://en.wikipedia.org/wiki/ISO_8601
-[4]: https://en.wikipedia.org/wiki/.ipa
-[libxctest-parser-license]: https://github.com/MarathonLabs/marathon/blob/-/vendor/vendor-ios/src/main/resources/EULA.md
diff --git a/docs/cloud/ios/examples.md b/docs/cloud/ios/examples.md
deleted file mode 100644
index ff74a7fda..000000000
--- a/docs/cloud/ios/examples.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: "Examples"
----
-
-All the sample apps are located [here][1].
-
-## iOS application
-
-You can find a sample iOS application with one screen and examples of flaky, failing, skipped and slow tests
-[here][1]. Test execution can be done using the CLI.
-
-[1]: https://github.com/MarathonLabs/marathon/tree/develop/sample/ios-app
diff --git a/docs/cloud/ios/workers.md b/docs/cloud/ios/workers.md
deleted file mode 100644
index d9ee38290..000000000
--- a/docs/cloud/ios/workers.md
+++ /dev/null
@@ -1,173 +0,0 @@
----
-title: "Workers"
----
-
-## Providing Apple workers
-To inform marathon of the accessible Apple hardware, a yaml file named **Marathondevices**
-is read on startup.
-
-The structure of the file is a workers object with a list of worker machines and the simulator devices
-that can be used or created on those workers:
-
-```yaml
-workers:
-  - transport:
-      type: ssh
-      addr: 10.0.0.2
-    devices:
-      - type: simulator
-        udid: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-  - transport:
-      type: ssh
-      addr: 10.0.0.3
-    devices:
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-```
-
-For each worker, a transport object describes how to access that particular worker.
-
-### Local worker
-If you're already running marathon on Apple hardware, then you can use it in your test runs:
-
-```yaml
-workers:
-  - transport:
-      type: local
-    devices:
-      - type: simulator
-        udid: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-```
-
-:::tip
-
-This might be all that you need to get started with marathon in terms of providing hardware, but if you want to take
-your test run performance to the next level, keep reading for the ability to parallelise your test runs across
-hundreds of simulators.
-
-:::
-
-### SSH worker
-If you want to connect to remote Apple hardware (maybe because you need to parallelise across 5 Mac Minis, or
-because you're executing the tests from a Linux machine in CI), then you can use ssh:
-
-```yaml
-workers:
-  - transport:
-      type: ssh
-      addr: 10.0.0.2
-    devices:
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-```
-
-The ssh transport accepts three parameters:
-1. **addr** - address of the host
-2. **port** - port of the ssh server, defaults to 22
-3. **authentication** - override of the authentication specifically for this worker (see the sketch below)
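-
-As a minimal sketch of a per-worker override (assuming the worker-level `authentication` object takes the same shape as the top-level ssh configuration described earlier; the address, port and key path below are placeholders):
-
-```yaml
-workers:
-  - transport:
-      type: ssh
-      addr: 10.0.0.2
-      # Hypothetical override: this worker's sshd listens on a non-default port
-      port: 2222
-      # Assumed to mirror the top-level ssh authentication block
-      authentication:
-        type: "publicKey"
-        username: "username"
-        key: "/home/user/.ssh/id_rsa"
-    devices:
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-```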
-
-## Providing simulator devices
-Each worker definition has a list of devices that can be used on that worker.
-
-:::caution
-
-Using any devices assumes you're responsible for pre-provisioning appropriate devices, or for specifying correct versions of marathon-provisioned
-devices that will work for your application.
-
-:::
-
-### simulator
-This device type is a pre-provisioned simulator identified by its UDID (Unique Device Identifier):
-
-```yaml
-workers:
-  - transport:
-      type: local
-    devices:
-      - type: simulator
-        udid: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
-```
-
-### simulatorProfile
-This device type assumes you just want some instance of a simulator of a specified type, e.g.:
-
-```yaml
-workers:
-  - transport:
-      type: ssh
-      addr: 10.0.0.2
-    devices:
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-```
-
-:::tip
-
-You can list the available device type identifiers using the following command:
-```shell-session
-foo@bar $ xcrun simctl list devicetypes
-== Device Types ==
-iPhone 4s (com.apple.CoreSimulator.SimDeviceType.iPhone-4s)
-iPhone 5 (com.apple.CoreSimulator.SimDeviceType.iPhone-5)
-iPhone 5s (com.apple.CoreSimulator.SimDeviceType.iPhone-5s)
-...
-```
-
-:::
-
-When the test run starts, marathon will check whether a device of the requested deviceType has already been created.
-If it exists, marathon will reuse it for testing. If it doesn't exist, marathon will create a new simulator.
-
-#### newNamePrefix
-By default, newly created devices will have the prefix **marathon**. You can override it if you need to:
-```yaml
-workers:
-  - transport:
-      type: ssh
-      addr: 10.0.0.2
-    devices:
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-        newNamePrefix: red-pill
-```
-
-#### runtime
-By default, marathon will use the latest runtime available in the currently active Xcode. If you want to
-specify the runtime version explicitly:
-
-```yaml
-workers:
-  - transport:
-      type: ssh
-      addr: 10.0.0.2
-    devices:
-      - type: simulatorProfile
-        deviceType: com.apple.CoreSimulator.SimDeviceType.iPhone-13-mini
-        runtime: com.apple.CoreSimulator.SimRuntime.iOS-16-2
-```
-
-:::tip
-
-You can list the available runtimes using the following command:
-```shell-session
-foo@bar $ xcrun simctl list runtimes -v
-== Runtimes ==
-iOS 16.2 (16.2 - 20C52) - com.apple.CoreSimulator.SimRuntime.iOS-16-2 [/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime]
-tvOS 16.1 (16.1 - 20K67) - com.apple.CoreSimulator.SimRuntime.tvOS-16-1 [/Library/Developer/CoreSimulator/Volumes/tvOS_20K67/Library/Developer/CoreSimulator/Profiles/Runtimes/tvOS 16.1.simruntime]
-watchOS 9.1 (9.1 - 20S75) - com.apple.CoreSimulator.SimRuntime.watchOS-9-1 [/Library/Developer/CoreSimulator/Volumes/watchOS_20S75/Library/Developer/CoreSimulator/Profiles/Runtimes/watchOS 9.1.simruntime]
-...
-```
-
-:::
-
-:::caution
-
-Most installations of Xcode have only one version of the runtime available, so pinning the runtime version
-explicitly will break such installations on an Xcode update, because the pinned runtime will no longer be available
-by default.
- -::: diff --git a/docs/cloud/cloud/pricing.md b/docs/cloud/misc/pricing.md similarity index 96% rename from docs/cloud/cloud/pricing.md rename to docs/cloud/misc/pricing.md index 41e27fe1b..936df0860 100644 --- a/docs/cloud/cloud/pricing.md +++ b/docs/cloud/misc/pricing.md @@ -1,12 +1,12 @@ --- title: "Pricing" --- - +### We operate on a Pay-As-You-Go model, meaning you only pay for the actual time spent running tests on our virtual devices. The pricing is set at a straightforward rate of $2 per hour for each virtual device. -## Billing +### Billing To better understand this, picture having a test suite that requires 2 hours and 15 minutes (or 135 minutes) to run on one device. With Marathon Cloud, we automatically deploy an infrastructure of 9 virtual devices for you (135 minutes / 9 devices = 15 minutes). We then evenly distribute the tests among these devices, run them, and deliver the results to you within just 15 minutes. However, your tests will have used up 2 hours and 15 minutes (or 2.25 hours) of our device time in total. This means the cost of this test run for you would be $4.5. -# Calculating Time per Device +### Calculating Time per Device The computation of time for each device during a test run is based on the time span from the beginning of the first test on that device to the end of the last test on the same device. This means we don't factor in the device booting process or the application installation stage. We do, however, take into account the brief pauses between tests and the periodic device clean-ups that occur when using a batching strategy. Typically, pauses between tests take only fractions of a second, but clean-ups can require up to 6-8 seconds. diff --git a/docs/cloud/sidebars.js b/docs/cloud/sidebars.js new file mode 100644 index 000000000..c44c00bc8 --- /dev/null +++ b/docs/cloud/sidebars.js @@ -0,0 +1,51 @@ +/** +* Creating a sidebar enables you to: +- create an ordered group of docs +- render a sidebar for each doc of that group +- provide next/previous navigation + +The sidebars can be generated from the filesystem, or explicitly defined here. + +Create as many sidebars as you want. 
+*/
+
+// @ts-check
+
+/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
+const sidebars = {
+  docs: [
+    {
+      type: 'category',
+      label: 'Getting started',
+      collapsed: false,
+      items: [
+        'index',
+        'intro/overview',
+        'intro/android',
+        'intro/ios',
+      ]
+    },
+    {
+      type: 'category',
+      label: 'CLI',
+      collapsed: false,
+      items: [
+        'cli/installation',
+        'cli/parameters',
+        'cli/cicd'
+      ]
+
+    },
+    {
+      type: 'category',
+      label: 'Misc',
+      collapsed: false,
+      items: [
+        'misc/pricing'
+      ]
+    },
+
+  ],
+};
+
+module.exports = sidebars;
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 3d8ab44f4..ca3489c1d 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -44,7 +44,7 @@ const config = {
         id: 'runner',
         path: 'runner',
         routeBasePath: 'runner',
-        sidebarPath: require.resolve('./sidebars.js'),
+        sidebarPath: require.resolve('./runner/sidebars.js'),
         editCurrentVersion: false,
         remarkPlugins: [math],
         rehypePlugins: [katex],
@@ -58,7 +58,20 @@ const config = {
         }
       },
     ],
-
+    [
+      '@docusaurus/plugin-content-docs',
+      {
+        id: 'enterprise',
+        path: 'enterprise',
+        routeBasePath: 'enterprise',
+        sidebarPath: require.resolve('./enterprise/sidebars.js'),
+        editCurrentVersion: false,
+        remarkPlugins: [math],
+        rehypePlugins: [katex],
+        breadcrumbs: false,
+      },
+    ],
+
   ],

  presets: [
@@ -67,8 +80,8 @@ const config = {
      {
        docs: {
          path: 'cloud',
-          routeBasePath: 'cloud',
-          sidebarPath: require.resolve('./sidebarsCloud.js'),
+          routeBasePath: '/',
+          sidebarPath: require.resolve('./cloud/sidebars.js'),
          remarkPlugins: [math],
          rehypePlugins: [katex],
          breadcrumbs: false,
@@ -106,14 +119,30 @@ const config = {
          width: 113,
        },
        items: [
+          {
+            type: 'docSidebar',
+            position: 'left',
+            sidebarId: 'docs',
+            label: 'Cloud',
+          },
+          {
+            to: 'runner',
+            label: 'OSS Runner',
+            position: 'left'
+          },
+          {
+            to: 'enterprise',
+            label: 'Enterprise',
+            position: 'left'
+          },
          {
            docsPluginId: "default",
            type: 'docsVersionDropdown',
            position: 'right',
            dropdownActiveClassDisabled: true,
            className: 'navbar__dropdown--versions cloud',
-
+
          },
          {
            docsPluginId: "runner",
@@ -121,7 +150,7 @@
            position: 'right',
            dropdownActiveClassDisabled: true,
            className: 'navbar__dropdown--versions runner',
-
+
          },
          {
diff --git a/docs/enterprise/index.md b/docs/enterprise/index.md
new file mode 100644
index 000000000..6258efbb2
--- /dev/null
+++ b/docs/enterprise/index.md
@@ -0,0 +1,5 @@
+---
+title: "Overview"
+---
+
+Coming soon
diff --git a/docs/sidebarsCloud.js b/docs/enterprise/sidebars.js
similarity index 67%
rename from docs/sidebarsCloud.js
rename to docs/enterprise/sidebars.js
index e0e35e62d..d9f692c13 100644
--- a/docs/sidebarsCloud.js
+++ b/docs/enterprise/sidebars.js
@@ -13,18 +13,17 @@ Create as many sidebars as you want.
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ const sidebars = { - docs: [ - { - type: 'category', - label: 'Getting started', - collapsed: false, - items: [ - 'index', - 'cloud/overview', - 'cloud/pricing' - ] - } - ], + docs: [ + { + type: 'category', + label: 'Getting started', + collapsed: false, + items: [ + 'index', + ] + }, + + ], }; module.exports = sidebars; diff --git a/docs/sidebars.js b/docs/runner/sidebars.js similarity index 100% rename from docs/sidebars.js rename to docs/runner/sidebars.js diff --git a/docs/src/styles/custom.scss b/docs/src/styles/custom.scss index c2f0f9253..bd0fb589a 100644 --- a/docs/src/styles/custom.scss +++ b/docs/src/styles/custom.scss @@ -186,6 +186,36 @@ iframe { margin: 1rem 0 0; } +div[role=tabpanel].tab-content-with-text { + background: white; + padding: 4px; +} + +#__docusaurus .navbar__item.navbar__link { + align-self: stretch; + display: flex; + align-items: center; + position: relative; + flex-wrap: wrap; + justify-content: space-around; + font-weight: 600; + font-size: 18px; + min-width: 100px; +} + +#__docusaurus .navbar__item:not(.dropdown).navbar__link::after { + left: 2px; + right: 2px; +} + +#__docusaurus .navbar__item:not(.dropdown).navbar__link--active { + color: var(--c-marathon-purple-bright); +} + +#__docusaurus .navbar__item:not(.dropdown).navbar__link--active::after { + background: var(--c-marathon-purple-bright); +} + html.plugin-id-default .navbar__dropdown--versions.runner { display: none !important; }