diff --git a/.gitignore b/.gitignore
index 0304f15..b2db310 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,5 @@ Session.vim
 *~
 # auto-generated tag files
 tags
+
+.DS_Store
diff --git a/.travis.yml b/.travis.yml
index 1693c3b..ef9a661 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,9 @@
 language: node_js
 node_js:
+  - v8
   - v7
   - v6
-  - v5
-  - v4
-sudo: false
+script:
+  - npm run lint
+  - npm run test
+  - npm run upload-coverage
diff --git a/README.md b/README.md
index 836a3f5..6ad6042 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,13 @@
 ## Magellan: Large-Scale Automated Testing
 
 [![Build Status](https://travis-ci.org/TestArmada/magellan.svg?branch=master)](https://travis-ci.org/TestArmada/magellan)
-[![Codecov](https://img.shields.io/codecov/c/github/codecov/example-python.svg)]()
+[![codecov](https://codecov.io/gh/TestArmada/magellan/branch/master/graph/badge.svg)](https://codecov.io/gh/TestArmada/magellan)
 [![Downloads](http://img.shields.io/npm/dm/testarmada-magellan.svg?style=flat)](https://npmjs.org/package/testarmada-magellan)
 
 ![image](https://cloud.githubusercontent.com/assets/12995/9419235/e2fbb4f2-480e-11e5-9de8-c6c4871890b9.png)
 
-Magellan is a tool for massively-scaling your automated test suite, with added reliability. Run large test suites across many environments (multiple browsers or versions, or multiple native iOS or Android devices) at the same time, in parallel, with a friendly command-line workflow that is both local development and continuous-integration friendly. Magellan is compatible with `mocha` (`wd.js`, `webdriver.io`, `appium`) tests ( [example Mocha/wd project](https://github.com/TestArmada/boilerplate-mocha) ) and `Nightwatch.js` tests ( [example Nightwatch project](https://github.com/TestArmada/boilerplate-nightwatch) ), and includes [SauceLabs](http://www.saucelabs.com/) support. Through Magellan's `mocha` support, you can scale regular node.js test suites too.
+Magellan is a tool for massively-scaling your automated test suite, with added reliability. Run large test suites across many environments (multiple browsers or versions, or multiple native iOS or Android devices) at the same time, in parallel, with a friendly command-line workflow that is both local development and continuous-integration friendly. Magellan is compatible with `mocha` (`wd.js`, `webdriver.io`, `appium`) tests and `Nightwatch.js` tests ( [example Nightwatch project](https://github.com/TestArmada/boilerplate-nightwatch) ), and includes third party browser provider support such as [SauceLabs](http://www.saucelabs.com/). Through Magellan's `mocha` support, you can scale regular node.js test suites too.
 
 Features
 ========
 
@@ -501,6 +501,23 @@ Where `browser_profiles.json` should have a structure similar to placing `profil
 }
 ```
+Magellan's Strategies
+==========================================
+
+Since 10.1.0, magellan supports strategies. A strategy is a rule that tells magellan when to do what. There are currently two kinds of strategy:
+
+## Bail strategy
+A bail strategy tells magellan when to fail the whole test suite based on the failures seen in your test run, so that a run can be terminated early instead of waiting for every remaining test to finish.
+
+Currently supported bail strategies: [magellan-early-bail-strategy](https://github.com/TestArmada/magellan-early-bail-strategy) and [magellan-fast-bail-strategy](https://github.com/TestArmada/magellan-fast-bail-strategy).
+
+Please refer to the README of each repo for more details.
+
+## Resource strategy
+A resource strategy tells magellan what to do when the resources required by a test are not available.
+
+
+
 
 Setting Up Setup and Teardown Tasks for CI
 ==========================================
diff --git a/bin/magellan b/bin/magellan
index 8155d42..dfa9907 100755
--- a/bin/magellan
+++ b/bin/magellan
@@ -3,23 +3,66 @@
 "use strict";
 
-const yargs = require("yargs");
+const co = require("co");
+const argv = require("marge").argv;
+
 const logger = require("../src/logger");
-const margs = require("marge");
+const cli = require("../src/cli");
+const settings = require("../src/settings");
+const constants = require("../src/constants");
+
+co(function *() {
+  cli.version();
+
+  yield cli.loadFramework({ argv });
+  yield cli.loadExecutors({ argv });
+
+  const profiles = yield cli.detectProfiles({ argv, settings });
+  const executors = yield cli.enableExecutors({ argv, profiles });
+  const strategies = yield cli.loadStrategies({ argv });
+  const listeners = yield cli.loadListeners({ argv });
+
+  if (argv.help) {
+    yield cli.help({ argv });
+  }
+  // console.log(profiles)
+  // console.log(strategies)
+  // console.log(listeners)
+  // console.log(executors);
+
+  const tests = yield cli.loadTests({ argv });
+  // console.log(tests);
+
+  yield cli.startTestSuite({
+    argv,
+    tests,
+    profiles,
+    executors,
+    strategies,
+    listeners
+  });
+
+  // we exit magellan process by providing code directly
+  process.exit(0);
 
-const defaultConfigFilePath = "./magellan.json";
-const configFilePath = yargs.argv.config;
+}).catch((err) => {
 
-if (configFilePath) {
-  logger.log("Will try to load configuration from " + configFilePath);
-} else {
-  logger.log("Will try to load configuration from default of " + defaultConfigFilePath);
-}
+  switch (err.code) {
+    case constants.ERROR_CODE.HELP:
+      // hacky way to exit after help
+      process.exit(0);
+      break;
 
-// NOTE: marge can throw an error here if --config points at a file that doesn't exist
-// FIXME: handle this error nicely instead of printing an ugly stack trace
-margs.init(defaultConfigFilePath, configFilePath);
+    case constants.ERROR_CODE.TEST_FAILURE:
+      // test failure
+      process.exit(1);
+      break;
 
-require("../src/cli")()
-  .then(() => process.exit(0))
-  .catch(() => process.exit(1));
+    default:
+      logger.err(`Error while running Magellan: ${err.message}`);
+      logger.err(err.stack);
+      // we exit magellan process by providing code directly
+      process.exit(1);
+      break;
+  }
+});
diff --git a/magellan.json b/magellan.json
index f1a031b..fec248a 100644
--- a/magellan.json
+++ b/magellan.json
@@ -2,7 +2,6 @@
   "mocha_tests": [
     "./integration"
   ],
-  "framework": "vanilla-mocha",
   "max_workers": 7,
   "executors": [],
   "profiles": {
diff --git a/package.json b/package.json
index ea5a135..a18aaa5 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "testarmada-magellan",
-  "version": "10.1.1",
+  "version": "11.0.0",
   "description": "Massively parallel automated testing",
   "main": "src/main",
   "directories": {
@@ -26,28 +26,25 @@
   ],
   "license": "MIT",
   "scripts": {
-    "test": "eslint src/** bin/** && mocha --recursive && npm run coverage && npm run check-coverage",
-    "dev-test": "mocha --recursive && eslint src/** bin/**",
-    "integration": "eslint src/** bin/** && ./bin/magellan",
+    "test": "jest",
     "lint": "eslint src/** bin/**",
-    "coverage": "istanbul cover _mocha -- --recursive",
-    "check-coverage": "istanbul check-coverage --statement 95 --function 95 --branch 85"
+    "upload-coverage": "codecov"
   },
   "dependencies": {
     "async": "^2.1.4",
     "cli-color": "^1.1.0",
+ "co": "^4.6.0", "glob": "^7.1.1", "lodash": "^4.6.1", "marge": "^1.0.1", - "node-slackr": "0.1.4", "once": "^1.3.1", "portscanner": "^2.1.1", "pretty-ms": "^2.1.0", - "q": "1.4.1", "request": "^2.55.0", "sanitize-filename": "^1.5.3", "slugify": "^1.0.2", "sync-request": "^4.0.1", + "testarmada-magellan-local-executor": "^2.0.0", "testarmada-tree-kill": "^2.0.0", "yargs": "6.5.0" }, @@ -61,14 +58,32 @@ }, "devDependencies": { "babel-eslint": "^7.1.1", - "chai": "^3.4.1", - "chai-as-promised": "^6.0.0", + "codecov": "^1.0.1", "eslint": "^3.12.2", "eslint-config-walmart": "^1.1.0", "eslint-plugin-filenames": "^1.1.0", "istanbul": "^0.4.5", - "mocha": "^3.2.0", + "jest": "^22.4.3", "sinon": "^1.17.6", "testarmada-magellan-mocha-plugin": "^7.0.1" + }, + "jest": { + "verbose": true, + "collectCoverage": true, + "coverageDirectory": "./coverage", + "coverageReporters": [ + "lcov" + ], + "resetMocks": true, + "testMatch": [ + "**/test/**/**.test.js" + ], + "coverageThreshold": { + "global": { + "statements": 90, + "branches": 80, + "functions": 90 + } + } } } diff --git a/src/cli.js b/src/cli.js index be852c0..58be81e 100644 --- a/src/cli.js +++ b/src/cli.js @@ -9,13 +9,10 @@ // the package.json that contains magellan resides. In addition // configuration must either be explicitly specified relative to that directory // or absolutely (or exist implicitly in the default location) - -const yargs = require("yargs"); +const async = require("async"); const path = require("path"); const _ = require("lodash"); -const margs = require("marge"); -const async = require("async"); -const Q = require("q"); +const clc = require("cli-color"); const analytics = require("./global_analytics"); const TestRunner = require("./test_runner"); @@ -26,519 +23,435 @@ const settings = require("./settings"); const profiles = require("./profiles"); const loadRelativeModule = require("./util/load_relative_module"); const processCleanup = require("./util/process_cleanup"); -const magellanArgs = require("./help").help; +const constants = require("./constants"); const logger = require("./logger"); -const BailStrategy = require("./bail"); - -module.exports = (opts) => { - const defer = Q.defer(); - - const runOpts = _.assign({ - require, - analytics, - settings, - yargs, - margs, - WorkerAllocator, - TestRunner, - process, - getTests, - testFilters, - processCleanup, - profiles, - path, - loadRelativeModule - }, opts); - - const project = runOpts.require("../package.json"); - - logger.log("Magellan " + project.version); - - // const isNodeBased = runOpts.margs.argv.framework && - // runOpts.margs.argv.framework.indexOf("mocha") > -1; - - const debug = runOpts.margs.argv.debug || false; - const useSerialMode = runOpts.margs.argv.serial; - let MAX_TEST_ATTEMPTS = parseInt(runOpts.margs.argv.max_test_attempts) || 3; - let targetProfiles; - let workerAllocator; - let MAX_WORKERS; - - const magellanGlobals = { - analytics: runOpts.analytics - }; - - - runOpts.analytics.push("magellan-run"); - runOpts.analytics.push("magellan-busy", undefined, "idle"); - - // - // Initialize Framework Plugins - // ============================ - // TODO: move to a function - - // We translate old names like "mocha" to the new module names for the - // respective plugins that provide support for those frameworks. 
Officially, - // moving forward, we should specify our framework (in magellan.json) - const legacyFrameworkNameTranslations = { - "rowdy-mocha": "testarmada-magellan-mocha-plugin", - "vanilla-mocha": "testarmada-magellan-mocha-plugin", - "nightwatch": "testarmada-magellan-nightwatch-plugin" - }; - - if (legacyFrameworkNameTranslations[runOpts.settings.framework]) { - runOpts.settings.framework = legacyFrameworkNameTranslations[runOpts.settings.framework]; - } - let frameworkLoadException; - try { - // - // HELP WANTED: If someone knows how to do this more gracefully, please contribute! - // - const frameworkModulePath = "./node_modules/" + runOpts.settings.framework + "/index"; - runOpts.settings.testFramework = runOpts.require(runOpts.path.resolve(frameworkModulePath)); - } catch (e) { - frameworkLoadException = e; - } +const BailStrategy = require("./strategies/bail"); +const ResourceStrategy = require("./strategies/resource"); - let frameworkInitializationException; - try { - const pkg = runOpts.require(runOpts.path.join(runOpts.process.cwd(), "package.json")); - - runOpts.settings.pluginOptions = null; - if (runOpts.settings.testFramework.getPluginOptions - && typeof runOpts.settings.testFramework.getPluginOptions === "function") { - // backward support - runOpts.settings.pluginOptions - = runOpts.settings.testFramework.getPluginOptions( - { - rootPackage: pkg, - rootWorkingDirectory: runOpts.process.cwd() - }); - } - runOpts.settings.testFramework.initialize(runOpts.margs.argv, runOpts.settings.pluginOptions); - } catch (e) { - frameworkInitializationException = e; - } +module.exports = { + + initialize() { + + }, - if (!runOpts.settings.testFramework || - frameworkLoadException || - frameworkInitializationException) { - logger.err("Could not start Magellan."); - if (frameworkLoadException) { - logger.err("Could not load the testing framework plugin '" - + runOpts.settings.framework + "'."); - logger.err("Check and make sure your package.json includes a module named '" - + runOpts.settings.framework + "'."); - logger.err("If it does not, you can remedy this by typing:" - + "\nnpm install --save " + runOpts.settings.framework); - logger.err(frameworkLoadException); - } else /* istanbul ignore else */ if (frameworkInitializationException) { - logger.err("Could not initialize the testing framework plugin '" - + runOpts.settings.framework + "'."); - logger.err("This plugin was found and loaded, but an error occurred during initialization:"); - logger.err(frameworkInitializationException); + version() { + const project = require("../package.json"); + logger.log(`Version: ${clc.greenBright(project.version)}`); + logger.log("Use --help to list out all command options"); + }, + + help(opts) { + // Show help + logger.log("Printing magellan command line arguments:"); + require("./cli_help").help(opts); + + // exit process with exit code 0 + const e = new Error("end of help"); + e.code = constants.ERROR_CODE.HELP; + + return Promise.reject(e); + }, + + loadFramework(opts) { + if (opts.mockFramework) { + settings.framework = opts.mockFramework; + } + // + // Initialize Framework Plugins + // ============================ + + // We translate old names like "mocha" to the new module names for the + // respective plugins that provide support for those frameworks. 
Officially, + // moving forward, we should specify our framework (in magellan.json) + const legacyFrameworkNameTranslations = { + "rowdy-mocha": "testarmada-magellan-mocha-plugin", + "vanilla-mocha": "testarmada-magellan-mocha-plugin", + "nightwatch": "testarmada-magellan-nightwatch-plugin" + }; + + if (legacyFrameworkNameTranslations[settings.framework]) { + settings.framework = legacyFrameworkNameTranslations[settings.framework]; } - defer.reject({ error: "Couldn't start Magellan" }); - } + return new Promise((resolve, reject) => { + + let frameworkLoadException; + try { + // + // HELP WANTED: If someone knows how to do this more gracefully, please contribute! + // + const frameworkModulePath = "./node_modules/" + settings.framework + "/index"; + settings.testFramework = require(path.resolve(frameworkModulePath)); + } catch (e) { + frameworkLoadException = e; + } + + let frameworkInitializationException; + try { + const pkg = require(path.join(process.cwd(), "package.json")); + + settings.pluginOptions = null; + + if (settings.testFramework + && settings.testFramework.getPluginOptions + && _.isFunction(settings.testFramework.getPluginOptions)) { + // backward support + settings.pluginOptions + = settings.testFramework.getPluginOptions( + { + rootPackage: pkg, + rootWorkingDirectory: process.cwd() + }); + } + settings.testFramework.initialize(opts.argv, settings.pluginOptions); + } catch (e) { + frameworkInitializationException = e; + } - logger.log("Loaded test framework: "); - logger.log(" " + runOpts.settings.framework); - // - // Initialize Executor - // ============================ - // TODO: move to a function - // TODO: move to a function - // let formalExecutor = ["local"]; - let formalExecutors = ["testarmada-magellan-local-executor"]; - - // executors is as array from magellan.json by default - if (runOpts.margs.argv.executors) { - if (_.isArray(runOpts.margs.argv.executors)) { - formalExecutors = runOpts.margs.argv.executors; - } else if (_.isString(runOpts.margs.argv.executors)) { - formalExecutors = [runOpts.margs.argv.executors]; + if (!settings.testFramework || + frameworkLoadException || + frameworkInitializationException) { + + logger.err("Could not start Magellan."); + + if (frameworkLoadException) { + logger.err("Could not load the testing framework plugin:" + + ` ${settings.framework}`); + logger.err("Check and make sure your package.json includes module:" + + ` ${settings.framework}`); + logger.err(frameworkLoadException); + } else /* istanbul ignore else */ if (frameworkInitializationException) { + logger.err("Could not initialize the testing framework plugin:" + + ` ${settings.framework}`); + logger.err("This plugin was found and loaded, but an error" + + " occurred during initialization:"); + logger.err(frameworkInitializationException); + } + + return reject("Couldn't start Magellan"); + } + + logger.log("Loaded test framework from magellan.json: "); + logger.log(` ${clc.greenBright(settings.framework)}`); + return resolve(); + }); + }, + + loadExecutors(opts) { + // Initialize Executor + // ============================ + let formalExecutors = ["testarmada-magellan-local-executor"]; + + // executors is as array from magellan.json by default + if (opts.argv.executors) { + if (_.isArray(opts.argv.executors)) { + formalExecutors = opts.argv.executors; + } else if (_.isString(opts.argv.executors)) { + formalExecutors = [opts.argv.executors]; + } else { + logger.err("Executors only accepts string and array"); + logger.warn("Setting executor to 
[testarmada-magellan-local-executor] by default"); + } } else { - logger.err("Executors only accepts string and array"); - logger.warn("Setting executor to \"local\" by default"); + logger.warn("No executor is configured"); + logger.warn("Setting executor to [testarmada-magellan-local-executor] by default"); } - } else { - logger.warn("No executor is passed in"); - logger.warn("Setting executor to \"local\" by default"); - } - runOpts.settings.executors = formalExecutors; + settings.executors = formalExecutors; - // load executor - const executorLoadExceptions = []; - runOpts.settings.testExecutors = {}; + return new Promise((resolve, reject) => { + // load executor + const executorLoadExceptions = []; + settings.testExecutors = {}; - _.forEach(runOpts.settings.executors, (executor) => { - try { - const targetExecutor = runOpts.require(executor); - targetExecutor.validateConfig(runOpts); - runOpts.settings.testExecutors[targetExecutor.shortName] = targetExecutor; - } catch (e) { - executorLoadExceptions.push(e); - } - }); + logger.log("Loaded test executors from magellan.json: "); - if (executorLoadExceptions.length > 0) { - // error happens while loading executor - logger.err("There are errors in loading executors"); - _.forEach(executorLoadExceptions, (exception) => { - logger.err(exception.toString()); - }); + _.forEach(settings.executors, (executor) => { + try { + const targetExecutor = require(executor); + logger.log(" " + targetExecutor.name); + // targetExecutor.validateConfig(opts.argv); + settings.testExecutors[targetExecutor.shortName] = targetExecutor; + } catch (e) { + executorLoadExceptions.push(e); + } + }); - defer.reject({ error: "Couldn't start Magellan" }); - } + if (executorLoadExceptions.length > 0) { + // error happens while loading executor + logger.err("There are errors in loading executors"); - logger.log("Loaded test executors: "); - _.forEach(runOpts.settings.testExecutors, (executor) => { - logger.log(" " + executor.name); - }); + _.forEach(executorLoadExceptions, (exception) => { + logger.err(exception.toString()); + }); - const testExecutors = runOpts.settings.testExecutors; + return reject("Couldn't start Magellan"); + } - // - // Initialize Strategy - // ==================== + return resolve(); + }); + }, - if (!runOpts.settings.strategies) { - runOpts.settings.strategies = {}; - } + loadStrategies(opts) { + // + // Initialize Strategy + // ==================== + if (!settings.strategies) { + settings.strategies = {}; + } + return new Promise((resolve, reject) => { + // Strategy - bail -------------------- + try { + settings.strategies.bail = + new BailStrategy(opts.argv); - // - // Initialize Bail Strategy - // ==================== - // - // There is only one bail strategy allowed per magellan instance. - // Bail strategy is configured via --strategy_bail. - // If no --strategy_bail , enable ./strategies/bail_never by default - let bailRule = runOpts.margs.argv.strategy_bail ? - runOpts.margs.argv.strategy_bail : "./strategies/bail_never"; - - // -------------------- - // ALERT!!!!! 
Will be deprecated in next release - // - // To backward support magellan's bail command line arguments - // The bail strategy will be for the whole suite, so if --bail_time is set explicitly - // the bail_never strategy will be used for whole suite and --bail_time will be applied - // to test only - - if (Boolean(runOpts.margs.argv.bail_fast) - && runOpts.margs.argv.bail_fast !== "false") { - bailRule = "./strategies/bail_fast"; - } else if (Boolean(runOpts.margs.argv.bail_early) - && runOpts.margs.argv.bail_early !== "false") { - bailRule = "./strategies/bail_early"; - } else if (Boolean(runOpts.margs.argv.bail_time) - && runOpts.margs.argv.bail_time !== "false") { - bailRule = "./strategies/bail_never"; - } + if (settings.strategies.bail.MAX_TEST_ATTEMPTS) { + // bail strategy can define its own test attempts + settings.MAX_TEST_ATTEMPTS = settings.strategies.bail.MAX_TEST_ATTEMPTS; + } + + logger.log("Enabled bail strategy: "); + logger.log(` ${clc.greenBright(settings.strategies.bail.name)}:`); + logger.log(` -> ${settings.strategies.bail.getDescription()}`); + } catch (err) { + logger.err(`Cannot load bail strategy due to ${err}`); + logger.err("Please npm install and configure it in magellan.json"); + return reject("Couldn't start Magellan"); + } + + // Strategy - resource -------------------- + try { + settings.strategies.resource = + new ResourceStrategy(opts.argv); + + logger.log("Enabled resource strategy: "); + logger.log(` ${clc.greenBright(settings.strategies.resource.name)}:`); + logger.log(` -> ${settings.strategies.resource.getDescription()}`); + } catch (err) { + logger.err(`Cannot load resource strategy due to ${err}`); + logger.err("Please npm install and configure in magellan.json"); + return reject("Couldn't start Magellan"); + } - // -------------------- + return resolve(settings.strategies); + }); + }, - try { - runOpts.settings.strategies.bail = new BailStrategy(bailRule); - runOpts.settings.strategies.bail.configure(runOpts.margs.argv); + loadListeners(opts) { + // + // Initialize Listeners + // ==================== + // + // All listener/reporter types are optional and either activated through the existence + // of configuration (i.e environment vars), CLI switches, or magellan.json config. + let listeners = []; - if (runOpts.settings.strategies.bail.MAX_TEST_ATTEMPTS) { - // backward support - // bail strategy can define its own test attempts - MAX_TEST_ATTEMPTS = runOpts.settings.strategies.bail.MAX_TEST_ATTEMPTS; + // + // Setup / Teardown + // ================ + // + // This is merely a listener like any other reporter, but with a developer-friendly name. + if (opts.argv.setup_teardown) { + // NOTE: loadRelativeModule can throw an error here if the setup module doesn't exist + // FIXME: handle this error nicely instead of printing an ugly stack trace + listeners.push(loadRelativeModule(opts.argv.setup_teardown)); } - logger.log("Enabled bail strategy: "); - logger.log(` ${runOpts.settings.strategies.bail.name}: ` - + `${runOpts.settings.strategies.bail.getDescription()}`); - } catch (e) { - logger.err("Error: bail strategy: " + bailRule - + " cannot be loaded because of error [" + e + "]"); - defer.reject({ error: "Couldn't start Magellan" }); - } + // + // Load reporters from magellan.json + // ================================= + // + // Reporters that conform to the reporter API and inherit from src/reporter + // can be loaded in magellan.json through a reporters[] list. 
These can refer to + // either npm modules defined in package.json or to paths relative to the current + // working directory of the calling script or shell. + if (opts.argv.reporters + && _.isArray(opts.argv.reporters)) { + // NOTE: loadRelativeModule can throw an error here if any of the reporter modules don't exist + // FIXME: handle this error nicely instead of printing an ugly stack trace + listeners = listeners.concat( + opts.argv.reporters.map((reporterModule) => + loadRelativeModule(reporterModule)) + ); + } - // finish processing all params =========================== + // optional_reporters are modules we want to load only if found. If not found, we + // still continue initializing Magellan and don't throw any errors or warnings + if (opts.argv.optional_reporters + && _.isArray(opts.argv.optional_reporters)) { + listeners = listeners.concat( + opts.argv.optional_reporters.map((reporterModule) => + loadRelativeModule(reporterModule, true)) + ); + } - // Show help and exit if it's asked for - if (runOpts.margs.argv.help) { - const help = runOpts.require("./cli_help"); - help.help(); - defer.resolve(0); - return defer.promise; - } + // + // Serial Mode Reporter (enabled with --serial) + // + if (opts.argv.serial) { + const SerialReporter = require("./reporters/stdout/reporter"); + listeners.push(new SerialReporter()); + } - // handle executor specific params - const executorParams = _.omit(runOpts.margs.argv, _.keys(magellanArgs)); - - // ATTENTION: there should only be one executor param matched for the function call - _.forEach(runOpts.settings.testExecutors, (v, k) => { - _.forEach(executorParams, (epValue, epKey) => { - if (v.help[epKey] && v.help[epKey].type === "function") { - // we found a match in current executor - // method name convention for an executor: PREFIX_string_string_string_... - let names = epKey.split("_"); - names = names.slice(1, names.length); - const executorMethodName = _.camelCase(names.join(" ")); - - if (_.has(v, executorMethodName)) { - // method found in current executor - v[executorMethodName](runOpts, () => { - defer.resolve(); - }); + // intiialize listeners + return new Promise((resolve, reject) => { + async.each(listeners, (listener, done) => { + listener.initialize({ + analytics, + workerAmount: settings.MAX_WORKERS + }) + .then(() => done()) + .catch((err) => done(err)); + }, (err) => { + if (err) { + return reject(err); } else { - logger.err("Error: executor" + k + " doesn't has method " + executorMethodName + "."); - defer.resolve(); + return resolve(listeners); } - } + }); }); - }); - - // - // Initialize Listeners - // ==================== - // - // All listener/reporter types are optional and either activated through the existence - // of configuration (i.e environment vars), CLI switches, or magellan.json config. - let listeners = []; - - // - // Setup / Teardown - // ================ - // - // This is merely a listener like any other reporter, but with a developer-friendly name. - - if (runOpts.margs.argv.setup_teardown) { - // NOTE: loadRelativeModule can throw an error here if the setup module doesn't exist - // FIXME: handle this error nicely instead of printing an ugly stack trace - listeners.push(runOpts.loadRelativeModule(runOpts.margs.argv.setup_teardown)); - } - - // - // Load reporters from magellan.json - // ================================= - // - // Reporters that conform to the reporter API and inherit from src/reporter - // can be loaded in magellan.json through a reporters[] list. 
These can refer to - // either npm modules defined in package.json or to paths relative to the current - // working directory of the calling script or shell. - if (runOpts.margs.argv.reporters && _.isArray(runOpts.margs.argv.reporters)) { - // NOTE: loadRelativeModule can throw an error here if any of the reporter modules don't exist - // FIXME: handle this error nicely instead of printing an ugly stack trace - listeners = listeners.concat(runOpts.margs.argv.reporters.map(runOpts.loadRelativeModule)); - } + }, - // optional_reporters are modules we want to load only if found. If not found, we - // still continue initializing Magellan and don't throw any errors or warnings - if (runOpts.margs.argv.optional_reporters && _.isArray(runOpts.margs.argv.optional_reporters)) { - listeners = listeners.concat( - runOpts.margs.argv.optional_reporters.map((reporterModule) => { - return runOpts.loadRelativeModule(reporterModule, true); - }) - ); - } + loadTests(opts) { + // + // Find Tests, Start Worker Allocator + // + logger.log("Searching for tests..."); + const tests = getTests(testFilters.detectFromCLI(opts.argv)); - // - // Slack integration (enabled if settings exist) - // - const slackSettings = runOpts.require("./reporters/slack/settings"); - if (slackSettings.enabled) { - const Slack = runOpts.require("./reporters/slack/slack"); - const slackReporter = new Slack(slackSettings); - listeners.push(slackReporter); - } + const testAmount = tests.length > 0 ? + clc.greenBright(tests.length) : clc.yellowBright(tests.length); - // - // Serial Mode Reporter (enabled with --serial) - // - if (useSerialMode) { - const StdoutReporter = runOpts.require("./reporters/stdout/reporter"); - listeners.push(new StdoutReporter()); - } + logger.log(`Total tests found: ${testAmount}`); - // - // Screenshot Aggregation (enabled with --aggregate_screenshots) - // - if (runOpts.settings.aggregateScreenshots) { - const ScreenshotAggregator = runOpts.require("./reporters/screenshot_aggregator/reporter"); - listeners.push(new ScreenshotAggregator()); - } + if (_.isEmpty(tests)) { + return Promise.reject(new Error("No tests found, please make sure" + + " test filter is set correctly," + + " or test path is configured correctly in nightwatch.json")); + } + // print out test amount and each test name + _.map(tests, (t) => logger.log(` -> ${t.filename}`)); - // - // Find Tests, Start Worker Allocator - // - const tests = runOpts.getTests(runOpts.testFilters.detectFromCLI(runOpts.margs.argv)); + return Promise.resolve(tests); + }, - if (_.isEmpty(tests)) { - logger.log("Error: no tests found"); - defer.reject({ error: "No tests found" }); - return defer.promise; - } - const initializeListeners = () => { - const deferred = Q.defer(); - magellanGlobals.workerAmount = MAX_WORKERS; - - async.each(listeners, (listener, done) => { - listener.initialize(magellanGlobals) - .then(() => done()) - .catch((err) => done(err)); - }, (err) => { - if (err) { - deferred.reject(err); - } else { - deferred.resolve(); - } + detectProfiles(opts) { + return profiles.detectFromCLI({ + argv: opts.argv, + settings: opts.settings }); - return deferred.promise; - }; - - const startSuite = () => { - const deferred = Q.defer(); - - Promise - .all(_.map(testExecutors, (executor) => executor.setupRunner())) - .then(() => { - workerAllocator.initialize((workerInitErr) => { - if (workerInitErr) { - logger.err("Could not start Magellan. 
Got error while initializing" - + " worker allocator"); - deferred.reject(workerInitErr); - return defer.promise; - } - - const testRunner = new runOpts.TestRunner(tests, { - debug, - - maxWorkers: MAX_WORKERS, - - maxTestAttempts: MAX_TEST_ATTEMPTS, - - profiles: targetProfiles, - executors: testExecutors, - - listeners, - - bailStrategy: runOpts.settings.strategies.bail, - - serial: useSerialMode, - - allocator: workerAllocator, - - onSuccess: () => { - /*eslint-disable max-nested-callbacks*/ - workerAllocator.teardown(() => { - Promise - .all(_.map(testExecutors, (executor) => executor.teardownRunner())) - .then(() => { - runOpts.processCleanup(() => { - deferred.resolve(); - }); - }) - .catch((err) => { - // we eat error here - logger.warn("executor teardownRunner error: " + err); - runOpts.processCleanup(() => { - deferred.resolve(); - }); - }); - }); - }, - - onFailure: (/*failedTests*/) => { - /*eslint-disable max-nested-callbacks*/ - workerAllocator.teardown(() => { - Promise - .all(_.map(testExecutors, (executor) => executor.teardownRunner())) - .then(() => { - runOpts.processCleanup(() => { - // Failed tests are not a failure in Magellan itself, - // so we pass an empty error here so that we don't - // confuse the user. Magellan already outputs a failure - // report to the screen in the case of failed tests. - deferred.reject(null); - }); - }) - .catch((err) => { - logger.warn("executor teardownRunner error: " + err); - // we eat error here - runOpts.processCleanup(() => { - deferred.reject(null); - }); - }); - }); + }, + + enableExecutors(opts) { + // this is to allow magellan to double check profile that + // is retrieved by --profile or --profiles + const enabledExecutors = {}; + return new Promise((resolve, reject) => { + + try { + _.forEach( + _.uniq(_.map(opts.profiles, (profile) => profile.executor)), + (shortname) => { + if (settings.testExecutors[shortname]) { + settings.testExecutors[shortname].validateConfig({ isEnabled: true }); + enabledExecutors[shortname] = settings.testExecutors[shortname]; } }); - testRunner.start(); - }); - }) - .catch((err) => { - deferred.reject(err); - }); - - return deferred.promise; - }; - - const enableExecutors = (_targetProfiles) => { - // this is to allow magellan to double check with profile that - // is retrieved by --profile or --profiles - targetProfiles = _targetProfiles; - - const deferred = Q.defer(); - try { - _.forEach( - _.uniq(_.map(_targetProfiles, (targetProfile) => targetProfile.executor)), - (shortname) => { - if (runOpts.settings.testExecutors[shortname]) { - runOpts.settings.testExecutors[shortname].validateConfig({ isEnabled: true }); - } - }); + // for logging purpose + if (!_.isEmpty(enabledExecutors)) { - deferred.resolve(); - } catch (err) { - deferred.reject(err); - } + logger.log("Enabled executors:"); + _.forEach(enabledExecutors, + (sn) => logger.log(` ${clc.greenBright(sn.name)}`)); + } - return deferred.promise; - }; - - runOpts.profiles - .detectFromCLI(runOpts) - .then(enableExecutors) - .then(() => { - // - // Worker Count: - // ============= - // - // Default to 3 workers in parallel mode (default). - // Default to 1 worker in serial mode. - // - MAX_WORKERS = useSerialMode ? 
1 : parseInt(runOpts.margs.argv.max_workers) || 3; - workerAllocator = new runOpts.WorkerAllocator(MAX_WORKERS); - }) - .then(initializeListeners) - // NOTE: if we don't end up in catch() below, magellan exits with status code 0 naturally - .then(startSuite) - .then(() => { - defer.resolve(); - }) - .catch((err) => { - if (err) { - logger.err("Error initializing Magellan"); - logger.err("Error description:"); - logger.err(err.toString()); - logger.err("Error stack trace:"); - logger.err(err.stack); - } else { - // No err object means we didn't have an internal crash while setting up / tearing down + return resolve(enabledExecutors); + } catch (err) { + return reject(err); } - - // Fail the test suite or fail because of an internal crash - defer.reject({ error: "Internal crash" }); }); - - return defer.promise; + }, + + startTestSuite(opts) { + return new Promise((resolve, reject) => { + + const workerAllocator = new WorkerAllocator(settings.MAX_WORKERS); + + Promise + .all(_.map( + opts.executors, + (executor) => executor.setupRunner()) + ) + .then(() => opts.strategies.resource.holdSuiteResources({ + profiles: opts.profiles, + tests: opts.tests + })) + .then( + () => workerAllocator.setup(), + // if resource strategy decline the suite due to resource limit, + // we fail test run + (err) => reject(err) + ) + .then(() => + new Promise((innerResolve, innerReject) => + new TestRunner(opts.tests, { + profiles: opts.profiles, + executors: opts.executors, + listeners: opts.listeners, + strategies: opts.strategies, + allocator: workerAllocator, + onFinish: (failedTests) => { + + if (failedTests.length > 0) { + const e = new Error("Test suite failed due to test failure"); + e.code = constants.ERROR_CODE.TEST_FAILURE; + return innerReject(e); + } + + return innerResolve(); + } + }).run() + ) + ) + // resource.releaseSuiteResources is guaranteed to execute + .then( + () => opts.strategies.resource.releaseSuiteResources({ + profiles: opts.profiles, + tests: opts.tests + }), + (err) => opts.strategies.resource.releaseSuiteResources({ + profiles: opts.profiles, + tests: opts.tests + }).then(() => Promise.reject(err)) + ) + // workerAllocator.teardown is guaranteed to execute + .then( + () => workerAllocator.teardown(), + (err) => workerAllocator.teardown(err) + ) + // executor.teardownRunner is guaranteed to execute + .then( + () => Promise + .all(_.map(opts.executors, + (executor) => executor.teardownRunner())), + (err) => Promise + .all(_.map(opts.executors, + (executor) => executor.teardownRunner())) + .then(() => Promise.reject(err)) + /*eslint no-unused-vars: 0 */ + .catch((otherErr) => Promise.reject(err)) + ) + // processCleanup is guaranteed to execute + .then( + () => processCleanup(), + (err) => processCleanup(err) + ) + .then(() => resolve()) + .catch((err) => reject(err)); + }); + } }; diff --git a/src/cli_help.js b/src/cli_help.js index b13e188..f224eae 100644 --- a/src/cli_help.js +++ b/src/cli_help.js @@ -1,27 +1,24 @@ "use strict"; const _ = require("lodash"); +const clc = require("cli-color"); const project = require("../package.json"); const settings = require("./settings"); const magellanHelp = require("./help").help; const logger = require("./logger"); -const MAX_HELP_KEY_WIDTH = 40; +const MAX_HELP_KEY_WIDTH = 60; /*eslint max-len: 0*/ /*eslint max-statements: 0*/ module.exports = { + /*eslint no-unused-vars: 0 */ help: (opts) => { - const runOpts = _.assign({ - settings - }, opts); logger.loghelp(""); logger.loghelp("Usage: magellan [options]"); logger.loghelp(""); - 
logger.loghelp("By default, magellan will run all available tests in parallel with phantomjs."); - logger.loghelp(""); logger.loghelp("Available options:"); logger.loghelp(""); @@ -38,25 +35,25 @@ module.exports = { }); // load desire framework help - if (runOpts.settings.testFramework && runOpts.settings.testFramework.help) { - help[" Framework-specific (" + runOpts.settings.framework + ")"] = {}; + if (settings.testFramework && settings.testFramework.help) { + help[` Framework-specific (${clc.greenBright(settings.framework)})`] = {}; - _.forEach(runOpts.settings.testFramework.help, (v, k) => { + _.forEach(settings.testFramework.help, (v, k) => { if (v.visible === undefined || v.visible) { - help[" Framework-specific (" + runOpts.settings.framework + ")"][k] = v; + help[` Framework-specific (${clc.greenBright(settings.framework)})`][k] = v; } }); } // load desire executor(s) help - if (runOpts.settings.testExecutors) { - _.forEach(runOpts.settings.testExecutors, (v) => { + if (settings.testExecutors) { + _.forEach(settings.testExecutors, (v) => { if (v.help) { - help[" Executor-specific (" + v.name + ")"] = {}; + help[` Executor-specific (${clc.greenBright(v.name)})`] = {}; _.forEach(v.help, (itemValue, itemKey) => { if (itemValue.visible === undefined || itemValue.visible) { - help[" Executor-specific (" + v.name + ")"][itemKey] = itemValue; + help[` Executor-specific (${clc.greenBright(v.name)})`][itemKey] = itemValue; } }); } @@ -64,14 +61,14 @@ module.exports = { } // load desire strategy help - if (runOpts.settings.strategies) { - _.forEach(runOpts.settings.strategies, (v) => { + if (settings.strategies) { + _.forEach(settings.strategies, (v) => { if (v.help) { - help[" Strategy-specific (" + v.name + ")"] = {}; + help[` Strategy-specific (${clc.greenBright(v.name)})`] = {}; _.forEach(v.help, (itemValue, itemKey) => { if (itemValue.visible === undefined || itemValue.visible) { - help[" Strategy-specific (" + v.name + ")"][itemKey] = itemValue; + help[` Strategy-specific (${clc.greenBright(v.name)})`][itemKey] = itemValue; } }); } @@ -81,7 +78,7 @@ module.exports = { if (help) { _.forEach(help, (helpValue, helpKey) => { - logger.loghelp(" " + helpKey); + logger.loghelp(` ${clc.cyanBright(helpKey)}`); _.forEach(helpValue, (itemValue, itemKey) => { let str = " --" + itemKey; @@ -102,6 +99,6 @@ module.exports = { }); } - logger.log("Magellan@" + project.version); + logger.log(`Magellan@${project.version}`); } }; diff --git a/src/constants.js b/src/constants.js new file mode 100644 index 0000000..fc121fd --- /dev/null +++ b/src/constants.js @@ -0,0 +1,16 @@ +"use strict"; + +const TEST_PRIORITY = { + FIRST_RUN: 10, + RETRY: 2 +}; + +const ERROR_CODE = { + HELP: 999, + TEST_FAILURE: 998 +}; + +module.exports = { + TEST_PRIORITY, + ERROR_CODE +}; diff --git a/src/get_tests.js b/src/get_tests.js index 9b5fb59..a2ba2d5 100644 --- a/src/get_tests.js +++ b/src/get_tests.js @@ -1,17 +1,12 @@ "use strict"; -const _ = require("lodash"); - const testFilter = require("./test_filter"); const settings = require("./settings"); module.exports = (filters, opts) => { - const runOpts = _.assign({ - settings - }, opts); - const getTests = runOpts.settings.testFramework.iterator; - const allFiles = getTests(runOpts.settings); + const getTests = settings.testFramework.iterator; + const allFiles = getTests(settings); return testFilter.filter(allFiles, filters, opts); }; diff --git a/src/help.js b/src/help.js index c1ec1d8..48679d8 100644 --- a/src/help.js +++ b/src/help.js @@ -19,13 +19,13 @@ module.exports = 
{ }, "max_workers": { "category": "Parallelism, Workflow and Filtering", - "example": "N", + "example": "3", "visible": true, "description": "Set maximum number of parallel works to (see defaults below)." }, "max_test_attempts": { "category": "Parallelism, Workflow and Filtering", - "example": "N", + "example": "3", "visible": true, "description": "Retry tests N times (default: 3)." }, @@ -37,39 +37,25 @@ module.exports = { }, "strategy_bail": { "category": "Strategy", - "example": "./strategy/fast_bail", + "example": "testarmada-magellan-early-bail-strategy", "visible": true, - "description": "The strategy magellan uses to decide when to terminate current test suite if failure happens." + "description": "The strategy helps magellan decide when to terminate current test suite if failure happens." }, - "bail_early": { - "category": "Bail Strategy [Will be deprecated soon, please migrate to --strategy_bail]", - "visible": true, - "description": "Kill builds that have failed at least 10% of tests, after 10 or more test runs." - }, - "bail_fast": { - "category": "Bail Strategy [Will be deprecated soon, please migrate to --strategy_bail]", - "visible": true, - "description": "Kill builds that fail any test." - }, - "bail_time": { - "category": "Bail Strategy [Will be deprecated soon, please migrate to --strategy_bail]", - "visible": true, - "description": "Set test kill time in milliseconds. *CAN* be used without bail_early/bail_fast." - }, - "early_bail_threshold": { - "category": "Bail Strategy [Will be deprecated soon, please migrate to --strategy_bail]", + "strategy_resource": { + "category": "Strategy", + "example": "testarmada-magellan-locks-resource-strategy", "visible": true, - "description": "A decimal ratio (eg 0.25 for 25%) how many tests to fail before bail_early" + "description": "The strategy helps magellan hold/release resourcs for test when limit resources are available." }, - "early_bail_min_attempts": { - "category": "Bail Strategy [Will be deprecated soon, please migrate to --strategy_bail]", + "debug": { + "category": "Parallelism, Workflow and Filtering", "visible": true, - "description": "How many test runs to run before applying bail_early rule." + "description": "Enable magellan debug messages (dev mode)." }, - "debug": { + "debugVerbose": { "category": "Parallelism, Workflow and Filtering", "visible": true, - "description": "Enable debugging magellan messages (dev mode)." + "description": "Enable magellan debug messages in verbose mode, this also enables debug log for nightwatch-extra if enabled(dev mode)." }, "config": { "category": "Configuration", diff --git a/src/hosted_profiles.js b/src/hosted_profiles.js index 7c6c7aa..cc84ff7 100644 --- a/src/hosted_profiles.js +++ b/src/hosted_profiles.js @@ -2,7 +2,6 @@ const syncRequest = require("sync-request"); const URL = require("url"); -const _ = require("lodash"); module.exports = { // Return a profile name from an URL if one is referenced with a #fragment. 
@@ -18,13 +17,9 @@ module.exports = { return url.hash.split("#")[1]; } }, - + /*eslint no-unused-vars: 0 */ getProfilesAtURL: (url, opts) => { - const runOpts = _.assign({ - syncRequest - }, opts); - - const res = runOpts.syncRequest("GET", url); + const res = syncRequest("GET", url); let data; try { diff --git a/src/listener.js b/src/listener.js index ccf3dc4..b2f9aa6 100644 --- a/src/listener.js +++ b/src/listener.js @@ -1,21 +1,15 @@ "use strict"; -const Q = require("q"); - class BaseListener { initialize() { - const deferred = Q.defer(); - deferred.resolve(); - return deferred.promise; + return Promise.resolve(); } listenTo(/* testRun, test, source */) { } flush() { - const deferred = Q.defer(); - deferred.resolve(); - return deferred.promise; + return Promise.resolve(); } } diff --git a/src/mkdir_sync.js b/src/mkdir_sync.js deleted file mode 100644 index dd8601c..0000000 --- a/src/mkdir_sync.js +++ /dev/null @@ -1,18 +0,0 @@ -"use strict"; - -const fs = require("fs"); -const _ = require("lodash"); - -module.exports = (path, opts) => { - const runOpts = _.assign({ - fs - }, opts); - - try { - runOpts.fs.mkdirSync(path); - } catch (e) { - if (e.code !== "EEXIST") { - throw e; - } - } -}; diff --git a/src/profiles.js b/src/profiles.js index 8bbf52b..afcc275 100644 --- a/src/profiles.js +++ b/src/profiles.js @@ -19,53 +19,49 @@ class Profile { module.exports = { detectFromCLI: (opts) => { - // runOpts.margs.argv, runOpts.settings.testExecutors /** * Handle following command argument * --profile * */ - const runOpts = _.assign({}, opts); - - const argv = runOpts.margs.argv; - const testExecutors = runOpts.settings.testExecutors; + const testExecutors = opts.settings.testExecutors; return new Promise((resolve, reject) => { let profiles = []; - if (argv.profile) { - // If a profile key is specified, look to argv for it and use it. If + if (opts.argv.profile) { + // If a profile key is specified, look to opts.argv for it and use it. If // a browser is set ia CLI, we assume details from the stored profile // and override with anything else explicitly set. - if (argv.profile.indexOf("http:") > -1 || argv.profile.indexOf("https:") > -1) { + if (opts.argv.profile.indexOf("http:") > -1 || opts.argv.profile.indexOf("https:") > -1) { // We fetch profiles from an URL if it starts with http: or https: // We assume it will have a #fragment to identify a given desired profile. // Note: The hosted profiles are merged on top of any local profiles. - const remoteProfileURL = argv.profile.split("#")[0]; + const remoteProfileURL = opts.argv.profile.split("#")[0]; const fetchedProfiles = hostedProfiles.getProfilesAtURL(remoteProfileURL, opts); if (fetchedProfiles && fetchedProfiles.profiles) { - argv.profiles = _.extend({}, argv.profiles, fetchedProfiles.profiles); + opts.argv.profiles = _.extend({}, opts.argv.profiles, fetchedProfiles.profiles); logger.log("Loaded hosted profiles from " + remoteProfileURL); } - argv.profile = hostedProfiles.getProfileNameFromURL(argv.profile); + opts.argv.profile = hostedProfiles.getProfileNameFromURL(opts.argv.profile); } - logger.log("Requested profile(s): " + argv.profile); + logger.log("Requested profile(s): " + opts.argv.profile); // NOTE: We check "profiles" (plural) here because that's what has // the actual profile definition. "profile" is the argument from the // command line. "profiles" is the list structure in magellan.json. 
- if (argv.profiles && Object.keys(argv.profiles).length > 0) { + if (opts.argv.profiles && Object.keys(opts.argv.profiles).length > 0) { let requestedProfiles; // Generate a list of profiles, which may typically be just one profile. - if (argv.profile.indexOf(",") > -1) { - requestedProfiles = argv.profile.split(","); - } else if (argv.profiles.hasOwnProperty(argv.profile)) { - requestedProfiles = [argv.profile]; + if (opts.argv.profile.indexOf(",") > -1) { + requestedProfiles = opts.argv.profile.split(","); + } else if (opts.argv.profiles.hasOwnProperty(opts.argv.profile)) { + requestedProfiles = [opts.argv.profile]; } // Search for the requested profiles and resolve their profiles @@ -73,10 +69,10 @@ module.exports = { const notFoundProfiles = []; _.forEach(requestedProfiles, (requestedProfile) => { - if (argv.profiles[requestedProfile]) { + if (opts.argv.profiles[requestedProfile]) { // keep only the unique profiles and eliminate duplicates from test run profiles = _.uniqWith( - _.concat(profiles, argv.profiles[requestedProfile]), + _.concat(profiles, opts.argv.profiles[requestedProfile]), _.isEqual ); } else { @@ -133,10 +129,10 @@ module.exports = { }); } else { - reject("Profile " + argv.profile + " not found!"); + reject("Profile " + opts.argv.profile + " not found!"); } } else { - reject("Profile " + argv.profile + " not found!"); + reject("Profile " + opts.argv.profile + " not found!"); } } else { // user passes profile information from command line directly, diff --git a/src/reporters/screenshot_aggregator/reporter.js b/src/reporters/screenshot_aggregator/reporter.js deleted file mode 100644 index 24b1418..0000000 --- a/src/reporters/screenshot_aggregator/reporter.js +++ /dev/null @@ -1,204 +0,0 @@ -"use strict"; - -const fs = require("fs"); -const glob = require("glob"); -const request = require("request"); -const Q = require("q"); -const slugify = require("slugify"); -const settings = require("./settings"); -const path = require("path"); -const async = require("async"); -const BaseReporter = require("../reporter"); -const _ = require("lodash"); - -const MAX_CONCURRENT_UPLOADS = 2; - -class ScreenshotAggregator extends BaseReporter { - constructor(opts) { - super(); - - _.assign(this, { - console, - request, - fs, - glob, - settings, - path - }, opts); - - // This is an URL where we've stored screenshots at for this entire build - // (regardless of subtests) - // If we successfully upload any screenshots, this value will be assigned. - this.buildURL = null; - - this.q = async.queue(this._uploadImage.bind(this), MAX_CONCURRENT_UPLOADS); - this.q.drain = this.onQueueDrained.bind(this); - // If this property is set, then onQueueDrained must resolve this deferred's promise. 
- this.deferFlush = null; - this.counter = 0; - } - - initialize() { - const deferred = Q.defer(); - - if (!this.settings.aggregatorURL) { - deferred.reject(new Error("ScreenshotAggregator is missing an aggregatorURL" - + " in its configuration")); - } else { - deferred.resolve(); - } - - return deferred.promise; - } - - listenTo(testRun, test, source) { - source.addListener("message", this._handleMessage.bind(this, testRun, test)); - } - - _uploadImage(image, callback) { - this.counter++; - - const formData = { - /*eslint-disable camelcase */ - build_id: image.buildId, - child_build_id: image.childBuildId, - imagefile: { - value: this.fs.createReadStream(image.localFilePath), - options: { - filename: image.intendedFilename, - contentType: "image/png" - } - } - }; - - this.request.post({ - url: this.settings.aggregatorURL, - formData - }, (err, httpResponse, body) => { - let result; - try { - result = JSON.parse(body); - } catch (e) { - // NOTE: For the moment, we eat and ignore upload errors. - err = e; - } - - if (!err && result && result.status === "success") { - this.buildURL = result.buildURL; - } else { - this.console.error("Error uploading screenshot to screenshot service. ", err); - } - - callback(); - }); - } - - _getScreenshots(tempDir) { - return this.glob.sync(this.path.resolve(tempDir) + "/*.png").concat( - this.glob.sync(this.path.resolve(tempDir) + "/*.PNG")); - } - - _deleteScreenshots(tempDir) { - // I couldn't figure out how to make nocase: true work -- it just produces empty results - this._getScreenshots(tempDir).forEach((screenshotPath) => { - this.fs.unlinkSync(screenshotPath); - }); - } - - // Collect screenshots and queue them for uploading to a remote screenshot storage service. - _collectScreenshots(tempDir, buildId, testName, browserId) { - // - // - // TODO: resolve apparent ambiguity when the same browserId is used - // multiple times in parallel but with different resolutions - // and/or orientations - // TODO: consider shifting slug generation over to TestRun class to avoid specializing - // in disambiguating here. - // - // - const childBuildId = slugify(testName + "_" + browserId); - const shots = this._getScreenshots(tempDir); - - shots.forEach((filePath) => { - const fullPath = this.path.resolve(filePath); - let intendedFilename = fullPath; - /*eslint-disable no-magic-numbers */ - if (fullPath.indexOf("/") > -1) { - intendedFilename = fullPath.split("/").pop(); - } - this.q.push({ - localFilePath: fullPath, - intendedFilename, - buildId: slugify(buildId), - childBuildId - }); - }); - } - - onQueueDrained() { - // if deferFlush has been set, it means we tried to call flush() while the upload queue - // was still running. If this is the case, onQueueDrained has been called while an external - // test runner is paused, waiting for - if (this.deferFlush) { - this.deferFlush.resolve(); - } - } - - // Summarize our results to the screen or optionally promise that we will, since summarizing - // might require pending screenshots to finish uploading. - flush() { - this.console.log(""); - - const showSummary = () => { - if (this.counter > 0) { - this.console.log("There " + (this.counter > 1 ? "are " : "is ") + - this.counter + " screenshot" + - (this.counter > 1 ? 
"s" : "") + " of this build available at " + this.buildURL); - } else { - this.console.log("Screenshot aggregator enabled, but no screenshots were uploaded."); - } - }; - - if (this.q.idle()) { - return showSummary(); - } else { - const awaitedUploads = this.q.length() + this.q.running(); - this.console.log("Screenshot aggregator is waiting for " + - (awaitedUploads > 1 ? awaitedUploads + " screenshots" : " screenshot") + - " to finish uploading.."); - - const deferSummary = Q.defer(); - - // Set up a deferred for - this.deferFlush = Q.defer(); - this.deferFlush.promise.then(() => { - showSummary(); - deferSummary.resolve(); - }); - - // return a promise that we'll show a summary once uploads have completed - return deferSummary.promise; - } - } - - _handleMessage(testRun, test, message) { - if (message.type === "worker-status") { - if (message.status === "finished") { - const tempDir = testRun.tempAssetPath; - - if (message.passed || test.attempts === test.maxAttempts - 1) { - // Is this our last attempt ever? Sweep up screenshots from this test run. - this._collectScreenshots(tempDir, testRun.buildId, message.name, - test.browser.slug()); - } else { - // We've failed a test and we're going to retry it again in the future. - // Delete screenshots generated by this run, we don't care about - // intermediate results. - this._deleteScreenshots(tempDir); - } - } - } - } -} - -module.exports = ScreenshotAggregator; diff --git a/src/reporters/screenshot_aggregator/settings.js b/src/reporters/screenshot_aggregator/settings.js deleted file mode 100644 index c90cf3f..0000000 --- a/src/reporters/screenshot_aggregator/settings.js +++ /dev/null @@ -1,7 +0,0 @@ -"use strict"; - -const argv = require("marge").argv; - -module.exports = { - aggregatorURL: argv.screenshot_aggregator_url -}; diff --git a/src/reporters/slack/settings.js b/src/reporters/slack/settings.js deleted file mode 100644 index c840f35..0000000 --- a/src/reporters/slack/settings.js +++ /dev/null @@ -1,16 +0,0 @@ -"use strict"; - -module.exports = { - enabled: !!process.env.MAGELLAN_SLACK_API_KEY, - - account: process.env.MAGELLAN_SLACK_ACCOUNT_NAME, - key: process.env.MAGELLAN_SLACK_API_KEY, - username: process.env.MAGELLAN_SLACK_USERNAME, - iconURL: process.env.MAGELLAN_SLACK_ICON_URL, - channel: process.env.MAGELLAN_SLACK_NOTIFY_CHANNEL, - - jobName: process.env.JOB_NAME, - buildDisplayName: process.env.BUILD_DISPLAY_NAME, - - buildURL: process.env.BUILD_URL -}; diff --git a/src/reporters/slack/slack.js b/src/reporters/slack/slack.js deleted file mode 100644 index 0013559..0000000 --- a/src/reporters/slack/slack.js +++ /dev/null @@ -1,158 +0,0 @@ -/* eslint no-invalid-this: 0 */ -"use strict"; - -/* -* Slack integration for Magellan runs -* -* When running in a CI environment, reports test failures -* based on the following environment variables: -* -* MAGELLAN_SLACK_API_KEY : Webhook key set up in Slack settings -* MAGELLAN_SLACK_NOTIFY_CHANNEL : Channel to post messages -* -*/ - -const _ = require("lodash"); -const Slack = require("node-slackr"); -const Q = require("q"); -const BaseReporter = require("../reporter"); - -class Reporter extends BaseReporter { - constructor(config, opts) { - super(); - - this.config = config; - this.failures = []; - _.assign(this, { - console, - Slack - }, opts); - } - - initialize() { - const deferred = Q.defer(); - - this.console.log("Magellan Slack Reporter initializing.."); - - // Before setting up the node-slackr instance, verify we have everything we need. 
- // If we don't, print the missing configuration variables and reject the promise. - let hasAllConfig = true; - [ - // slack-related - "account", - "key", - "channel", - "username", - "iconURL", - // job related - "jobName", - "buildDisplayName", - "buildURL" - ].forEach((key) => { - if (!this.config.hasOwnProperty(key)) { - hasAllConfig = false; - this.console.error("Missing Slack configuration variable: " + key); - } - }); - if (!hasAllConfig) { - deferred.reject(new Error("Error: Missing required Slack configuration variables")); - } - - this.jobName = this.config.jobName; - this.buildDisplayName = this.config.buildDisplayName; - this.buildURL = this.config.buildURL; - - this.slack = new this.Slack(this.config.account, this.config.key, { - channel: this.config.channel, - username: this.config.username, - /*eslint-disable camelcase*/ - icon_url: this.config.iconURL - }); - - deferred.resolve(); - return deferred.promise; - } - - listenTo(testRun, test, source) { - // Every time a message is received regarding this test, we also get the test object - // itself so that we're able to reason about retries, worker index, etc. - source.addListener("message", this._handleMessage.bind(this, testRun, test)); - } - - /* - * Sends complete failure message to Slack channel. For example: - * - * ================ FAILURES in SauceLabs_Test #321 [Netscape Navigator 2.1] ================ - * 1) Smoke Scenario 6 [ https://saucelabs.com/tests/..... ] - * 2) Smoke Scenario 7 [ https://saucelabs.com/tests/..... ] (2 uncaught errors detected) - * 3) Smoke Scenario 22 [ https://saucelabs.com/tests/..... ] - * - * Build Log: http://cihost/job/Magellan_SauceLabs/123/consoleFull - * - * */ - flush() { - if (this.failures.length > 0) { - const output = _.map(this.failures, (failure, i) => { - let browserErrorsNote = ""; - if (failure.browserErrors && failure.browserErrors.length > 0) { - browserErrorsNote = "(uncaught errors: " + failure.browserErrors.length + " detected)"; - } - - const url = failure.url ? "[ " + failure.url + " ]" : ""; - return "  " + (i + 1) + ") " + failure.testName + " " + url + " " + browserErrorsNote; - }).join("\n"); - - const msg = "================ FAILURES in " + this.jobName + " " + this.buildDisplayName - + " ================\n" + output + "\nBuild Log: " + this.buildURL; - - this.slack.notify(msg); - } - } - - _handleMessage(testRun, test, message) { - if (message.type === "worker-status") { - if (message.status === "finished") { - - // Remove any already-existing record of this test (i.e. assume a pass on this test). - // Note: We could just keep and not add on re-fail, but then the sauceURL - // would be incorrect. - this.failures = _.filter(this.failures, (failure) => failure.testName !== message.name); - - if (!message.passed) { - // If a test failed, add it to our list of failed tests. If we've removed a previous - // run, THIS run will have the right sauceURL. - let url = ""; - let browserErrors; - if (message.metadata) { - url = (message.metadata.sauceURL ? message.metadata.sauceURL : "") - || message.metadata.buildURL; - browserErrors = message.metadata.browserErrors; - } - this._addFailure(message.name, browserErrors, url); - } - } - } - } - - /* - * Creates a formatted line describing the test failure. - * - * failures[] is a list of test names that have failed. - * - * SauceLabs test failures include the Sauce URL look like this: - * 1) Smoke Scenario 6 [ https://saucelabs.com/tests/..... ] - * - * Non-Sauce test (i.e. 
PhantomJS) failures look like this: - * 1) Smoke Scenario 6 - * - * */ - _addFailure(testName, browserErrors, url) { - this.failures.push({ - testName, - browserErrors, - url - }); - } -} - -module.exports = Reporter; diff --git a/src/settings.js b/src/settings.js index 33f1a4a..cdb4eea 100644 --- a/src/settings.js +++ b/src/settings.js @@ -3,19 +3,33 @@ /*eslint-disable no-magic-numbers, no-bitwise, no-console */ const guid = require("./util/guid"); -const argv = require("marge").argv; -const env = process.env; +const yargs = require("yargs"); +const margs = require("marge"); const fs = require("fs"); const path = require("path"); const logger = require("./logger"); +const configFilePath = yargs.argv.config; +const DEFAULT_CONFIG = "./magellan.json"; + +if (configFilePath) { + logger.log("Loading configuration from: " + configFilePath); +} else { + logger.log("Loading configuration from default location: " + DEFAULT_CONFIG); +} + +// NOTE: marge can throw an error here if --config points at a file that doesn't exist +// FIXME: handle this error nicely instead of printing an ugly stack trace +margs.init(DEFAULT_CONFIG, configFilePath); + +const argv = margs.argv; // Allow an external build id (eg: from CI system, for example) to be used. If we're not given one, // we generate a random build id instead. NOTE: This build id must work as a part of a filename. // NOTE: The result of this line is that buildId is truthy so toString() should work const buildId = (argv.external_build_id || "magellan-" + guid()).toString(); // Create a temporary directory for child build assets like configuration, screenshots, etc. -const mkdirSync = require("./mkdir_sync"); +const mkdirSync = require("./util/mkdir_sync"); const TEMP_DIR = path.resolve(argv.temp_dir || "./temp"); try { @@ -41,10 +55,10 @@ try { if (fs.accessSync) { fs.accessSync(TEMP_DIR, fs.R_OK | fs.W_OK); } - logger.log("Magellan is creating temporary files at: " + TEMP_DIR); + logger.log("Creating temporary files at: " + TEMP_DIR); } catch (e) { /* istanbul ignore next */ - throw new Error("Magellan cannot write to or create the temporary directory: " + TEMP_DIR); + throw new Error("Cannot write to or create the temporary directory: " + TEMP_DIR); } let testTimeout = 8 * 60 * 1000; @@ -72,10 +86,16 @@ module.exports = { BASE_PORT_START: parseInt(argv.base_port_start) || 12000, BASE_PORT_RANGE: parseInt(argv.base_port_range) || 2000, BASE_PORT_SPACING: parseInt(argv.base_port_spacing) || 3, + MAX_WORKERS: argv.serial ? 1 : parseInt(argv.max_workers) || 3, + MAX_TEST_ATTEMPTS: parseInt(argv.max_test_attempts) || 3, - environment: env, + MAX_ALLOCATION_ATTEMPTS: 120, + WORKER_START_DELAY: 1000, - debug: argv.debug, + environment: process.env, + debug: Boolean(argv.debug) || Boolean(argv.debugVerbose), + debugVerbose: Boolean(argv.debugVerbose), + serial: Boolean(argv.serial), gatherTrends: argv.gather_trends, diff --git a/src/bail.js b/src/strategies/bail.js similarity index 66% rename from src/bail.js rename to src/strategies/bail.js index 6e87c7d..8b99435 100644 --- a/src/bail.js +++ b/src/strategies/bail.js @@ -1,26 +1,36 @@ "use strict"; const _ = require("lodash"); -const logger = require("./logger"); +const logger = require("../logger"); + +const Factory = { + /* eslint-disable global-require */ + // requires stragety on the fly + create(argv) { + // + // There is only one bail strategy allowed per magellan instance. + // Bail strategy is configured via --strategy_bail. 
+ // If no --strategy_bail , enable ./strategies/bail_never by default + const bailRule = argv.strategy_bail ? + argv.strategy_bail : "./bail/never"; + + return require(bailRule); + } +}; class BailStrategy { - constructor(strategy) { + constructor(argv) { + this.hasBailed = false; try { - /* eslint-disable global-require */ - // requires stragety on the fly - _.assign(this, require(strategy)); - } catch (e) { - throw new Error(e); - } - } - - configure(argv) { - // set configuration if the strategy requires - // input from command line - if (this.setConfiguration) { - this.setConfiguration(argv); + _.assign(this, Factory.create(argv)); + // call configuration if set + if (this.setConfiguration) { + this.setConfiguration(argv); + } + } catch (err) { + throw err; } } @@ -59,6 +69,7 @@ class BailStrategy { if (!this.hasBailed) { // suite isn't bailed yet, let strategy decide this.hasBailed = this.decide(info); + if (this.hasBailed) { logger.warn("Test suite has bailed due to bail rule:"); logger.warn(` ${this.name}: ${this.getBailReason()}`); diff --git a/src/strategies/bail_never.js b/src/strategies/bail/never.js similarity index 100% rename from src/strategies/bail_never.js rename to src/strategies/bail/never.js diff --git a/src/strategies/bail_early.js b/src/strategies/bail_early.js deleted file mode 100644 index 5c6c8f3..0000000 --- a/src/strategies/bail_early.js +++ /dev/null @@ -1,84 +0,0 @@ -"use strict"; - -const _ = require("lodash"); -const logger = require("../logger"); - -/* eslint-disable no-magic-numbers */ -const settings = { - FAILURE_RATIO: 0.1, - MIN_TEST_ATTEMPTS: 10, - TEST_TIMEOUT: 8 * 60 * 1000 -}; - -/* istanbul ignore next */ -module.exports = { - name: "testarmada-magellan-early-bail-strategy", - description: "Magellan will bail if failure ratio exceeds a threshold within a given period", - bailReason: () => `At least ${settings.FAILURE_RATIO * 100}% of tests have ` - + `been failed after seeing at least ${settings.MIN_TEST_ATTEMPTS} tests run`, - - help: { - "early_bail_threshold": { - "visible": true, - "type": "string", - "example": "0.1", - "description": "Ratio of tests that need to fail before we abandon the build" - }, - "early_bail_min_attempts": { - "visible": true, - "type": "string", - "example": "10", - "description": "Minimum number of tests that need to run before we apply the bail strategy" - } - }, - - setConfiguration(argv) { - logger.prefix = "Early Bail Strategy"; - - if (argv.early_bail_threshold) { - settings.FAILURE_RATIO = argv.early_bail_threshold; - } - - if (argv.early_bail_min_attempts) { - settings.MIN_TEST_ATTEMPTS = argv.early_bail_min_attempts; - } - - logger.debug(`bail config: ${JSON.stringify(settings)}`); - }, - - // info format - /* - * { - * totalTests: [] // total tests - * passedTests: [] // successful tests - * failedTests: [] // failed tests - * } - */ - decide(info) { - // Bail on a threshold. - // By default, if we've run at least ${settings.minTestAttempts} tests - // and at least ${settings.failureRatio} of them have failed, we bail out early. - // This allows for useful data-gathering for debugging or trend - // analysis if we don't want to just bail on the first failed test. - - const sumAttempts = (memo, test) => memo + test.attempts; - const totalAttempts = _.reduce(info.passedTests, sumAttempts, 0) - + _.reduce(info.failedTests, sumAttempts, 0); - - // Failed attempts are not just the sum of all failed attempts but also - // of successful tests that eventually passed (i.e. total attempts - 1). 
- const sumExtraAttempts = (memo, test) => memo + Math.max(test.attempts - 1, 0); - const failedAttempts = _.reduce(info.failedTests, sumAttempts, 0) - + _.reduce(info.passedTests, sumExtraAttempts, 0); - - // Fail to total work ratio. - const ratio = failedAttempts / totalAttempts; - - if (totalAttempts > settings.MIN_TEST_ATTEMPTS) { - if (ratio > settings.FAILURE_RATIO) { - return true; - } - } - return false; - } -}; diff --git a/src/strategies/bail_fast.js b/src/strategies/bail_fast.js deleted file mode 100644 index 155c1bb..0000000 --- a/src/strategies/bail_fast.js +++ /dev/null @@ -1,21 +0,0 @@ -"use strict"; - -/* istanbul ignore next */ -module.exports = { - name: "testarmada-magellan-fast-bail-strategy", - description: "Magellan will bail immediately if one test has been failed", - bailReason: "At least one test has been failed", - - // info format - /* - * { - * totalTests: [] // total tests - * passedTests: [] // successful tests - * failedTests: [] // failed tests - * } - */ - decide(info) { - // never bail - return info.failedTests.length > 0; - } -}; diff --git a/src/strategies/resource.js b/src/strategies/resource.js new file mode 100644 index 0000000..0ae6d2e --- /dev/null +++ b/src/strategies/resource.js @@ -0,0 +1,107 @@ +"use strict"; + +const _ = require("lodash"); +const logger = require("../logger"); + +const Factory = { + /* eslint-disable global-require */ + // requires stragety on the fly + create(argv) { + const resourceRule = argv.strategy_resource ? + argv.strategy_resource : "./resource/never"; + + return require(resourceRule); + } +}; + +class ResourceStrategy { + constructor(argv) { + + try { + _.assign(this, Factory.create(argv)); + // call configuration if set + if (_.isFunction(this.setConfiguration)) { + this.setConfiguration(argv); + } + } catch (err) { + throw err; + } + } + + getDescription() { + // check if strategy has description defined + if (!this.description) { + logger.warn(`${this.name} doesn't have strategy description. ` + + "You might want to add description to it."); + return ""; + } + // prints out strategy's description + return this.description; + } + + getFailReason() { + // check if strategy has fail Reason defined + if (!this.failReason) { + logger.warn(`${this.name} doesn't have strategy fail reason.` + + " You might want to add a failReason to it."); + return ""; + } + // prints out strategy's fail Reason + return typeof this.failReason === "function" ? 
this.failReason() : this.failReason; + } + + holdTestResource(resource) { + if (_.isFunction(this.holdResourceForTest)) { + return this.holdResourceForTest(resource); + } else { + // no holdTest is defined in strategy + return Promise.resolve(resource); + } + + } + + holdSuiteResources(resources) { + if (_.isFunction(this.holdResourcesForSuite)) { + return this.holdResourcesForSuite(resources); + } else { + // no holdSuite is defined in strategy + return Promise.resolve(resources); + } + } + + releaseTestResource(resource) { + if (_.isFunction(this.releaseResourceForTest)) { + + return this.releaseResourceForTest(resource) + .then(() => Promise.resolve(resource)) + .catch((err) => { + // we log warning but eat the error here + logger.warn(`Error in releasing resource for test: ${err}.` + + " This error doesn't impact test result."); + return Promise.resolve(resource); + }); + } else { + // no holdTest is defined in strategy + return Promise.resolve(resource); + } + + } + + releaseSuiteResources(resources) { + if (_.isFunction(this.releaseResourcesForSuite)) { + return this.releaseResourcesForSuite(resources) + .then(() => Promise.resolve(resources)) + .catch((err) => { + // we log warning but eat the error here + logger.warn(`Error in releasing resources for suite: ${err}.` + + " This error doesn't impact suite result."); + return Promise.resolve(resources); + }); + } else { + // no holdSuite is defined in strategy + return Promise.resolve(resources); + } + } +} + +module.exports = ResourceStrategy; diff --git a/src/strategies/resource/never.js b/src/strategies/resource/never.js new file mode 100644 index 0000000..d89e3f1 --- /dev/null +++ b/src/strategies/resource/never.js @@ -0,0 +1,32 @@ +"use strict"; + +/* istanbul ignore next */ +module.exports = { + name: "testarmada-magellan-no-resource-strategy", + description: "Magellan doesn't require a resource manager to schedule test run", + failReason: "Magellan shouldn't depend on any resource manager to control test run", + + // resource format + holdResourceForTest(profile) { + // never use resource manager + return Promise.resolve(profile); + }, + + // resource format + holdResourcesForSuite(opts) { + // never use resource manager + return Promise.resolve(opts); + }, + + // resource format + releaseResourceForTest(profile) { + // never use resource manager + return Promise.resolve(profile); + }, + + // resource format + releaseResourcesForSuite(opts) { + // never use resource manager + return Promise.resolve(opts); + } +}; diff --git a/src/test.js b/src/test.js index 896608a..f177df1 100644 --- a/src/test.js +++ b/src/test.js @@ -3,6 +3,7 @@ const TEST_STATUS_NEW = 1; const TEST_STATUS_FAILED = 2; const TEST_STATUS_SUCCESSFUL = 3; +const TEST_STATUS_SKIPPED = 4; class Test { constructor(locator, profile, executor, maxAttempts) { @@ -76,5 +77,6 @@ class Test { Test.TEST_STATUS_NEW = TEST_STATUS_NEW; Test.TEST_STATUS_FAILED = TEST_STATUS_FAILED; Test.TEST_STATUS_SUCCESSFUL = TEST_STATUS_SUCCESSFUL; +Test.TEST_STATUS_SKIPPED = TEST_STATUS_SKIPPED; module.exports = Test; diff --git a/src/test_filter.js b/src/test_filter.js index 7b55aac..84c098b 100644 --- a/src/test_filter.js +++ b/src/test_filter.js @@ -6,14 +6,12 @@ const settings = require("./settings"); module.exports = { // Detect and return filters specified by command line arguments // from an argv object args + /*eslint no-unused-vars: 0 */ detectFromCLI: (args, opts) => { - const runOpts = _.assign({ - settings - }, opts); const filters = {}; - 
_.keys(runOpts.settings.testFramework.filters).forEach((f) => { + _.keys(settings.testFramework.filters).forEach((f) => { if (args[f]) { filters[f] = args[f]; } @@ -24,18 +22,16 @@ module.exports = { // Successively reduce files to a smaller set of files by // running a list of filters on the list repeatedly + /*eslint no-unused-vars: 0 */ filter: (files, filters, opts) => { - const runOpts = _.assign({ - settings - }, opts); let allFiles = files; _.forEach(filters, (n, k) => { - if (runOpts.settings.testFramework.filters[k]) { + if (settings.testFramework.filters[k]) { // if we have this filter predefined in settings.js // do filter here - allFiles = runOpts.settings.testFramework.filters[k](allFiles, filters[k]); + allFiles = settings.testFramework.filters[k](allFiles, filters[k]); } }); diff --git a/src/test_queue.js b/src/test_queue.js new file mode 100644 index 0000000..42dbab4 --- /dev/null +++ b/src/test_queue.js @@ -0,0 +1,76 @@ +"use strict"; + +const async = require("async"); +const _ = require("lodash"); + +const constants = require("./constants"); +const Test = require("./test"); + +// TODO: document this file +// Test to be retried has higher priority than new tests + +class TestQueue { + constructor(options) { + this.tests = options.tests; + this.workerAmount = options.workerAmount; + + this.handlers = { + completeQueueHandler: options.completeQueueHandler, + completeTestHandler: options.completeTestHandler + }; + + this.priorityQueue = async.priorityQueue( + options.stageTestHandler, + this.workerAmount + ); + + this.priorityQueue.drain = options.completeQueueHandler; + // queue is paused till resume is called + this.priorityQueue.pause(); + + // we put everything in priorityQueue from beginning + _.forEach(this.tests, (test) => { + this.priorityQueue.push( + test, + constants.TEST_PRIORITY.FIRST_RUN, + options.completeTestHandler); + }); + } + + isIdle() { + return this.priorityQueue.idle(); + } + + getTestAmount() { + return this.tests.length; + } + + getFailedTests() { + return _.filter(this.tests, + (test) => test.status === Test.TEST_STATUS_FAILED); + } + + getPassedTests() { + return _.filter(this.tests, + (test) => test.status === Test.TEST_STATUS_SUCCESSFUL); + } + + proceed() { + if (_.isEmpty(this.tests)) { + return this.priorityQueue.drain(); + } else { + return this.priorityQueue.resume(); + } + } + + enqueue(test, priority) { + return this.priorityQueue.push(test, priority, this.handlers.completeTestHandler); + } + + earlyTerminate() { + this.priorityQueue.kill(); + return this.handlers.completeQueueHandler(); + } +} + +module.exports = TestQueue; diff --git a/src/test_runner.js b/src/test_runner.js index f8e0fe5..b9197d8 100644 --- a/src/test_runner.js +++ b/src/test_runner.js @@ -4,20 +4,19 @@ // TODO: Extract trending into another class // TODO: Move bailFast to a strategy pattern implementation -const async = require("async"); const _ = require("lodash"); const clc = require("cli-color"); const prettyMs = require("pretty-ms"); const path = require("path"); -const Q = require("q"); const once = require("once"); -const EventEmitter = require("events").EventEmitter; const fs = require("fs"); -const mkdirSync = require("./mkdir_sync"); +const mkdirSync = require("./util/mkdir_sync"); const guid = require("./util/guid"); -const logStamp = require("./util/logstamp"); +const ChildProcessHandler = require("./util/childProcess"); const sanitizeFilename = require("sanitize-filename"); const analytics = require("./global_analytics"); +const TestQueue = 
require("./test_queue"); +const constants = require("./constants"); const settings = require("./settings"); const Test = require("./test"); @@ -43,49 +42,42 @@ const FINAL_CLEANUP_DELAY = 2500; class TestRunner { constructor(tests, options, opts) { _.assign(this, { - fs, - mkdirSync, settings, setTimeout, clearInterval, - setInterval, - prettyMs, - path, - analytics + setInterval }, opts); this.buildId = this.settings.buildId; - this.busyCount = 0; + this.strategies = options.strategies; - this.retryCount = 0; + this.MAX_WORKERS = this.settings.MAX_WORKERS; - this.bailStrategy = options.bailStrategy; - - this.MAX_WORKERS = options.maxWorkers; - - this.MAX_TEST_ATTEMPTS = options.maxTestAttempts; + this.MAX_TEST_ATTEMPTS = this.settings.MAX_TEST_ATTEMPTS; this.profiles = options.profiles; this.executors = options.executors; - this.debug = options.debug; - this.serial = options.serial || false; + this.debug = this.settings.debug; + this.debugVerbose = this.settings.debugVerbose; + + this.serial = this.settings.serial; this.listeners = options.listeners || []; - this.onFailure = options.onFailure; - this.onSuccess = options.onSuccess; + this.onFinish = options.onFinish; this.allocator = options.allocator; - // For each actual test path, split out - this.tests = _.flatten(tests.map((testLocator) => { - return options.profiles.map((profile) => { - return new Test(testLocator, profile, - this.executors[profile.executor], this.MAX_TEST_ATTEMPTS); - }); - })); + const testsXprofiles = _.flatten( + tests.map((testLocator) => + options.profiles.map((profile) => + new Test( + testLocator, + profile, + this.executors[profile.executor], + this.MAX_TEST_ATTEMPTS)))); if (this.settings.gatherTrends) { this.trends = { @@ -94,103 +86,45 @@ class TestRunner { logger.log("Gathering trends to ./trends.json"); } - this.numTests = this.tests.length; - this.passedTests = []; - this.failedTests = []; - - // Set up a worker queue to process tests in parallel - this.q = async.queue(this.stageTest.bind(this), this.MAX_WORKERS); - - // When the entire suite is run through the queue, run our drain handler - this.q.drain = this.buildFinished.bind(this); - } - - start() { - this.startTime = (new Date()).getTime(); - - let profileStatement = this.profiles.map((b) => b.toString()).join(", "); - - if (this.serial) { - logger.log("Running " + this.numTests + " tests in serial mode with [" - + profileStatement + "]"); - } else { - logger.log("Running " + this.numTests + " tests with " + this.MAX_WORKERS - + " workers with [" + profileStatement + "]"); - } - - if (this.tests.length === 0) { - this.q.drain(); - } else { - // Queue up tests; this will cause them to actually start - // running immediately. 
- this.tests.forEach((test) => { - this.q.push(test, this.onTestComplete.bind(this)); - }); - } - } - - notIdle() { - this.busyCount++; - - if (this.busyCount === 1) { - // we transitioned from being idle to being busy - this.analytics.mark("magellan-busy", "busy"); - } - } - - maybeIdle() { - this.busyCount--; + this.queue = new TestQueue({ + tests: testsXprofiles, + workerAmount: this.MAX_WORKERS, - if (this.busyCount === 0) { - // we transitioned from being busy into being idle - this.analytics.mark("magellan-busy", "idle"); - } + stageTestHandler: this.stageTestHandler.bind(this), + completeTestHandler: this.completeTestHandler.bind(this), + completeQueueHandler: this.completeQueueHandler.bind(this) + }); } + /* eslint-disable no-unused-vars,max-nested-callbacks */ + stageTestHandler(test, callback) { + // check resource strategy + this.strategies.resource + .holdTestResource({ test }) + .then((profile) => { + // resource is ready, proceed test execution + const analyticsGuid = guid(); + test.executor.setupTest((setupTestErr, token) => { + if (setupTestErr) { + return callback(setupTestErr, test); + } - // Prepare a test to be run. Find a worker for the test and send it off to be run. - stageTest(test, onTestComplete) { - const analyticsGuid = guid(); - - this.analytics.push("acquire-worker-" + analyticsGuid); - - const failTest = (error) => { - this.analytics.mark("acquire-worker-" + analyticsGuid, "failed"); - // If the allocator could not give us a worker, pass - // back a failed test result with the allocator's error. - logger.err("Worker allocator error: " + error); - logger.err(error.stack); - - /*eslint-disable no-magic-numbers*/ - test.workerIndex = -1; - test.error = undefined; - test.stdout = ""; - test.stderr = error; - - test.fail(); - - onTestComplete(null, test); - }; - - test.executor.setupTest((stageExecutorError, token) => { - if (!stageExecutorError) { - - this.allocator.get((getWorkerError, worker) => { - if (!getWorkerError) { + analytics.push(`acquire-worker-${analyticsGuid}`); + this.allocator.get((getWorkerError, worker) => { + if (getWorkerError) { + return callback(getWorkerError, test); + } - this.analytics.mark("acquire-worker-" + analyticsGuid); + analytics.mark(`acquire-worker-${analyticsGuid}`); this.runTest(test, worker) .then((runResults) => { // Give this worker back to the allocator /*eslint-disable max-nested-callbacks*/ - test.executor.teardownTest(token, () => { - this.allocator.release(worker); - }); + test.executor.teardownTest(token, + () => this.allocator.release(worker)); test.workerIndex = worker.index; - test.error = runResults.error; - test.stdout = runResults.stdout; - test.stderr = runResults.stderr; + _.merge(test, runResults); // Pass or fail the test if (runResults.error) { @@ -199,7 +133,7 @@ class TestRunner { test.pass(); } - onTestComplete(null, test); + callback(null, test); }) .catch((runTestError) => { // Catch a testing infrastructure error unrelated to the test itself failing. 
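Note: the `stageTestHandler` above asks the configured resource strategy for a resource before acquiring a worker, and re-queues the test if none is available. A user-supplied strategy only needs to export the hooks consumed by `ResourceStrategy` (see `src/strategies/resource/never.js` above). The module below is a hypothetical sketch for illustration only; the lease counter and the `--max_leases` flag are invented and not part of this change.

```js
"use strict";

// Hypothetical resource strategy: allow at most `maxLeases` tests to hold a
// resource at once; later tests wait until a lease is released.
let maxLeases = 2;
let activeLeases = 0;
const waiters = [];

module.exports = {
  name: "example-lease-resource-strategy",
  description: "Grants a limited number of leases; tests wait for a free lease",
  failReason: () => `No lease became available (limit: ${maxLeases})`,

  // Called by ResourceStrategy when present; argv comes from the CLI/config.
  setConfiguration(argv) {
    if (argv.max_leases) {
      maxLeases = parseInt(argv.max_leases, 10);
    }
  },

  holdResourceForTest(resource) {
    if (activeLeases < maxLeases) {
      activeLeases++;
      return Promise.resolve(resource);
    }
    // No lease free: resolve once releaseResourceForTest frees one.
    return new Promise((resolve) => {
      waiters.push(() => {
        activeLeases++;
        resolve(resource);
      });
    });
  },

  releaseResourceForTest(resource) {
    activeLeases--;
    const next = waiters.shift();
    if (next) {
      next();
    }
    return Promise.resolve(resource);
  }
};
```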
@@ -211,9 +145,8 @@ class TestRunner { // Give this worker back to the allocator /*eslint-disable max-nested-callbacks*/ - test.executor.wrapup(() => { - this.allocator.release(worker); - }); + test.executor.teardownTest(token, + () => this.allocator.release(worker)); test.workerIndex = worker.index; test.error = runTestError; @@ -221,18 +154,57 @@ class TestRunner { test.stderr = runTestError; test.fail(); - onTestComplete(runTestError, test); + callback(runTestError, test); }); - } else { - // fail test due to failure of allocator.get() - failTest(getWorkerError); - } + }); + }); - } else { - // fail test due to failure of test.executor.stage() - failTest(stageExecutorError); - } - }); + }) + .catch((err) => { + // no resource is available for current test + // we put test back to the queue + logger.warn(`No available resource for ${test.toString()},` + + " we'll put it back in the queue."); + + callback(err, test); + }); + + } + + completeQueueHandler() { + + this.setTimeout(() => { + + this.logTestsSummary(); + // flushing all listeners + Promise + .all( + _.map(this.listeners, + (listener) => new Promise((innerResolve) => { + listener + .flush() + .then(() => innerResolve()) + .catch((err) => { + logger.err(`Error when flushing listener output: ${err}. ` + + "This error doesn't impact test result"); + // we eat this error and contiue the listner.flush() + return innerResolve(); + }); + }))) + .then(() => this.onFinish(this.queue.getFailedTests())); + }, FINAL_CLEANUP_DELAY, true); + } + + run() { + this.startTime = (new Date()).getTime(); + + const profileStatement = this.profiles.map((b) => b.toString()).join(", "); + const serialStatement = this.serial ? "in serial mode" : `with ${this.MAX_WORKERS} workers`; + + logger.log(`Running ${this.queue.getTestAmount()} tests` + + ` ${serialStatement} with [${profileStatement}]`); + + return this.queue.proceed(); } // Spawn a process for a given test run @@ -241,355 +213,292 @@ class TestRunner { // Rejections only happen if we encounter a problem with magellan itself, not // the test. The test will resolve with a test result whether it fails or passes. execute(testRun, test) { - const deferred = Q.defer(); + return new Promise((resolve, reject) => { - if (testRun.enableExecutor - && typeof testRun.enableExecutor === "function") { - // if we have addExecutor defined in test run (new in magellan 10.0.0) - testRun.enableExecutor(test.executor); - } + if (_.isFunction(testRun.enableExecutor)) { + // if we have addExecutor defined in test run (new in magellan 10.0.0) + testRun.enableExecutor(test.executor); + } - let env; - try { - env = testRun.getEnvironment(this.settings.environment); - } catch (e) { - deferred.reject(e); - return deferred.promise; - } + let env; + try { + env = testRun.getEnvironment(this.settings.environment); + } catch (err) { + return reject(err); + } - const options = { - env, - silent: true, - detached: false, - stdio: ["pipe", "pipe", "pipe", "ipc"] - }; - - let handler; - try { - ////////////////////////////////////////////////// - handler = this.executors[test.profile.executor].execute(testRun, options); - this.notIdle(); - } catch (e) { - deferred.reject(e); - return deferred.promise; - } + const options = { + env, + silent: true, + detached: false, + stdio: ["pipe", "pipe", "pipe", "ipc"] + }; - // Simulate some of the aspects of a node process by adding stdout and stderr streams - // that can be used by listeners and reporters. 
- const statusEmitter = new EventEmitter(); - statusEmitter.stdout = handler.stdout; - statusEmitter.stderr = handler.stderr; - const statusEmitterEmit = (type, message) => { - statusEmitter.emit(type, message); - }; - - let sentry; - - let testMetadata; - let stdout = clc.greenBright(logStamp()) + " Magellan child process start\n\n"; - let stderr = ""; - - try { - // Attach listeners that respond to messages sent from the running test. - // These messages are sent with process.send() - this.listeners.forEach((listener) => { - if (listener.listenTo) { - listener.listenTo(testRun, test, statusEmitter); + let childProcess; + try { + ////////////////////////////////////////////////// + childProcess = new ChildProcessHandler( + this.executors[test.profile.executor] + .execute(testRun, options) + ); + + if (!this.queue.isIdle()) { + // we transitioned from being idle to being busy + analytics.mark("magellan-busy", "busy"); } - }); + } catch (err) { + return reject(err); + } - statusEmitterEmit("message", { - type: "worker-status", - status: "started", - name: test.locator.toString() - }); + let sentry; - } catch (e) { - deferred.reject(e); - return deferred.promise; - } + let testMetadata; - statusEmitterEmit("message", { - type: "analytics-event", - data: { - name: "test-run-" + testRun.guid, - - markers: [{ - name: "start", - t: Date.now() - }], - - metadata: { - test: test.locator.toString(), - profile: test.profile.id, - // NOTE: attempt numbers are 1-indexed - attemptNumber: test.attempts + 1 - } + try { + // Attach listeners that respond to messages sent from the running test. + // These messages are sent with process.send() + this.listeners.forEach((listener) => { + if (_.isFunction(listener.listenTo)) { + listener.listenTo(testRun, test, childProcess.emitter); + } + }); + + childProcess.emitMessage({ + type: "worker-status", + status: "started", + name: test.locator.toString() + }); + + } catch (err) { + return reject(err); } - }); - // Note: There are three ways a process can die: - // - // 1. "close" emitted. - // 2. "exit" emitted. - // 3. direct call of workerClosed(), with a kill of the process tree. - // - // Because "close" emits unpredictably some time after we fulfill case - // #3, we wrap this callback in once() so that we only clean up once. - const workerClosed = once((code) => { - this.maybeIdle(); - - statusEmitterEmit("message", { - type: "analytics-event-mark", - eventName: "test-run-" + testRun.guid, + childProcess.emitMessage({ + type: "analytics-event", data: { - name: code === 0 ? "passed" : "failed", - t: Date.now() + name: "test-run-" + testRun.guid, + + markers: [{ + name: "start", + t: Date.now() + }], + + metadata: { + test: test.locator.toString(), + profile: test.profile.id, + // NOTE: attempt numbers are 1-indexed + attemptNumber: test.attempts + 1 + } } }); - test.stopClock(); - this.clearInterval(sentry); - - // add executor info into meta-data - if (testMetadata) { - testMetadata.executor = test.executor.shortName; - } + // Note: There are three ways a process can die: + // + // 1. "close" emitted. + // 2. "exit" emitted. + // 3. direct call of workerClosed(), with a kill of the process tree. + // + // Because "close" emits unpredictably some time after we fulfill case + // #3, we wrap this callback in once() so that we only clean up once. 
+ const closeWorker = once((code) => { + + if (this.queue.isIdle()) { + // we transitioned from being busy into being idle + analytics.mark("magellan-busy", "idle"); + } - statusEmitterEmit("message", { - type: "worker-status", - status: "finished", - name: test.locator.toString(), - passed: code === 0, - metadata: testMetadata - }); + childProcess.emitMessage({ + type: "analytics-event-mark", + eventName: "test-run-" + testRun.guid, + data: { + name: code === 0 ? "passed" : "failed", + t: Date.now() + } + }); - // Detach ALL listeners that may have been attached - handler.stdout.removeAllListeners(); - handler.stderr.removeAllListeners(); - handler.stdout.unpipe(); - handler.stderr.unpipe(); - handler.removeAllListeners(); + test.stopClock(); + this.clearInterval(sentry); - statusEmitter.stdout = null; - statusEmitter.stderr = null; + // add executor info into meta-data + if (testMetadata) { + testMetadata.executor = test.executor.shortName; + } - test.executor.summerizeTest( - this.buildId, - { - result: code === 0, + childProcess.emitMessage({ + type: "worker-status", + status: "finished", + name: test.locator.toString(), + passed: code === 0, metadata: testMetadata - }, - (additionalLog) => { - // Resolve the promise - deferred.resolve({ - error: code === 0 ? null : "Child test run process exited with code " + code, - stderr, - stdout: stdout + - (additionalLog && typeof additionalLog === "string" ? additionalLog : "") - }); }); - }); - - if (this.debug) { - // For debugging purposes. - handler.on("message", (msg) => { - logger.debug("Message from worker:" + JSON.stringify(msg)); - }); - } - // - // Via IPC, capture the current selenium session id. - // Reporters and listeners can exploit this to tie certain runtime artifacts to the unique - // identity of the test run. - // - // FIXME: make it possible to receive this information from test frameworks not based on nodejs - // - handler.on("message", (message) => { - if (message.type === "test-meta-data") { - testMetadata = message.metadata; - } - }); + // Detach ALL listeners that may have been attached + childProcess.teardown(); - handler.stdout.on("data", (data) => { - let text = "" + data; - if (text.trim() !== "") { - text = text - .split("\n") - .filter((line) => { - /* istanbul ignore next */ - return line.trim() !== "" || line.indexOf("\n") > -1; - }) - .map((line) => { - // NOTE: since this comes from stdout, color the stamps green - return clc.greenBright(logStamp()) + " " + line; + test.executor.summerizeTest( + this.buildId, + { + result: code === 0, + metadata: testMetadata + }, + (additionalLog) => resolve({ + error: code === 0 ? null : "Child test run process exited with code " + code, + stderr: childProcess.stderr, + stdout: childProcess.stdout + + (additionalLog && typeof additionalLog === "string" ? additionalLog : "") }) - .join("\n"); + ); + }); - /* istanbul ignore else */ - if (text.length > 0) { - stdout += text + "\n"; - } else { - stdout += "\n"; - } + if (this.debugVerbose) { + // For debugging purposes. 
+ childProcess.enableDebugMsg(); } - }); - handler.stderr.on("data", (data) => { - let text = "" + data; - if (text.trim() !== "") { - text = text - .split("\n") - .filter((line) => { - /* istanbul ignore next */ - return line.trim() !== "" || line.indexOf("\n") > -1; - }) - .map((line) => { - // NOTE: since this comes from stderr, color the stamps red - return clc.redBright(logStamp()) + " " + line; - }) - .join("\n"); - /* istanbul ignore else */ - if (text.length > 0) { - stdout += text + "\n"; - } else { - stdout += "\n"; + // + // Via IPC, capture the current selenium session id. + // Reporters and listeners can exploit this to tie certain runtime artifacts to the unique + // identity of the test run. + // + // FIXME: make it possible to receive this information from test + // frameworks not based on nodejs + // + + childProcess.onMessage((message) => { + if (message.type === "test-meta-data") { + testMetadata = message.metadata; } - } - }); + }); - handler.on("close", workerClosed); + childProcess.onClose(closeWorker); - // A sentry monitors how long a given worker has been working. - // If bail strategy calls a bail, we kill a worker process and its - // process tree if its been running for too long. - test.startClock(); - sentry = this.setInterval(() => { - const runtime = test.getRuntime(); + // A sentry monitors how long a given worker has been working. + // If bail strategy calls a bail, we kill a worker process and its + // process tree if its been running for too long. + test.startClock(); - if (this.bailStrategy.hasBailed || runtime > settings.testTimeout) { - // Suite won't be bailed if test is killed by exceeding settings.testTimeout - // Stop the sentry now because we are going to yield for a moment before - // calling workerClosed(), which is normally responsible for stopping - // the sentry from monitoring. - this.clearInterval(sentry); + sentry = this.setInterval(() => { + const runtime = test.getRuntime(); - let customMessage = `Killed by Magellan because of ${this.bailStrategy.getBailReason()}`; + if (this.strategies.bail.hasBailed || runtime > settings.testTimeout) { + // Suite won't be bailed if test is killed by exceeding settings.testTimeout + // Stop the sentry now because we are going to yield for a moment before + // calling workerClosed(), which is normally responsible for stopping + // the sentry from monitoring. + this.clearInterval(sentry); - // Tell the child to shut down the running test immediately - if (runtime > settings.testTimeout) { - customMessage = `Killed by Magellan after ${settings.testTimeout}ms (long running test)`; - } + let customMessage = "Killed by Magellan because of" + + ` ${this.strategies.bail.getBailReason()}`; - handler.send({ - signal: "bail", - customMessage - }); + // Tell the child to shut down the running test immediately + if (runtime > settings.testTimeout) { + customMessage = `Killed by Magellan after ${settings.testTimeout}ms` + + " (long running test)"; + } - this.setTimeout(() => { - // We pass code 1 to simulate a failure return code from fork() - workerClosed(1); - }, WORKER_STOP_DELAY); + childProcess.send({ + signal: "bail", + customMessage + }); - } else { - return; - } - }, WORKER_POLL_INTERVAL); + this.setTimeout(() => { + // We pass code 1 to simulate a failure return code from fork() + closeWorker(1); + }, WORKER_STOP_DELAY); + + } else { + return; + } + }, WORKER_POLL_INTERVAL); - return deferred.promise; + }); } // Run a test with a given worker. 
// with a modified version of the test that contains its run status runTest(test, worker) { - const deferred = Q.defer(); - - // do not report test starts if we've bailed. - if (!this.bailStrategy.hasBailed) { - const msg = []; + return new Promise((resolve, reject) => { - msg.push("-->"); - msg.push((this.serial ? "Serial mode" : "Worker " + worker.index) + ","); + // do not report test starts if we've bailed. + if (!this.strategies.bail.hasBailed) { + const mode = this.serial ? "Serial mode" : `Worker ${worker.index}`; + const token = worker.token ? `vm token: ${worker.token}` : ""; - msg.push("port range: [" + worker.portOffset + ", " - + (worker.portOffset + settings.BASE_PORT_SPACING - 1) + "],"); + const msg = `--> ${mode}, port range: ` + + `[${worker.portOffset}, ${worker.portOffset + settings.BASE_PORT_SPACING - 1}], ` + + `${token} ` + + `${test.toString()}`; - if (worker.token) { - msg.push("VM token:" + worker.token + ","); + logger.log(msg); } - msg.push("running test: " + test.toString()); - - logger.log(msg.join(" ")); - } - - let testRun; + try { + const TestRunClass = this.settings.testFramework.TestRun; + const childBuildId = guid(); - try { - const TestRunClass = this.settings.testFramework.TestRun; - const childBuildId = guid(); + // Note: we must sanitize the buildid because it might contain slashes or "..", etc + const tempAssetPath = path.resolve(this.settings.tempDir + "/build-" + + sanitizeFilename(this.buildId) + "_" + childBuildId + "__temp_assets"); - // Note: we must sanitize the buildid because it might contain slashes or "..", etc - const tempAssetPath = this.path.resolve(this.settings.tempDir + "/build-" - + sanitizeFilename(this.buildId) + "_" + childBuildId + "__temp_assets"); + mkdirSync(tempAssetPath); - this.mkdirSync(tempAssetPath); + // magellan default port rule + let ports = { + seleniumPort: worker.portOffset, + mockingPort: null + }; - // magellan default port rule - let ports = { - seleniumPort: worker.portOffset, - mockingPort: null - }; + if (settings.BASE_PORT_SPACING > 1) { + ports.mockingPort = worker.portOffset + 1; + } - if (settings.BASE_PORT_SPACING > 1) { - ports.mockingPort = worker.portOffset + 1; - } + // if executor has its own port rule + if (test.executor.getPorts + && _.isFunction(test.executor.getPorts)) { + ports = test.executor.getPorts({ + portOffset: worker.portOffset, + portIndent: settings.BASE_PORT_SPACING + }); + } - // if executor has its own port rule - if (test.executor.getPorts - && typeof test.executor.getPorts === "function") { - ports = test.executor.getPorts({ - portOffset: worker.portOffset, - portIndent: settings.BASE_PORT_SPACING - }); + const testRun = new TestRunClass(_.assign({ + guid: childBuildId, + + // The id of this build, used by some reporters to identify the overall suite run. This + // can also be used by test run implementations to identify an individual suite run as + // part of some larger suite run. + // NOTE: This must appear as an externally accessible property on the TestRun instance + buildId: this.buildId, + + // Temporary asset path that Magellan guarantees exists and only belongs to this + // individual test run. Temporary files, logs, screenshots, etc can be put here. + // NOTE: This must appear as an externally accessible property on the TestRun instance + tempAssetPath, + + // Magellan environment id (i.e. 
id of browser, id of device, version, etc.), + // typically reflects one of the items from --browsers=item1,item2,item3 options + // environmentId: test.browser.browserId, + profile: test.profile, + // executor: this.executors[test.profile.executor], + + // The locator object originally generated by the plugin itself + locator: test.locator + }, ports)); + + this.setTimeout( + () => this.execute(testRun, test) + .then((testResult) => + this.strategies.resource + .releaseTestResource({ test, token: worker.token }) + .then(() => Promise.resolve(testResult)) + ) + .then((testResult) => resolve(testResult)) + .catch((err) => reject(err)), + WORKER_START_DELAY); + } catch (err) { + return reject(err); } - - testRun = new TestRunClass(_.assign({ - guid: childBuildId, - - // The id of this build, used by some reporters to identify the overall suite run. This - // can also be used by test run implementations to identify an individual suite run as - // part of some larger suite run. - // NOTE: This must appear as an externally accessible property on the TestRun instance - buildId: this.buildId, - - // Temporary asset path that Magellan guarantees exists and only belongs to this - // individual test run. Temporary files, logs, screenshots, etc can be put here. - // NOTE: This must appear as an externally accessible property on the TestRun instance - tempAssetPath, - - // Magellan environment id (i.e. id of browser, id of device, version, etc.), - // typically reflects one of the items from --browsers=item1,item2,item3 options - // environmentId: test.browser.browserId, - profile: test.profile, - // executor: this.executors[test.profile.executor], - - // The locator object originally generated by the plugin itself - locator: test.locator - }, ports)); - } catch (e) { - deferred.reject(e); - } - - if (testRun) { - this.setTimeout(() => { - this.execute(testRun, test) - .then(deferred.resolve) - .catch(deferred.reject); - }, WORKER_START_DELAY); - } - - return deferred.promise; + }); } gatherTrends() { @@ -599,7 +508,7 @@ class TestRunner { let existingTrends; try { - existingTrends = JSON.parse(this.fs.readFileSync("./trends.json")); + existingTrends = JSON.parse(fs.readFileSync("./trends.json")); } catch (e) { existingTrends = { failures: {} }; } @@ -611,54 +520,54 @@ class TestRunner { ? 
existingTrends.failures[key] + localFailureCount : localFailureCount; }); - this.fs.writeFileSync("./trends.json", JSON.stringify(existingTrends, null, 2)); + fs.writeFileSync("./trends.json", JSON.stringify(existingTrends, null, 2)); logger.log("Updated trends at ./trends.json"); } } - logFailedTests() { - logger.log(clc.redBright("============= Failed Tests: =============")); - - this.failedTests.forEach((failedTest) => { - logger.log("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"); - logger.log("Failed Test: " + failedTest.toString()); - logger.log(" # attempts: " + failedTest.attempts); - logger.log(" output: "); - logger.log(failedTest.stdout); - logger.log(failedTest.stderr); - }); - } - // Print information about a completed build to the screen, showing failures and // bringing in any information from listeners - summarizeCompletedBuild() { - const deferred = Q.defer(); - + logTestsSummary() { const retryMetrics = {}; + const failedTests = this.queue.getFailedTests(); + const passedTests = this.queue.getPassedTests(); + this.gatherTrends(); - if (this.failedTests.length > 0) { - this.logFailedTests(); - } + if (!_.isEmpty(failedTests)) { + analytics.mark("magellan-run", "failed"); - let status; + if (!this.serial) { + // only output failed test logs in non-serial mode + logger.log(clc.redBright("============= Failed Tests: =============")); + + _.forEach(failedTests, (test) => { + logger.warn("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"); + logger.warn(` Failed Test: ${test.toString()}`); + logger.warn(` # attempts: ${test.attempts}`); + logger.warn("From last attempt: \n"); + logger.loghelp(test.stdout); + logger.loghelp(test.stderr); + }); + } - if (this.bailStrategy.hasBailed) { - status = clc.redBright(this.bailStrategy.getBailReason()); } else { - status = this.failedTests.length > 0 ? clc.redBright("FAILED") : clc.greenBright("PASSED"); + analytics.mark("magellan-run", "passed"); } - if (this.failedTests.length > 0) { - this.analytics.mark("magellan-run", "failed"); - } else { - this.analytics.mark("magellan-run", "passed"); + let status = failedTests.length > 0 ? 
+ clc.redBright("FAILED") : + clc.greenBright("PASSED"); + + if (this.strategies.bail.hasBailed) { + status += clc.redBright(` due to bail strategy: ${this.strategies.bail.getBailReason()}`); } - this.tests.forEach((test) => { - if (test.status === 3 && test.getRetries() > 0) { + this.queue.tests.forEach((test) => { + if (test.status === Test.TEST_STATUS_SUCCESSFUL + && test.getRetries() > 0) { if (retryMetrics[test.getRetries()]) { retryMetrics[test.getRetries()]++; } else { @@ -668,148 +577,108 @@ class TestRunner { }); logger.log(clc.greenBright("============= Suite Complete =============")); - logger.log(" Status: " + status); - logger.log(" Runtime: " + this.prettyMs((new Date()).getTime() - this.startTime)); - logger.log("Total tests: " + this.numTests); - logger.log(" Successful: " + this.passedTests.length + " / " + this.numTests); + logger.log(` Status: ${status}`); + logger.log(` Runtime: ${prettyMs((new Date()).getTime() - this.startTime)}`); + logger.log(`Total tests: ${this.queue.getTestAmount()}`); + logger.log(` Passed: ${passedTests.length} / ${this.queue.getTestAmount()}`); _.forOwn(retryMetrics, (testCount, numRetries) => { - logger.log(testCount + " test(s) have retried: " + numRetries + " time(s)"); + logger.log(`${testCount} test(s) have retried: ${numRetries} time(s)`); }); - if (this.failedTests.length > 0) { - logger.log(" Failed: " + this.failedTests.length + " / " + this.numTests); + if (!_.isEmpty(failedTests)) { + logger.log(` Failed: ${failedTests.length} / ${this.queue.getTestAmount()}`); } - const skipped = this.numTests - (this.passedTests.length + this.failedTests.length); - if (this.bailStrategy.hasBailed && skipped > 0) { - logger.log(" Skipped: " + skipped); + const skipped = this.queue.getTestAmount() - (passedTests.length + failedTests.length); + if (this.strategies.bail.hasBailed && skipped > 0) { + logger.log(` Skipped: ${skipped}`); } - - const flushNextListener = () => { - if (this.listeners.length === 0) { - // There are no listeners left to flush. We've summarized all build reports. - deferred.resolve(); - } else { - // flush listeners in the same order we added them to the listeners list - const listener = this.listeners.shift(); - if (typeof listener.flush === "function") { - // This listener implements flush. Run it and check if the result is a promise - // in case we need to wait on the listener to finish a long-running task first. - const promise = listener.flush(); - if (promise && typeof promise.then === "function") { - // This is a listener that returns a promise. Wait and then flush. - promise - .then(flushNextListener) - .catch((error) => { - logger.log("Error when flushing listener output: ", error); - flushNextListener(); - }); - } else { - // This listener that does not return a promise. Keep flushing! - flushNextListener(); - } - } else { - // This listener doesn't implement flush(). Keep flushing! - flushNextListener(); - } - } - }; - - flushNextListener(); - - return deferred.promise; - } - - // Handle an empty work queue: - // Display a build summary and then either signal success or failure. - buildFinished() { - this.setTimeout(() => { - this.summarizeCompletedBuild().then(() => { - if (this.failedTests.length === 0) { - this.onSuccess(); - } else { - this.onFailure(this.failedTests); - } - }); - }, FINAL_CLEANUP_DELAY, true); } // Completion callback called by async.queue when a test is completed - onTestComplete(error, test) { - if (this.bailStrategy.hasBailed) { - // Ignore results from this test if we've bailed. 
This is likely a test that + completeTestHandler(error, test) { + + if (this.strategies.bail.hasBailed) { + // Ignore results from this test if we've bailed by PREVIOUS tests. This is likely a test that // was killed when the build went into bail mode. - logger.warn("\u2716 " + clc.redBright("KILLED ") + " " + test.toString() - + (this.serial ? "\n" : "")); + logger.warn(`\u2716 ${clc.redBright("KILLED")} ${test.toString()} ` + + `${this.serial ? "\n" : ""}`); + + // if we land here current test should be marked as skipped even though + // nightwatch marks it as failed + test.status = Test.TEST_STATUS_SKIPPED; return; } - const successful = test.status === Test.TEST_STATUS_SUCCESSFUL; - let testRequeued = false; + let status = clc.greenBright("PASS"); + let enqueueNote = ""; - if (successful) { - // Add this test to the passed test list, then remove it from the failed test - // list (just in case it's a test we just retried after a previous failure). - this.passedTests.push(test); - this.failedTests = _.difference(this.failedTests, this.passedTests); - } else { + switch (test.status) { + case Test.TEST_STATUS_SUCCESSFUL: + // Add this test to the passed test list, then remove it from the failed test + // list (just in case it's a test we just retried after a previous failure). + break; + + case Test.TEST_STATUS_FAILED: + status = clc.redBright("FAIL"); if (this.settings.gatherTrends) { const key = test.toString(); - /*eslint-disable no-magic-numbers*/ + /*eslint-disable no-magic-numbers*/ this.trends.failures[key] = this.trends.failures[key] > -1 - ? this.trends.failures[key] + 1 : 1; + ? this.trends.failures[key] + 1 : 1; } - /*eslint-disable no-magic-numbers*/ - if (this.failedTests.indexOf(test) === -1) { - this.failedTests.push(test); - } + // if suite should bail due to failure + this.strategies.bail.shouldBail({ + totalTests: this.queue.tests, + passedTests: this.queue.getPassedTests(), + failedTests: this.queue.getFailedTests() + }); - // Note: Tests that failed but can still run again are pushed back into the queue. - // This push happens before the queue is given back flow control (at the end of - // this callback), which means that the queue isn't given the chance to drain. - if (!test.canRun(true)) { - this.q.push(test, this.onTestComplete.bind(this)); - testRequeued = true; + // Note: Tests that failed but can still run again are pushed back into the queue. + // This push happens before the queue is given back flow control (at the end of + // this callback), which means that the queue isn't given the chance to drain. + if (!test.canRun()) { + this.queue.enqueue(test, constants.TEST_PRIORITY.RETRY); + + enqueueNote = clc.cyanBright(`(will retry, ${test.maxAttempts - test.attempts}` + + ` time(s) left). Spent ${test.getRuntime()} ms`); } + break; + + case Test.TEST_STATUS_NEW: + // no available resource + status = clc.yellowBright("RETRY"); + this.queue.enqueue(test, constants.TEST_PRIORITY.RETRY); + + enqueueNote = clc.cyanBright("(will retry). 
") + clc.redBright(error.message); + break; } - let prefix; - let suffix; + const failedTests = this.queue.getFailedTests(); + const passedTests = this.queue.getPassedTests(); - if (this.serial) { - prefix = "(" + (this.passedTests.length + this.failedTests.length) + " / " - + this.numTests + ")"; - suffix = ""; - } else { - prefix = "(" + (this.passedTests.length + this.failedTests.length) + " / " - + this.numTests + ") <-- Worker " + test.workerIndex; - suffix = ""; + let prefix = `(${failedTests.length + passedTests.length} ` + + `/ ${this.queue.getTestAmount()})`; + + if (test.attempts > 1) { + // this is a retry + prefix = "(retry)"; } - const requeueNote = testRequeued ? clc.cyanBright("(will retry). Spent " - + test.getRuntime() + " msec") : ""; - logger.log(prefix + " " - + (successful ? clc.greenBright("PASS ") : clc.redBright("FAIL ")) + requeueNote + " " - + test.toString() + " " + suffix); + if (!this.serial && test.workerIndex > 0) { + prefix += ` <-- Worker ${test.workerIndex}`; + } - this.checkBuild(); - } + logger.log(`${prefix} ${status} ${enqueueNote} ${test.toString()}`); - // Check to see how the build is going and optionally fail the build early. - checkBuild() { - if (this.bailStrategy.shouldBail({ - totalTests: this.tests, - passedTests: this.passedTests, - failedTests: this.failedTests - })) { + if (this.strategies.bail.hasBailed) { + // we handle bail for CURRENT test here // Kill the rest of the queue, preventing any new tests from running and shutting // down buildFinished - this.q.kill(); - - this.buildFinished(); + return this.queue.earlyTerminate(); } } } diff --git a/src/util/check_ports.js b/src/util/check_ports.js index d192bd9..ab0a8bf 100644 --- a/src/util/check_ports.js +++ b/src/util/check_ports.js @@ -8,16 +8,12 @@ const logger = require("../logger"); const PORT_STATUS_IN_USE = 0; const PORT_STATUS_AVAILABLE = 1; -const checkPortStatus = (desiredPort, callback, opts) => { - const runOpts = _.assign({ - request, - portscanner - }, opts); +const checkPortStatus = (desiredPort, callback) => { - runOpts.request("http://127.0.0.1:" + desiredPort + + request("http://127.0.0.1:" + desiredPort + "/wd/hub/static/resource/hub.html", (seleniumErr) => { if (seleniumErr && seleniumErr.code === "ECONNREFUSED") { - runOpts.portscanner.checkPortStatus(desiredPort, "127.0.0.1", (error, portStatus) => { + portscanner.checkPortStatus(desiredPort, "127.0.0.1", (error, portStatus) => { if (portStatus === "open") { return callback(PORT_STATUS_IN_USE); } else { @@ -26,7 +22,7 @@ const checkPortStatus = (desiredPort, callback, opts) => { }); } else { logger.log( - "Found selenium HTTP server at port " + desiredPort + ", port is in use."); + "Found selenium HTTP server at port " + desiredPort + ", port is in use."); return callback(PORT_STATUS_IN_USE); } }); @@ -41,7 +37,7 @@ const checkPortStatus = (desiredPort, callback, opts) => { // // [{ port: number, available: boolean }] // -const checkPortRange = (portNumbers, callback, opts) => { +const checkPortRange = (portNumbers, callback) => { portNumbers = _.cloneDeep(portNumbers); const statuses = []; @@ -55,7 +51,7 @@ const checkPortRange = (portNumbers, callback, opts) => { available: portStatus === PORT_STATUS_AVAILABLE }); checkNextPort(); - }, opts); + }); } else { return callback(statuses); } diff --git a/src/util/childProcess.js b/src/util/childProcess.js new file mode 100644 index 0000000..8798a4a --- /dev/null +++ b/src/util/childProcess.js @@ -0,0 +1,77 @@ +"use strict"; + +const _ = require("lodash"); +const clc = 
require("cli-color"); +const EventEmitter = require("events").EventEmitter; + +const logger = require("../logger"); +const logStamp = require("./logstamp"); + +const MESSAGE = "message"; +const DATA = "data"; +const CLOSE = "close"; + +module.exports = class ChildProcess { + constructor(handler) { + this.stdout = `${clc.yellowBright(logStamp())} =====> Magellan child process start\n`; + this.stderr = ""; + this.handler = handler; + this.handler.stdout.on(DATA, this.onDataCallback.bind(this)); + this.handler.stderr.on(DATA, this.onDataCallback.bind(this)); + + this.emitter = new EventEmitter(); + this.emitter.stdout = handler.stdout; + this.emitter.stderr = handler.stderr; + } + + enableDebugMsg() { + this.handler.on(MESSAGE, (msg) => { + logger.debug(`Message from worker: ${JSON.stringify(msg)}`); + }); + } + + onMessage(callback) { + this.handler.on(MESSAGE, (message) => callback(message)); + } + + onDataCallback(data) { + let text = "" + data; + if (!_.isEmpty(text.trim())) { + text = text + .split("\n") + .filter((line) => !_.isEmpty(line.trim())) + .map((line) => `${clc.yellowBright(logStamp())} ${line}`) + .join("\n"); + + /* istanbul ignore else */ + if (!_.isEmpty(text)) { + this.stdout += text + "\n"; + } else { + this.stdout += "\n"; + } + } + } + + onClose(callback) { + this.handler.on(CLOSE, callback); + } + + send(message) { + this.handler.send(message); + } + + teardown() { + this.handler.stdout.removeAllListeners(); + this.handler.stderr.removeAllListeners(); + this.handler.stdout.unpipe(); + this.handler.stderr.unpipe(); + this.handler.removeAllListeners(); + + this.emitter.stdout = null; + this.emitter.stderr = null; + } + + emitMessage(message) { + this.emitter.emit(MESSAGE, message); + } +}; diff --git a/src/util/load_relative_module.js b/src/util/load_relative_module.js index 357dba0..d17a187 100644 --- a/src/util/load_relative_module.js +++ b/src/util/load_relative_module.js @@ -1,16 +1,18 @@ "use strict"; const path = require("path"); -const _ = require("lodash"); const logger = require("../logger"); module.exports = (mPath, moduleIsOptional, opts) => { let resolvedRequire; mPath = mPath.trim(); - const runOpts = _.assign({ - require - }, opts); + // hacky solution, cannot find a good way to mock it + let inRequire = require; + + if (opts && opts.require) { + inRequire = opts.require; + } if (mPath.charAt(0) === ".") { resolvedRequire = path.resolve(process.cwd() + "/" + mPath); @@ -21,7 +23,7 @@ module.exports = (mPath, moduleIsOptional, opts) => { let RequiredModule; try { /*eslint global-require: 0*/ - RequiredModule = runOpts.require(resolvedRequire); + RequiredModule = inRequire(resolvedRequire); } catch (e) { if (e.code === "MODULE_NOT_FOUND" && moduleIsOptional !== true) { logger.err("Error loading a module from user configuration."); diff --git a/src/util/mkdir_sync.js b/src/util/mkdir_sync.js new file mode 100644 index 0000000..c1e6f9e --- /dev/null +++ b/src/util/mkdir_sync.js @@ -0,0 +1,13 @@ +"use strict"; + +const fs = require("fs"); + +module.exports = (path) => { + try { + fs.mkdirSync(path); + } catch (e) { + if (e.code !== "EEXIST") { + throw e; + } + } +}; diff --git a/src/util/port_util.js b/src/util/port_util.js index adce8c3..a2772f5 100644 --- a/src/util/port_util.js +++ b/src/util/port_util.js @@ -1,8 +1,6 @@ /* eslint operator-assignment: 0 */ "use strict"; -const _ = require("lodash"); - const settings = require("../settings"); const checkPorts = require("./check_ports"); @@ -33,14 +31,11 @@ const util = { // // 1) an Error object, if we 
couldnt' find a port // 2) null and a foundPort as the second argument - acquirePort: (callback, opts) => { - const runOpts = _.assign({ - checkPorts - }, opts); - + acquirePort: (callback) => { let attempts = 0; + const acquire = () => { - runOpts.checkPorts([util.getNextPort()], (result) => { + checkPorts([util.getNextPort()], (result) => { if (result[0].available) { return callback(null, result[0].port); } else { diff --git a/src/util/process_cleanup.js b/src/util/process_cleanup.js index 121eff5..9f17ea1 100644 --- a/src/util/process_cleanup.js +++ b/src/util/process_cleanup.js @@ -1,7 +1,6 @@ "use strict"; const treeUtil = require("testarmada-tree-kill"); -const _ = require("lodash"); const pid = process.pid; const settings = require("../settings"); @@ -10,35 +9,45 @@ const logger = require("../logger"); // Max time before we forcefully kill child processes left over after a suite run const ZOMBIE_POLLING_MAX_TIME = 15000; -module.exports = (callback, opts) => { - const runOpts = _.assign({ - settings, - treeUtil - }, opts); - - if (runOpts.settings.debug) { - logger.log("Checking for zombie processes..."); - } - - runOpts.treeUtil.getZombieChildren(pid, ZOMBIE_POLLING_MAX_TIME, (zombieChildren) => { - if (zombieChildren.length > 0) { - logger.log("Giving up waiting for zombie child processes to die. Cleaning up.."); - - const killNextZombie = () => { - if (zombieChildren.length > 0) { - const nextZombieTreePid = zombieChildren.shift(); - logger.log("Killing pid and its child pids: " + nextZombieTreePid); - runOpts.treeUtil.kill(nextZombieTreePid, "SIGKILL", killNextZombie); - } else { - logger.log("Done killing zombies."); - return callback(); - } - }; +module.exports = (err) => { - return killNextZombie(); - } else { - logger.debug("No zombies found."); - return callback(); + return new Promise((resolve, reject) => { + if (settings.debug) { + logger.debug("Checking for zombie processes..."); } + + treeUtil.getZombieChildren(pid, ZOMBIE_POLLING_MAX_TIME, (zombieChildren) => { + if (zombieChildren.length > 0) { + logger.log("Giving up waiting for zombie child processes to die. Cleaning up.."); + + const killNextZombie = () => { + if (zombieChildren.length > 0) { + const nextZombieTreePid = zombieChildren.shift(); + logger.log("Killing pid and its child pids: " + nextZombieTreePid); + treeUtil.kill(nextZombieTreePid, "SIGKILL", killNextZombie); + } else { + logger.log("Done killing zombies."); + + if (err) { + // pass error down to next step + return reject(err); + } else { + return resolve(); + } + } + }; + + return killNextZombie(); + } else { + logger.debug("No zombies found."); + + if (err) { + // pass error down to next step + return reject(err); + } else { + return resolve(); + } + } + }); }); }; diff --git a/src/worker_allocator.js b/src/worker_allocator.js index 20d9cc0..5eb1d00 100644 --- a/src/worker_allocator.js +++ b/src/worker_allocator.js @@ -5,9 +5,6 @@ const settings = require("./settings"); const portUtil = require("./util/port_util"); const logger = require("./logger"); -const MAX_ALLOCATION_ATTEMPTS = 120; -const WORKER_START_DELAY = 1000; - // Create a worker allocator for MAX_WORKERS workers. Note that the allocator // is not obliged to honor the creation of MAX_WORKERS, just some number of workers // between 0 and MAX_WORKERS. 
@@ -34,12 +31,17 @@ class Allocator {
     this.initializeWorkers(MAX_WORKERS);
   }
 
-  initialize(callback) {
-    callback();
+  setup() {
+    return Promise.resolve();
   }
 
-  teardown(callback) {
-    callback();
+  teardown(err) {
+    if (err) {
+      // something went wrong before this step,
+      // so we pass the error down the chain
+      return Promise.reject(err);
+    }
+    return Promise.resolve();
   }
 
   initializeWorkers(numWorkers) {
@@ -63,23 +65,23 @@ class Allocator {
         attempts++;
         if (worker) {
           return callback(null, worker);
-        } else if (attempts > MAX_ALLOCATION_ATTEMPTS) {
-          const errorMessage = "Couldn't allocate a worker after " + MAX_ALLOCATION_ATTEMPTS
-            + " attempts";
+        } else if (attempts > settings.MAX_ALLOCATION_ATTEMPTS) {
+          const errorMessage = "Couldn't allocate a worker after"
+            + ` ${settings.MAX_ALLOCATION_ATTEMPTS} attempts`;
+
           return callback(errorMessage);
         } else {
           // If we didn't get a worker, try again
-          this.setTimeout(poll, WORKER_START_DELAY);
+          this.setTimeout(poll, settings.WORKER_START_DELAY);
         }
       });
     };
 
-    this.setTimeout(poll, WORKER_START_DELAY);
+    this.setTimeout(poll, settings.WORKER_START_DELAY);
   }
 
   _get(callback) {
     const availableWorker = _.find(this.workers, (e) => !e.occupied);
-
     if (availableWorker) {
       // occupy this worker while we test if we can use it
       availableWorker.occupied = true;
diff --git a/test/README.md b/test/README.md
new file mode 100644
index 0000000..2f8797d
--- /dev/null
+++ b/test/README.md
@@ -0,0 +1,12 @@
+## How to run unit tests
+Magellan uses [jest](https://facebook.github.io/jest/en/) for unit testing. To run the unit tests:
+1. `npm run test` for all tests
+2. or `./node_modules/.bin/jest cli.test.js` for an individual test file.
+
+A unit test run also generates a coverage report; see `coverage/lcov-report/index.html` for the detailed report.
+
+## How to add unit tests to the Magellan repo
+
+1. Name test files `FILE.test.js` and place them in the `test` folder.
+2. Use mocks as much as possible.
+3. Make sure test coverage is above the threshold before submitting to GitHub.
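As a concrete illustration of the conventions described in this README, here is a minimal sketch of what a jest test for the new `src/util/mkdir_sync.js` helper could look like. This file is not part of the change set; the test names and the `jest.mock("fs")` setup are assumptions made purely for illustration.

```js
// test/mkdir_sync.test.js -- hypothetical example, not included in this diff
"use strict";

const fs = require("fs");
const mkdirSync = require("../src/util/mkdir_sync");

// auto-mock the fs core module so no real directory is created
jest.mock("fs");

describe("mkdirSync", () => {
  test("should swallow EEXIST errors", () => {
    fs.mkdirSync.mockImplementation(() => {
      const err = new Error("directory exists");
      err.code = "EEXIST";
      throw err;
    });

    expect(() => mkdirSync("./already_there")).not.toThrow();
  });

  test("should rethrow any other error", () => {
    fs.mkdirSync.mockImplementation(() => {
      const err = new Error("permission denied");
      err.code = "EACCES";
      throw err;
    });

    expect(() => mkdirSync("./no_access")).toThrow("permission denied");
  });
});
```

Such a file could be run on its own with `./node_modules/.bin/jest mkdir_sync.test.js`, as described above.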
\ No newline at end of file diff --git a/test/bail.js b/test/bail.js deleted file mode 100644 index c4ca9b6..0000000 --- a/test/bail.js +++ /dev/null @@ -1,75 +0,0 @@ -"use strict"; - -const chai = require("chai"); -const chaiAsPromise = require("chai-as-promised"); - -const BailStrategy = require("../src/bail"); - -const BAIL_FAST = process.cwd() + "/src/strategies/bail_fast"; -const BAIL_NEVER = process.cwd() + "/src/strategies/bail_never"; -const BAIL_EARLY = process.cwd() + "/src/strategies/bail_early"; - - -chai.use(chaiAsPromise); - -const expect = chai.expect; -const assert = chai.assert; - -describe("Bail Strategy", () => { - let bailStrategy; - - beforeEach(() => { - bailStrategy = new BailStrategy(BAIL_NEVER); - }); - - it("constructor throws error", () => { - try { - bailStrategy = new BailStrategy("FAKE_BAIL"); - assert(false, "shouldn't be here"); - } catch (e) { - - } - }); - - it("call configure on strategy doesn't have setConfiguration", () => { - bailStrategy.configure({ early_bail_threshold: 1 }); - }); - - it("call configure on strategy has setConfiguration", () => { - bailStrategy = new BailStrategy(BAIL_EARLY); - bailStrategy.configure({ early_bail_threshold: 1 }); - }); - - it("call description with a strategy description", () => { - bailStrategy = new BailStrategy(BAIL_EARLY); - expect(bailStrategy.getDescription()).to.equal("Magellan will bail if failure ratio exceeds a threshold within a given period"); - }); - - it("call description without a strategy description", () => { - bailStrategy = new BailStrategy(BAIL_EARLY); - delete bailStrategy.description; - expect(bailStrategy.getDescription()).to.equal(""); - }); - - it("call bailReason with a strategy bailReason", () => { - bailStrategy = new BailStrategy(BAIL_FAST); - expect(bailStrategy.getBailReason()).to.equal("At least one test has been failed"); - }); - - it("call bailReason without a strategy bailReason", () => { - bailStrategy = new BailStrategy(BAIL_FAST); - delete bailStrategy.bailReason; - expect(bailStrategy.getBailReason()).to.equal(""); - }); - - it("call shouldBail if suite shouldn't bail", () => { - bailStrategy = new BailStrategy(BAIL_NEVER); - expect(bailStrategy.shouldBail()).to.equal(false); - }); - - it("call shouldBail if suite should bail", () => { - bailStrategy = new BailStrategy(BAIL_NEVER); - bailStrategy.decide = (info) => true; - expect(bailStrategy.shouldBail()).to.equal(true); - }); -}); \ No newline at end of file diff --git a/test/base_listener.js b/test/base_listener.js deleted file mode 100644 index 76a42f7..0000000 --- a/test/base_listener.js +++ /dev/null @@ -1,32 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const BaseListener = require("../src/listener"); -const chai = require("chai"); -const chaiAsPromised = require("chai-as-promised"); - -const expect = chai.expect; -chai.use(chaiAsPromised); - -describe("listener", () => { - - it("should act like a class", () => { - expect(new BaseListener()).to.be.an.instanceof(BaseListener); - }); - - it("should listenTo", () => { - const myListener = new BaseListener(); - myListener.listenTo(); - expect(myListener.listenTo).to.not.be.null; - }); - - it("should flush", () => { - const myListener = new BaseListener(); - myListener.flush().then(() => {}); - expect(myListener.flush).to.not.be.null; - }); - - it("should initialize", () => { - const myListener = new BaseListener(); - return expect(myListener.initialize()).to.be.fulfilled; - }); -}); diff --git a/test/cli.js b/test/cli.js deleted file mode 100644 
index 298cf24..0000000 --- a/test/cli.js +++ /dev/null @@ -1,575 +0,0 @@ -"use strict"; - -const chai = require("chai"); -const chaiAsPromise = require("chai-as-promised"); -const _ = require("lodash"); - -const cli = require("../src/cli.js"); -const logger = require("../src/logger"); - -chai.use(chaiAsPromise); - -const expect = chai.expect; -const assert = chai.assert; - -const _fakeRequire = (overrides) => { - return (name) => { - if (overrides && overrides(name)) { - return overrides(name); - } - if (name === "../package.json") { - return { - version: "1.2.3" - }; - } - if (name.match(/package.json/)) { - return { - name: "foobar", - dependencies: [], - devDependencies: [] - }; - } - if (name === "./cli_help") { - return { - help: () => { } - }; - } - if (name === "./reporters/slack/settings") { - return {}; - } - if (name === "./reporters/slack/slack" || - name === "./reporters/screenshot_aggregator/reporter" || - name === "./reporters/stdout/reporter") { - return new FakeReporter(); - } - if (name === "testarmada-magellan-local-executor") { - return fakeExecutor; - } - if (name.indexOf("error") > -1) { - throw new Error("FAKE FRAMEWORK EXCEPTION"); - } - - if (name.match(/\/index/)) { - return { - initialize: () => { }, - getPluginOptions: () => { } - }; - } - return { - }; - } -}; - -const fakeExecutor = { - name: "testarmada-magellan-local-executor", - shortName: "local", - help: { - "local_list_browsers": { - "visible": true, - "type": "function", - "description": "List the available browsers configured." - }, - "local_list_fakes": { - "visible": true, - "type": "function", - "description": "List the available browsers configured." - } - }, - validateConfig() { }, - setupRunner() { - return new Promise((resolve) => { - resolve(); - }); - }, - teardownRunner() { - return new Promise((resolve) => { - resolve(); - }); - }, - listBrowsers(param, callback) { - callback(); - } -}; - -const FakeProfiles = { - detectFromCLI() { - return new Promise((resolve) => { - resolve([{ executor: "local" }]); - }); - } -}; - -class FakeAllocator { - constructor() { - } - initialize(cb) { - cb(null); - } - teardown(cb) { - cb(); - } -} - -class FakeTestRunner { - constructor(tests, opts) { - this.tests = tests; - this.opts = opts; - } - start() { - this.opts.onSuccess(); - } -} - -class FakeReporter { - initialize() { - return new Promise((resolve) => { resolve() }); - } - listenTo() { - } - flush() { - return new Promise((resolve) => { resolve() }); - } -} - -const _testConfig = (overrides) => { - return _.merge({ - console: { - log: () => { }, - error: () => { } - }, - require: _fakeRequire(), - process: { - cwd: () => { - return "./"; - }, - exit: () => { - } - }, - analytics: { - mark: () => { }, - push: () => { } - }, - getTests: () => { - return [ - { test: "a" }, - { test: "b" }, - { test: "c" } - ]; - }, - margs: { - init: () => { }, - argv: { - debug: true - } - }, - settings: { - framework: "foo", - testExecutors: { - "local": fakeExecutor - } - }, - processCleanup: (cb) => { - cb(); - }, - path: { - join: (a, b) => { - const arr = [a, b]; - return arr.join("/"); - }, - resolve: (str) => { - return str; - } - }, - WorkerAllocator: FakeAllocator, - TestRunner: FakeTestRunner, - profiles: FakeProfiles, - testFilters: { - detectFromCLI: () => { } - }, - loadRelativeModule: () => { return new FakeReporter(); } - }, overrides); -}; - -describe("pure_cli", () => { - it("allow for config path", () => { - return cli(_testConfig({ - yargs: { - argv: { - config: "FOOBAR_CONFIG" - } - } - })) - 
.then() - .catch(err => assert(false, "shouldn't be here")); - }); - - describe("resolve framework", () => { - it("legacy framework name translation", () => { - return cli(_testConfig({ - settings: { - framework: "vanilla-mocha" - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("handle framework load exception", () => { - return cli(_testConfig({ - settings: { - framework: "error" - } - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - - it("handle framework init exception", () => { - return cli(_testConfig({ - settings: { - framework: "local" - }, - require: _fakeRequire((name) => { - if (name.match(/\/index/)) { - return { - initialize: () => { }, - getPluginOptions: () => { throw new Error("FAKE INIT ERROR") } - }; - } - }) - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - }); - - it("get help", () => { - return cli(_testConfig({ - margs: { - argv: { - help: true - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("setup_teardown", () => { - return cli(_testConfig({ - margs: { - argv: { - setup_teardown: "something" - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - - - describe("resolve executor", () => { - it("as string", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: "testarmada-magellan-local-executor" - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("as array", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: ["testarmada-magellan-local-executor"] - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("malformed", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: {} - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("executor method", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: ["testarmada-magellan-local-executor"], - local_list_browsers: true - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("executor method no matches", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: ["testarmada-magellan-local-executor"], - local_list_fakes: true - } - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("executor load exception", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: ["testarmada-magellan-local-executor"] - } - }, - require: _fakeRequire((name) => { - if (name === "testarmada-magellan-local-executor") { - throw new Error("FAKE EXECUTOR INIT ERROR"); - } - }) - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - }); - - it("enable slack", () => { - return cli(_testConfig({ - require: _fakeRequire((name) => { - if (name === "./reporters/slack/settings") { - return { - enabled: true - }; - } - if (name === "./reporters/slack/slack") { - return FakeReporter; - } - }) - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("reporter as array", () => { - return cli(_testConfig({ - margs: { - argv: { - reporters: ["a", "b", "c"] - } - }, - loadRelativeModule: () => { - return new FakeReporter(); - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("allow optional reporter", () => { - return cli(_testConfig({ - margs: { - argv: { - optional_reporters: ["a", "b", "c"] - } - }, - loadRelativeModule: () => { - return new 
FakeReporter(); - } - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("enable serial", () => { - return cli(_testConfig({ - margs: { - argv: { - serial: true - } - }, - require: _fakeRequire((name) => { - if (name === "./reporters/stdout/reporter") { - return FakeReporter; - } - }) - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("enable screenshot", () => { - return cli(_testConfig({ - settings: { - aggregateScreenshots: true - }, - require: _fakeRequire((name) => { - if (name === "./reporters/screenshot_aggregator/reporter") { - return FakeReporter; - } - }) - })) - .then() - .catch(err => assert(false, "shouldn't be here")); - }); - - it("allow no test", () => { - return cli(_testConfig({ - getTests: () => { - return []; - } - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - - it("allow worker error", () => { - return cli(_testConfig({ - WorkerAllocator: class InvalidWorkerAllocator { - constructor() { - } - initialize(cb) { - cb("FAKE_ERROR"); - } - teardown(cb) { - cb(); - } - } - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - - it("executor teardownRunner error", () => { - return cli(_testConfig({ - margs: { - argv: { - executors: ["testarmada-magellan-local-executor"] - } - }, - require: _fakeRequire((name) => { - if (name === "testarmada-magellan-local-executor") { - return { - name: "testarmada-magellan-local-executor", - shortName: "local", - help: { - "local_list_browsers": { - "visible": true, - "type": "function", - "description": "List the available browsers configured." - }, - "local_list_fakes": { - "visible": true, - "type": "function", - "description": "List the available browsers configured." - } - }, - validateConfig() { }, - setupRunner() { - return new Promise((resolve) => { - resolve(); - }); - }, - teardownRunner() { - return new Promise((resolve, reject) => { - reject("FAKE_ERROR"); - }); - }, - listBrowsers(param, callback) { - callback(); - } - } - } - }) - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - - it("runner on failure", () => { - return cli(_testConfig({ - TestRunner: class InvalidRunner { - constructor(tests, opts) { - this.tests = tests; - this.opts = opts; - } - start() { - this.opts.onFailure(); - } - } - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); - - it("executor teardownRunner error with onFailure", () => { - return cli(_testConfig({ - TestRunner: class InvalidRunner { - constructor(tests, opts) { - this.tests = tests; - this.opts = opts; - } - start() { - this.opts.onFailure(); - } - }, - margs: { - argv: { - executors: ["testarmada-magellan-local-executor"] - } - }, - require: _fakeRequire((name) => { - if (name === "testarmada-magellan-local-executor") { - return { - name: "testarmada-magellan-local-executor", - shortName: "local", - help: { - "local_list_browsers": { - "visible": true, - "type": "function", - "description": "List the available browsers configured." - }, - "local_list_fakes": { - "visible": true, - "type": "function", - "description": "List the available browsers configured." 
- } - }, - validateConfig() { }, - setupRunner() { - return new Promise((resolve) => { - resolve(); - }); - }, - teardownRunner() { - return new Promise((resolve, reject) => { - reject("FAKE_ERROR"); - }); - }, - listBrowsers(param, callback) { - callback(); - } - } - } - }) - })) - .then(() => assert(false, "shouldn't be here")) - .catch(err => { }); - }); -}); \ No newline at end of file diff --git a/test/cli.test.js b/test/cli.test.js new file mode 100644 index 0000000..316d28f --- /dev/null +++ b/test/cli.test.js @@ -0,0 +1,373 @@ +"use strict"; + +const _ = require("lodash"); + +const cli = require("../src/cli.js"); +const profiles = require("../src/profiles"); +const settings = require('../src/settings'); +const syncRequest = require('sync-request'); +const getTests = require("../src/get_tests"); +const testFilters = require("../src/test_filter"); +const WorkerAllocator = require("../src/worker_allocator"); +const processCleanup = require("../src/util/process_cleanup"); +const TestRunner = require("../src/test_runner"); + +jest.mock("../src/profiles"); +jest.mock('../src/settings'); +jest.mock('sync-request'); +jest.mock('../src/get_tests'); +jest.mock('../src/test_filter'); +jest.mock('../src/worker_allocator'); +jest.mock('../src/util/process_cleanup'); +jest.mock('../src/test_runner'); + +describe("cli", () => { + afterEach(() => { + jest.clearAllMocks(); + }); + + test("should initialize", () => { + cli.initialize(); + }); + + test('should print version', () => { + cli.version(); + }); + + test('should print help', () => { + expect(cli.help()).rejects.toThrow("end of help"); + }); + + test('should detect profiles', () => { + profiles.detectFromCLI.mockImplementation(() => Promise.resolve('haha')); + + expect(cli.detectProfiles({ argv: {}, settings: {} })).resolves.toEqual('haha'); + }); + + describe("resolve framework", () => { + test("should handle framework load exception", () => { + expect(cli.loadFramework({ + argv: '', + mockFramework: 'err' + })).rejects.toEqual("Couldn't start Magellan"); + }); + + test("should transilate legacy framework name", () => { + expect(cli.loadFramework({ + argv: '', + mockFramework: 'vanilla-mocha' + })).resolves.toEqual(); + }); + }); + + + describe('resolve listener', () => { + test('should load setup_teardown', () => { + expect(cli.loadListeners({ + argv: { + setup_teardown: './test/mock/mockSetupTeardownListener.js' + } + })).resolves.toHaveLength(1); + }); + + test('should load serial reporter', () => { + expect(cli.loadListeners({ + argv: { + serial: true + } + })).resolves.toHaveLength(1); + }); + + test('should load reporters as array', () => { + expect(cli.loadListeners({ + argv: { + reporters: [ + './src/reporters/stdout/reporter' + ] + } + })).resolves.toHaveLength(1); + }); + + test('should load optional reporters as array', () => { + expect(cli.loadListeners({ + argv: { + optional_reporters: [ + './test/mock/mockOptionalReporter.js' + ] + } + })).rejects.toBeInstanceOf(Error); + }); + }); + + test('should detect profile', () => { + profiles.detectFromCLI.mockImplementation(() => Promise.resolve('haha')); + + expect(cli.detectProfiles({ + argv: { + profile: 'http://some_fake_url#chrome' + }, + settings: { + testExecutors: { + 'sauce': { + getProfiles: (opts) => Promise.resolve(opts.profiles), + getCapabilities: (profile, opts) => Promise.resolve(profile) + } + } + } + })).resolves.toEqual('haha') + }); + + describe('resolve executor', () => { + afterEach(() => { + delete settings.testExecutors; + }); + + test('should enable 
executor', (done) => { + settings.testExecutors = { + local: { + name: 'fake executor', + validateConfig() { } + } + }; + + cli.enableExecutors({ + profiles: [{ + executor: 'local' + }] + }) + .then(es => { + expect(es.local.name).toEqual('fake executor'); + done(); + }); + }); + + test('should not enable executor if no match', (done) => { + settings.testExecutors = { + sauce: { + validateConfig() { } + } + }; + + cli.enableExecutors({ + profiles: [{ + executor: 'local' + }] + }) + .then(es => { + expect(es.local).toBeUndefined(); + done(); + }); + }); + + test('should reject if validateConfig throws error', (done) => { + settings.testExecutors = { + local: { + validateConfig() { throw new Error('on purpose'); } + } + }; + + cli.enableExecutors({ + profiles: [{ + executor: 'local' + }] + }) + .catch(e => { + expect(e).toBeInstanceOf(Error); + expect(e.message).toEqual('on purpose'); + done(); + }); + }); + }); + + describe('resolve strategies', () => { + afterEach(() => { + delete settings.strategies; + }); + + test('should resolve bail strategy', (done) => { + cli.loadStrategies({ argv: {} }) + .then(strategies => { + expect(strategies.bail.hasBailed).toEqual(false); + done(); + }); + }); + + test('should reject if fails in loading bail strategy', (done) => { + cli.loadStrategies({ argv: { strategy_bail: 'error' } }) + .catch(err => { + expect(err).toEqual("Couldn't start Magellan"); + done(); + }); + }); + + test('should resolve resource strategy', (done) => { + cli.loadStrategies({ argv: {} }) + .then(strategies => { + expect(strategies.resource.name).toEqual('testarmada-magellan-no-resource-strategy'); + done(); + }); + }); + + test('should reject if fails in loading resouorce strategy', (done) => { + cli.loadStrategies({ argv: { strategy_resource: 'error' } }) + .catch(err => { + expect(err).toEqual("Couldn't start Magellan"); + done(); + }); + }); + }); + + describe('resolve executors', () => { + + test('should load executors', (done) => { + cli.loadExecutors({ argv: { executors: ['testarmada-magellan-local-executor'] } }) + .then(() => { + done(); + }); + }); + + test('should convert executor to array and load', (done) => { + cli.loadExecutors({ argv: { executors: 'testarmada-magellan-local-executor' } }) + .then(() => { + done(); + }); + }); + + test('should alert if no string or array provided and use default', (done) => { + cli.loadExecutors({ argv: { executors: { a: 'testarmada-magellan-local-executor' } } }) + .then(() => { + done(); + }); + }); + + test('should alert if executor is given and use default', (done) => { + cli.loadExecutors({ argv: {} }) + .then(() => { + done(); + }); + }); + + test('should reject if fails in loading one executor', (done) => { + cli.loadExecutors({ argv: { executors: ['testarmada-magellan-local-executor', 'err'] } }) + .catch((e) => { + expect(e).toEqual("Couldn't start Magellan"); + done(); + }); + }); + }); + + describe('load tests', () => { + test('should load 1 test', (done) => { + getTests.mockReturnValue([{ filename: './a.js' }]); + + cli.loadTests({ argv: { tag: 'web' } }) + .then((tests) => { + expect(tests.length).toEqual(1); + done(); + }) + }); + + test('should reject if no test is identified by filter', () => { + getTests.mockImplementation(() => []); + + return expect(cli.loadTests({ argv: { tag: 'web' } })).rejects.toBeInstanceOf(Error); + }); + }); + + describe('start test suite', () => { + let opts = {}; + + beforeEach(() => { + WorkerAllocator.mockImplementation(() => { + return { + setup: () => Promise.resolve(), + teardown: () 
=> Promise.resolve() + } + }); + + TestRunner.mockImplementation((tests, configs) => { + return { + run() { + return configs.onFinish(); + } + } + }); + + processCleanup.mockImplementation(() => Promise.resolve()); + + opts = { + executors: [{ + setupRunner: () => Promise.resolve(), + teardownRunner: () => Promise.resolve() + }], + strategies: { + resource: { + holdSuiteResources: () => Promise.resolve(), + releaseSuiteResources: () => Promise.resolve() + } + }, + + }; + }); + + test('should pass in happy path', () => { + return expect(cli.startTestSuite(opts)).resolves.toEqual(undefined); + }); + + test('should fail when no resource is available', () => { + opts.strategies.resource.holdSuiteResources = () => Promise.reject('fake resource error'); + return expect(cli.startTestSuite(opts)).rejects.toEqual('fake resource error'); + }); + + test('should fail if test run fails', (done) => { + WorkerAllocator.mockImplementation(() => { + return { + setup: () => Promise.resolve(), + teardown: (err) => Promise.reject(err) + } + }); + + TestRunner.mockImplementation((tests, configs) => { + return { + run() { + return configs.onFinish([{ filename: './b.js' }]); + } + } + }); + + processCleanup.mockImplementation((err) => Promise.reject(err)); + + cli.startTestSuite(opts) + .catch((err) => { + expect(err.message).toEqual("Test suite failed due to test failure"); + done(); + }); + }); + + test('should fail if worker teardownRunner fails', (done) => { + WorkerAllocator.mockImplementation(() => { + return { + setup: () => Promise.resolve(), + teardown: () => Promise.reject('fake worker teardown error') + } + }); + processCleanup.mockImplementation((err) => Promise.reject(err)); + + cli.startTestSuite(opts) + .catch((err) => { + expect(err).toEqual('fake worker teardown error'); + done(); + }); + }); + + test('should fail if process cleanup fails', (done) => { + processCleanup.mockImplementation((err) => Promise.reject('fake process cleanup failure')); + + cli.startTestSuite(opts) + .catch((err) => { + expect(err).toEqual('fake process cleanup failure'); + done(); + }); + }); + }); +}); \ No newline at end of file diff --git a/test/cli_help.js b/test/cli_help.js deleted file mode 100644 index 60e2c08..0000000 --- a/test/cli_help.js +++ /dev/null @@ -1,75 +0,0 @@ -"use strict"; - -const chai = require("chai"); -const chaiAsPromise = require("chai-as-promised"); - -const help = require("../src/cli_help"); - -chai.use(chaiAsPromise); - -const expect = chai.expect; -const assert = chai.assert; - -const opts = { - settings: { - testExecutors: { - "sauce": { - name: "FAKE_EXE_NAME", - help: { - "visible-command": { - "category": "Usability", - "visible": true, - "description": "FAKE_VISIBLE_DES" - }, - "invisible-command": { - "category": "Usability", - "visible": false, - "description": "FAKE_INVISIBLE_DES" - }, - "another-visible-command": { - "category": "Usability", - "description": "FAKE_ANOTHER_VISIBLE_DES" - } - } - } - }, - framework: "FAKE_FRAME_NAME", - testFramework: { - help: { - tags: { - example: "tag1,tag2", - visible: true, - description: "Run all tests that match a list of comma-delimited tags (eg: tag1,tag2)" - }, - group: { - example: "prefix/path", - description: "Run all tests that match a path prefix like ./tests/smoke" - } - } - }, - strategies: { - bail: { - help: { - "early_bail_threshold": { - "visible": true, - "type": "string", - "example": "0.1", - "description": "Ratio of tests that need to fail before we abandon the build" - }, - "early_bail_min_attempts": { - "visible": true, - 
"type": "string", - "example": "10", - "description": "Minimum number of tests that need to run before we apply the bail strategy" - } - } - } - } - } -}; - -describe("cli_help", () => { - it("print executors", () => { - help.help(opts); - }); -}); diff --git a/test/cli_help.test.js b/test/cli_help.test.js new file mode 100644 index 0000000..5ebb562 --- /dev/null +++ b/test/cli_help.test.js @@ -0,0 +1,79 @@ +'use strict'; + +const help = require('../src/cli_help'); +const settings = require('../src/settings'); + +jest.mock('../src/settings', () => { + return { + testExecutors: { + 'sauce': { + name: 'FAKE_EXE_NAME', + help: { + 'visible-command': { + 'category': 'Usability', + 'visible': true, + 'description': 'FAKE_VISIBLE_DES' + }, + 'invisible-command': { + 'category': 'Usability', + 'visible': false, + 'description': 'FAKE_INVISIBLE_DES' + }, + 'another-visible-command': { + 'category': 'Usability', + 'description': 'FAKE_ANOTHER_VISIBLE_DES' + } + } + } + }, + framework: 'FAKE_FRAME_NAME', + testFramework: { + help: { + tags: { + example: 'tag1,tag2', + visible: true, + description: 'Run all tests that match a list of comma-delimited tags (eg: tag1,tag2)' + }, + group: { + example: 'prefix/path', + description: 'Run all tests that match a path prefix like ./tests/smoke' + }, + test: { + example: 'prefix/path/test', + visible: false, + description: 'Run all tests that match a path prefix like ./tests/smoke' + } + } + }, + strategies: { + bail: { + help: { + 'early_bail_threshold': { + 'visible': true, + 'type': 'string', + 'example': '0.1', + 'description': 'Ratio of tests that need to fail before we abandon the build' + }, + 'early_bail_min_attempts': { + 'visible': true, + 'type': 'string', + 'example': '10', + 'description': 'Minimum number of tests that need to run before we apply the bail strategy' + }, + 'early_bail_hidden': { + 'visible': false, + 'type': 'string', + 'example': '10', + 'description': 'Hidden config' + } + } + } + } + }; +}); + +describe('cli_help', () => { + test('print executors', () => { + help.help(); + }); +}); diff --git a/test/get_tests.js b/test/get_tests.js deleted file mode 100644 index 9eee056..0000000 --- a/test/get_tests.js +++ /dev/null @@ -1,23 +0,0 @@ -/* eslint no-undef: 0 */ -"use strict"; -const expect = require("chai").expect; -const getTests = require("../src/get_tests"); - -describe("getTests", () => { - it("should get tests", () => { - expect(getTests({ - a: () => true, - b: () => true - }, { - settings: { - testFramework: { - iterator: () => ["a", "b", "c"], - filters: { - a: () => true, - b: () => true - } - } - } - })).to.eql(true); - }); -}); diff --git a/test/get_tests.test.js b/test/get_tests.test.js new file mode 100644 index 0000000..58c7af0 --- /dev/null +++ b/test/get_tests.test.js @@ -0,0 +1,27 @@ +/* eslint no-undef: 0 */ +'use strict'; + +const settings = require('../src/settings'); +const getTests = require('../src/get_tests'); + +jest.mock('../src/settings', () => { + return { + testFramework: { + iterator: () => ['a', 'b', 'c'], + filters: { + a: () => true, + b: () => true + } + } + }; +}); + +describe('getTests', () => { + test('should get tests', () => { + const tests = getTests({ + a: () => true, + b: () => true + }); + expect(tests).toEqual(true); + }); +}); diff --git a/test/global_analytics.js b/test/global_analytics.js deleted file mode 100644 index 25b0928..0000000 --- a/test/global_analytics.js +++ /dev/null @@ -1,40 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0, no-magic-numbers: 0 */ -"use strict"; 
-const expect = require("chai").expect; -const globalAnalytics = require("../src/global_analytics"); - -describe("globalAnalytics", () => { - - it("should create an empty timeline", () => { - expect(globalAnalytics.getEmitter()).to.not.be.null; - expect(globalAnalytics.sync()).to.be.empty; - }); - - it("should push data to the timeline using push()", () => { - globalAnalytics.push("TestEvent", "Metadata", "Marker"); - expect(globalAnalytics.sync()[0]).to.include({type: "analytics-event"}); - expect(globalAnalytics.sync()[0].data).to.include({name: "TestEvent"}); - expect(globalAnalytics.sync()[0].data).to.include({metadata: "Metadata"}); - expect(globalAnalytics.sync()[0].data.markers[0]).to.include({name: "Marker"}); - expect(globalAnalytics.sync()[0].data.markers[0]).to.include.key("t"); - }); - - it("should push with a default marker name if one is not provided", () => { - globalAnalytics.push("EventName", "metadata"); - expect(globalAnalytics.sync()[1].data.markers[0]).to.include({name: "start"}); - }); - - it("should mark", () => { - globalAnalytics.mark("EventName", "MarkerName"); - expect(globalAnalytics.sync()[2]).to.include({type: "analytics-event-mark"}); - expect(globalAnalytics.sync()[2]).to.include({eventName: "EventName"}); - expect(globalAnalytics.sync()[2].data).to.include({name: "MarkerName"}); - }); - - it("should use a default marker name if one is not provided", () => { - globalAnalytics.mark("EventName"); - expect(globalAnalytics.sync()[3].data).to.include({name: "end"}); - }); - - -}); diff --git a/test/global_analytics.test.js b/test/global_analytics.test.js new file mode 100644 index 0000000..f91954b --- /dev/null +++ b/test/global_analytics.test.js @@ -0,0 +1,28 @@ +"use strict"; + +const _ = require("lodash"); +const globalAnalytics = require("../src/global_analytics"); +const EventEmitter = require("events").EventEmitter; +test("push should work with marker name", () => { + globalAnalytics.push("eventName", { code: 0 }, "magellan-run"); +}); + + +test("push should work without marker name", () => { + globalAnalytics.push("eventName", { code: 0 }); +}); + +test("makr should work with marker name", () => { + globalAnalytics.mark("eventName", "magellan-run"); +}); + +test("makr should work without marker name", () => { + globalAnalytics.mark("eventName"); +}); + +test("sync", () => { + expect(globalAnalytics.sync()).toBeInstanceOf(Array); +}); +test("get emitter", () => { + expect(globalAnalytics.getEmitter()).toBeInstanceOf(EventEmitter); +}); \ No newline at end of file diff --git a/test/help.js b/test/help.js deleted file mode 100644 index 6ceac2f..0000000 --- a/test/help.js +++ /dev/null @@ -1,67 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const cliHelp = require("../src/cli_help"); - -describe("cliHelp", () => { - it("should return default help", () => { - cliHelp.help({ - console: { - log: () => {} - }, - settings: { - testFramework: { - help: { - foobar: { - example: "baz", - description: "d" - } - } - } - } - }); - expect(cliHelp.help).to.exist; - }); - - it("should return default help without example", () => { - cliHelp.help({ - console: { - log: () => {} - }, - settings: { - testFramework: { - help: { - foobar: { - description: "d" - } - } - } - } - }); - expect(cliHelp.help).to.exist; - }); - - it("should return default help with no help key", () => { - cliHelp.help({ - console: { - log: () => {} - }, - settings: { - testFramework: { - } - } - }); - expect(cliHelp.help).to.exist; - 
}); - - it("should return default help with no help keys", () => { - cliHelp.help({ - console: { - log: () => {} - }, - settings: { - } - }); - expect(cliHelp.help).to.exist; - }); -}); diff --git a/test/hosted_profiles.js b/test/hosted_profiles.js deleted file mode 100644 index 776e8d3..0000000 --- a/test/hosted_profiles.js +++ /dev/null @@ -1,64 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const hostedProfile = require("../src/hosted_profiles"); - -describe("hostedProfiles", () => { - it("should return the #fragment from a URL", () => { - expect(hostedProfile.getProfileNameFromURL("http://example.com/#boo")).to.eql("boo"); - }); - - it("should return undefined for a URL wihtout fragments", () => { - expect(hostedProfile.getProfileNameFromURL("http://example.com/")).to.be.undefined; - }); - - it("should return undefined for an invalid URL", () => { - expect(hostedProfile.getProfileNameFromURL("👍")).to.be.undefined; - }); - - it("should hit URLs", () => { - expect(hostedProfile.getProfilesAtURL("http://foozbaz.com", { - syncRequest: () => { - return { - getBody: () => { - return JSON.stringify({ - profiles: "foo" - }); - } - }; - } - })).to.eql({profiles: "foo"}); - }); - - it("should check for malformed responses", () => { - try { - hostedProfile.getProfilesAtURL("http://foozbaz.com", { - syncRequest: () => { - return { - getBody: () => { - return JSON.stringify({}); - } - }; - } - }); - } catch (e) { - expect(e.message).to.eql("Profiles supplied at http://foozbaz.com are malformed."); - } - }); - - it("should check for malformed responses", () => { - try { - hostedProfile.getProfilesAtURL("http://foozbaz.com", { - syncRequest: () => { - return { - getBody: () => { - return {}; - } - }; - } - }); - } catch (e) { - expect(e.message).to.eql("Could not fetch profiles from http://foozbaz.com"); - } - }); -}); diff --git a/test/hosted_profiles.test.js b/test/hosted_profiles.test.js new file mode 100644 index 0000000..70a5144 --- /dev/null +++ b/test/hosted_profiles.test.js @@ -0,0 +1,64 @@ +/* eslint no-undef: 0, no-unused-expressions: 0 */ +'use strict'; + +const syncRequest = require('sync-request'); +const hostedProfile = require('../src/hosted_profiles'); + +jest.mock('sync-request'); + +describe('hostedProfiles', () => { + test('should return the #fragment from a URL', () => { + expect(hostedProfile.getProfileNameFromURL('http://example.com/#boo')).toEqual('boo'); + }); + + test('should return undefined for a URL wihtout fragments', () => { + expect(hostedProfile.getProfileNameFromURL('http://example.com/')).toBeUndefined(); + }); + + test('should return undefined for an invalid URL', () => { + expect(hostedProfile.getProfileNameFromURL('👍')).toBeUndefined(); + }); + + test('should hit URLs', () => { + + syncRequest.mockImplementation(() => { + return { + getBody: () => JSON.stringify({ + profiles: 'foo' + }) + }; + }); + + expect(hostedProfile.getProfilesAtURL('http://foozbaz.com')).toEqual({ profiles: 'foo' }); + }); + + test('should check for malformed responses', () => { + syncRequest.mockImplementation(() => { + return { + getBody: () => JSON.stringify({}) + }; + }); + + try { + hostedProfile.getProfilesAtURL('http://foozbaz.com'); + } catch (e) { + expect(e.message).toEqual('Profiles supplied at http://foozbaz.com are malformed.'); + } + }); + + test('should check for malformed responses', () => { + syncRequest.mockImplementation(() => { + return { + getBody: () => { + return {}; + } + }; + }); + + try { + 
hostedProfile.getProfilesAtURL('http://foozbaz.com'); + } catch (e) { + expect(e.message).toEqual('Could not fetch profiles from http://foozbaz.com'); + } + }); +}); diff --git a/test/main.js b/test/main.js deleted file mode 100644 index 3147b09..0000000 --- a/test/main.js +++ /dev/null @@ -1,11 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const main = require("../src/main"); - -describe("main", () => { - it("should have stuff", () => { - expect(main.Reporter).to.not.be.null; - expect(main.portUtil).to.not.be.null; - }); -}); diff --git a/test/main.test.js b/test/main.test.js new file mode 100644 index 0000000..f82ce70 --- /dev/null +++ b/test/main.test.js @@ -0,0 +1,11 @@ +/* eslint no-undef: 0, no-unused-expressions: 0 */ +'use strict'; + +const main = require('../src/main'); + +describe('main', () => { + test('should have stuff', () => { + expect(main.Reporter).not.toBeNull(); + expect(main.portUtil).not.toBeNull(); + }); +}); diff --git a/test/mkdir_sync.js b/test/mkdir_sync.js deleted file mode 100644 index 44e2e50..0000000 --- a/test/mkdir_sync.js +++ /dev/null @@ -1,47 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const mkdirSync = require("../src/mkdir_sync"); -const sinon = require("sinon"); - -describe("mkdirSync", () => { - it("should call mkdirSync", () => { - const spy = sinon.spy(); - mkdirSync("foo", { - fs: { - mkdirSync: spy - } - }); - expect(spy.called).to.be.true; - }); - - it("should throw", () => { - const ex = {code: "EEXIST"}; - try { - mkdirSync("foo", { - fs: { - mkdirSync: () => { - throw ex; - } - } - }); - } catch (e) { - expect(e).to.be.eql(ex); - } - }); - - it("should throw with odd error", () => { - const ex = {code: "FOO"}; - try { - mkdirSync("foo", { - fs: { - mkdirSync: () => { - throw ex; - } - } - }); - } catch (e) { - expect(e).to.be.eql(ex); - } - }); -}); diff --git a/test/mock/mockExecutor.js b/test/mock/mockExecutor.js new file mode 100644 index 0000000..9d41f48 --- /dev/null +++ b/test/mock/mockExecutor.js @@ -0,0 +1,3 @@ +module.exports = { + +}; \ No newline at end of file diff --git a/test/mock/mockFramework.js b/test/mock/mockFramework.js new file mode 100644 index 0000000..a638dc3 --- /dev/null +++ b/test/mock/mockFramework.js @@ -0,0 +1,9 @@ +export const framework = { + getPluginOptions: ()=>{ + return { + op1: 'a' + }; + }, + + initialize: ()=>{} +}; \ No newline at end of file diff --git a/test/mock/mockOptionalReporter.js b/test/mock/mockOptionalReporter.js new file mode 100644 index 0000000..75bf82e --- /dev/null +++ b/test/mock/mockOptionalReporter.js @@ -0,0 +1,9 @@ +module.exports = class ST { + construct() { + + } + + initialize() { + return Promise.reject(new Error()); + } +} \ No newline at end of file diff --git a/test/mock/mockPackage.json b/test/mock/mockPackage.json new file mode 100644 index 0000000..688e939 --- /dev/null +++ b/test/mock/mockPackage.json @@ -0,0 +1,3 @@ +{ + "version": "1.0.0" +} \ No newline at end of file diff --git a/test/mock/mockSetupTeardownListener.js b/test/mock/mockSetupTeardownListener.js new file mode 100644 index 0000000..cd9bb4a --- /dev/null +++ b/test/mock/mockSetupTeardownListener.js @@ -0,0 +1,9 @@ +module.exports = class ST { + construct() { + + } + + initialize() { + return Promise.resolve(); + } +} \ No newline at end of file diff --git a/test/profiles.js b/test/profiles.js deleted file mode 100644 index c9dfaec..0000000 --- a/test/profiles.js +++ 
/dev/null @@ -1,432 +0,0 @@ -"use strict"; - -const chai = require("chai"); -const chaiAsPromise = require("chai-as-promised"); -const _ = require("lodash"); - -const profile = require("../src/profiles"); -const logger = require("../src/logger"); - -chai.use(chaiAsPromise); - -const expect = chai.expect; -const assert = chai.assert; - -const opts = { - settings: { - testExecutors: { - "sauce": { - getProfiles: (opts) => { - return new Promise((resolve) => { - resolve(opts.profiles); - }); - }, - getCapabilities: (profile, opts) => { - return new Promise((resolve) => { - resolve(profile); - }); - } - } - } - }, - margs: { - argv: {} - }, - syncRequest: (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"chrome\":{\"browser\":\"chrome\"},\"firefox\":{\"browser\":\"firefox\"}}}"; - } - }; - } -}; - -let runOpts = {}; - -describe("handleProfiles", () => { - beforeEach(() => { - runOpts = _.cloneDeep(opts); - }); - - describe("Read from --profile", () => { - it("one profile from http", () => { - runOpts.margs.argv.profile = "http://some_fake_url#chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - }); - }); - - it("one profile from https", () => { - runOpts.margs.argv.profile = "https://some_fake_url#chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - }); - }); - - it("one profile from https with given executor", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"chrome\":{\"browser\":\"chrome\", \"executor\":\"local\"}}}"; - } - }; - }; - - runOpts.settings.testExecutors.local = { - getProfiles: (opts) => { - return new Promise((resolve) => { - resolve(opts.profiles); - }); - }, - getCapabilities: (profile, opts) => { - return new Promise((resolve) => { - resolve(profile); - }); - } - }; - runOpts.margs.argv.profile = "https://some_fake_url#chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("local"); - }); - }); - - it("one profile with five duplicate browsers should return one unique browser", () => { - runOpts.margs.argv.profile = "http://some_fake_url#chrome,chrome,chrome,chrome,chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - }); - }); - - it("multiple profiles from http", () => { - runOpts.margs.argv.profile = "http://some_fake_url#chrome,firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - expect(resolvedprofiles[1].browser).to.equal("firefox"); - expect(resolvedprofiles[1].executor).to.equal("sauce"); - }); - }); - - it("multiple profiles with one duplicate and one unique browser should return two browsers", () => { - 
runOpts.margs.argv.profile = "http://some_fake_url#chrome,firefox,firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - expect(resolvedprofiles[1].browser).to.equal("firefox"); - expect(resolvedprofiles[1].executor).to.equal("sauce"); - }); - }); - - it("multiple profiles with two duplicate browsers should return two browsers", () => { - runOpts.margs.argv.profile = "http://some_fake_url#chrome,firefox,firefox,chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - expect(resolvedprofiles[1].browser).to.equal("firefox"); - expect(resolvedprofiles[1].executor).to.equal("sauce"); - }); - }); - - it("multiple profiles with four duplicate and one unique browser should return two browsers", () => { - runOpts.margs.argv.profile = "http://some_fake_url#firefox,chrome,firefox,firefox,firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("firefox"); - expect(resolvedprofiles[0].executor).to.equal("sauce"); - expect(resolvedprofiles[1].browser).to.equal("chrome"); - expect(resolvedprofiles[1].executor).to.equal("sauce"); - }); - }); - - it("no profile from url", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{}}"; - } - }; - }; - runOpts.margs.argv.profile = "https://some_fake_url#chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - assert(false, "shouldn't be here"); - }) - .catch((err) => { - expect(err).to.equal("Profile chrome not found!"); - }); - }); - - it("two profiles with duplicate profiles should return one unique profile", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"chrome\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"chromeV2\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"firefox\":[{\"browser\":\"firefox\", \"resolution\":\"1280x1024\"}]}}"; - } - }; - }; - - runOpts.margs.argv.profile = "http://some_fake_url#chrome,chromeV2"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].resolution).to.equal("1280x1024"); - }); - }); - - it("two duplicates and one unique profiles should return two unique profiles", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"chrome\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"chromeV2\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"firefox\":[{\"browser\":\"firefox\", \"resolution\":\"1280x1024\"}]}}"; - } - }; - }; - - runOpts.margs.argv.profile = "http://some_fake_url#chrome,chromeV2,firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].resolution).to.equal("1280x1024"); - expect(resolvedprofiles[1].browser).to.equal("firefox"); - 
expect(resolvedprofiles[1].resolution).to.equal("1280x1024"); - }); - }); - - it("two profiles with no duplications should return same two profiles", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"chrome\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"chromeV2\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"firefox\":[{\"browser\":\"firefox\", \"resolution\":\"1280x1024\"}]}}"; - } - }; - }; - - runOpts.margs.argv.profile = "http://some_fake_url#chromeV2,firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].resolution).to.equal("1280x1024"); - expect(resolvedprofiles[1].browser).to.equal("firefox"); - expect(resolvedprofiles[1].resolution).to.equal("1280x1024"); - }); - }); - - it("three unique profiles should return same three profiles", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"chrome\":[{\"browser\":\"chrome\", \"resolution\":\"1280x1024\"}],\"chromeV2\":[{\"browser\":\"chrome\", \"resolution\":\"1200x1024\"}],\"firefox\":[{\"browser\":\"firefox\", \"resolution\":\"1280x1024\"}]}}"; - } - }; - }; - - runOpts.margs.argv.profile = "http://some_fake_url#chrome,chromeV2,firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(3); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[0].resolution).to.equal("1280x1024"); - expect(resolvedprofiles[1].browser).to.equal("chrome"); - expect(resolvedprofiles[1].resolution).to.equal("1200x1024"); - expect(resolvedprofiles[2].browser).to.equal("firefox"); - expect(resolvedprofiles[2].resolution).to.equal("1280x1024"); - }); - }); - - it("no profile matches from url", () => { - runOpts.margs.argv.profile = "https://some_fake_url#internet"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - assert(false, "shouldn't be here"); - }) - .catch((err) => { - expect(err).to.equal("Profile internet not found!"); - }); - }); - - it("no executor found for profile", () => { - runOpts.syncRequest = (method, url) => { - return { - getBody(encoding) { - return "{\"profiles\":{\"firefox\":{\"browser\":\"firefox\", \"executor\":\"local\"}}}"; - } - }; - }; - runOpts.margs.argv.profile = "https://some_fake_url#firefox"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - assert(false, "shouldn't be here"); - }) - .catch((err) => { - expect(err).to.equal("Executor local not found! 
You\'ll need to configure it in magellan.json"); - }); - }); - - it("getCapabilities failed", () => { - runOpts.settings.testExecutors.sauce.getCapabilities = () => { - return new Promise((resolve, reject) => { - reject(new Error("FAKE_ERROR")); - }); - }; - - runOpts.margs.argv.profile = "https://some_fake_url#chrome"; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - assert(false, "shouldn't be here"); - }) - .catch((err) => { - expect(err.message).to.equal("FAKE_ERROR"); - }); - }); - }); - - describe("Read from local", () => { - it("one profile", () => { - runOpts.profiles = [ - { browser: "chrome" } - ]; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - }); - }); - - it("multiple profiles", () => { - runOpts.profiles = [ - { browser: "chrome" }, - { browser: "firefox" }, - { browser: "internet explorer" } - ]; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(3); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[1].browser).to.equal("firefox"); - expect(resolvedprofiles[2].browser).to.equal("internet explorer"); - }); - }); - - it("multiple executors", () => { - runOpts.profiles = [ - { browser: "chrome" } - ]; - - runOpts.settings.testExecutors.local = { - getProfiles: (opts) => { - return new Promise((resolve) => { - resolve(opts.profiles); - }); - } - }; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(2); - expect(resolvedprofiles[0].browser).to.equal("chrome"); - expect(resolvedprofiles[1].browser).to.equal("chrome"); - }); - }); - - it("failed", () => { - runOpts.profiles = [ - { browser: "chrome" } - ]; - - runOpts.settings.testExecutors.sauce.getProfiles = () => { - return new Promise((resolve, reject) => { - reject(new Error("FAKE_ERROR")); - }); - }; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - assert(false, "shouldn't be here"); - }) - .catch((err) => { - expect(err.message).to.equal("FAKE_ERROR"); - }); - }); - }); - - it("profile.toString", () => { - runOpts.profiles = [ - { - browserName: "chrome", - version: 10, - resolution: "1x1", - orientation: "upright", - executor: "on mars", - id: "chrome" - } - ]; - - return profile - .detectFromCLI(runOpts) - .then((resolvedprofiles) => { - expect(resolvedprofiles.length).to.equal(1); - expect(resolvedprofiles[0].toString()) - .to.equal("env:chrome|executor:on mars"); - }); - }); -}); diff --git a/test/profiles.test.js b/test/profiles.test.js new file mode 100644 index 0000000..c5a7254 --- /dev/null +++ b/test/profiles.test.js @@ -0,0 +1,341 @@ +'use strict'; + +const _ = require('lodash'); +const syncRequest = require('sync-request'); +const profile = require('../src/profiles'); + +// jest.mock('sync-request', (method, url) => { +// // return { +// // getBody(encoding) { +// // return "{\"profiles\":{\"chrome\":{\"browser\":\"chrome\"},\"firefox\":{\"browser\":\"firefox\"}}}"; +// // } +// // }; +// return "{\"profiles\":{\"chrome\":{\"browser\":\"chrome\"},\"firefox\":{\"browser\":\"firefox\"}}}" +// }); + +jest.mock('sync-request'); + +const opts = { + settings: { + testExecutors: { + 'sauce': { + getProfiles: (opts) => Promise.resolve(opts.profiles), + getCapabilities: (profile, opts) => Promise.resolve(profile) + } + } + }, + argv: {} +}; + +let runOpts = {}; 
+ + +describe('Read from --profile', () => { + beforeEach(() => { + + syncRequest.mockImplementation(() => { + return { + getBody(encoding) { + return "{\"profiles\":{\"chrome\":{\"browser\":\"chrome\"},\"firefox\":{\"browser\":\"firefox\"}}}"; + } + }; + }); + + runOpts = _.cloneDeep(opts); + }); + + test('one profile from http', (done) => { + runOpts.argv.profile = 'http://some_fake_url#chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(1); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[0].executor).toEqual('sauce'); + done(); + }); + }); + + test('one profile from https', (done) => { + runOpts.argv.profile = 'https://some_fake_url#chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(1); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[0].executor).toEqual('sauce'); + done(); + }); + }); + + test('one profile from https with given executor', (done) => { + syncRequest.mockImplementation(() => { + return { + getBody(encoding) { + return "{\"profiles\":{\"chrome\":{\"browser\":\"chrome\", \"executor\":\"local\"}}}"; + } + }; + }); + + runOpts.settings.testExecutors.local = { + getProfiles: (opts) => Promise.resolve(opts.profiles), + getCapabilities: (profile, opts) => Promise.resolve(profile) + }; + + runOpts.argv.profile = 'https://some_fake_url#chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(1); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[0].executor).toEqual('local'); + done(); + }); + }); + + test('one profile with five duplicate browsers should return one unique browser', (done) => { + runOpts.argv.profile = 'http://some_fake_url#chrome,chrome,chrome,chrome,chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(1); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[0].executor).toEqual('sauce'); + done(); + }); + }); + + test('multiple profiles from http', (done) => { + runOpts.argv.profile = 'http://some_fake_url#chrome,firefox'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(2); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[0].executor).toEqual('sauce'); + expect(resolvedprofiles[1].browser).toEqual('firefox'); + expect(resolvedprofiles[1].executor).toEqual('sauce'); + done(); + }); + }); + + test('multiple profiles with one duplicate and one unique browser should return two browsers', (done) => { + runOpts.argv.profile = 'http://some_fake_url#chrome,firefox,firefox'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(2); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[0].executor).toEqual('sauce'); + expect(resolvedprofiles[1].browser).toEqual('firefox'); + expect(resolvedprofiles[1].executor).toEqual('sauce'); + done(); + }); + }); + + test('multiple profiles with two duplicate browsers should return two browsers', (done) => { + runOpts.argv.profile = 'http://some_fake_url#chrome,firefox,firefox,chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(2); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + 
expect(resolvedprofiles[0].executor).toEqual('sauce'); + expect(resolvedprofiles[1].browser).toEqual('firefox'); + expect(resolvedprofiles[1].executor).toEqual('sauce'); + done(); + }); + }); + + test('multiple profiles with four duplicate and one unique browser should return two browsers', (done) => { + runOpts.argv.profile = 'http://some_fake_url#firefox,chrome,firefox,firefox,firefox'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(2); + expect(resolvedprofiles[0].browser).toEqual('firefox'); + expect(resolvedprofiles[0].executor).toEqual('sauce'); + expect(resolvedprofiles[1].browser).toEqual('chrome'); + expect(resolvedprofiles[1].executor).toEqual('sauce'); + done(); + }); + }); + + test('no profile from url', (done) => { + syncRequest.mockImplementation(() => { + return { + getBody(encoding) { + return "{\"profiles\":{}}"; + } + }; + }); + + runOpts.argv.profile = 'https://some_fake_url#chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + fail('shouldn\'t be here'); + }) + .catch((err) => { + expect(err).toEqual('Profile chrome not found!'); + done(); + }); + }); + + test('no profile matches from url', (done) => { + runOpts.argv.profile = 'https://some_fake_url#internet'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + fail('shouldn\'t be here'); + }) + .catch((err) => { + expect(err).toEqual('Profile internet not found!'); + done(); + }); + }); + + test('no executor found for profile', (done) => { + syncRequest.mockImplementation(() => { + return { + getBody(encoding) { + return "{\"profiles\":{\"firefox\":{\"browser\":\"firefox\", \"executor\":\"local\"}}}"; + } + }; + }); + runOpts.argv.profile = 'https://some_fake_url#firefox'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + fail('shouldn\'t be here'); + }) + .catch((err) => { + expect(err).toEqual('Executor local not found! 
You\'ll need to configure it in magellan.json'); + done(); + }); + }); + + test('getCapabilities failed', (done) => { + runOpts.settings.testExecutors.sauce.getCapabilities = () => Promise.reject(new Error('FAKE_ERROR')); + + runOpts.argv.profile = 'https://some_fake_url#chrome'; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + fail('shouldn\'t be here'); + }) + .catch((err) => { + expect(err.message).toEqual('FAKE_ERROR'); + done(); + }); + }); +}); + +describe('Read from local', () => { + beforeEach(() => { + runOpts = _.cloneDeep(opts); + }); + + test('one profile', (done) => { + runOpts.profiles = [ + { browser: 'chrome' } + ]; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(1); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + done(); + }); + }); + + test('multiple profiles', (done) => { + runOpts.profiles = [ + { browser: 'chrome' }, + { browser: 'firefox' }, + { browser: 'internet explorer' } + ]; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(3); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[1].browser).toEqual('firefox'); + expect(resolvedprofiles[2].browser).toEqual('internet explorer'); + done(); + }); + }); + + it('multiple executors', (done) => { + runOpts.profiles = [ + { browser: 'chrome' } + ]; + + runOpts.settings.testExecutors.local = { + getProfiles: (opts) => Promise.resolve(opts.profiles) + }; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(2); + expect(resolvedprofiles[0].browser).toEqual('chrome'); + expect(resolvedprofiles[1].browser).toEqual('chrome'); + done(); + }); + }); + + test('failed', (done) => { + runOpts.profiles = [ + { browser: 'chrome' } + ]; + + runOpts.settings.testExecutors.sauce.getProfiles = () => Promise.reject(new Error('FAKE_ERROR')); + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + fail('shouldn\'t be here'); + }) + .catch((err) => { + expect(err.message).toEqual('FAKE_ERROR'); + done(); + }); + }); + + test('profile.toString', (done) => { + runOpts.profiles = [ + { + browserName: 'chrome', + version: 10, + resolution: '1x1', + orientation: 'upright', + executor: 'on mars', + id: 'chrome' + } + ]; + + profile + .detectFromCLI(runOpts) + .then((resolvedprofiles) => { + expect(resolvedprofiles.length).toEqual(1); + expect(resolvedprofiles[0].toString()) + .toEqual('env:chrome|executor:on mars'); + done(); + }); + }); +}); diff --git a/test/reporters/reporter.js b/test/reporters/reporter.js deleted file mode 100644 index 56cfc38..0000000 --- a/test/reporters/reporter.js +++ /dev/null @@ -1,13 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const Reporter = require("../../src/reporters/reporter"); - -describe("Reporter", () => { - it("should be a listener", () => { - const r = new Reporter(); - expect(r.initialize).to.not.be.null; - expect(r.listenTo).to.not.be.null; - expect(r.flush).to.not.be.null; - }); -}); diff --git a/test/reporters/reporter.test.js b/test/reporters/reporter.test.js new file mode 100644 index 0000000..7cbaa58 --- /dev/null +++ b/test/reporters/reporter.test.js @@ -0,0 +1,28 @@ +/* eslint no-undef: 0, no-unused-expressions: 0 */ +"use strict"; + +const Reporter = require("../../src/reporters/reporter"); + +describe("Reporter", () => { + test("should be a 
listener", () => { + const r = new Reporter(); + expect(r.initialize).not.toBeNull(); + expect(r.listenTo).not.toBeNull(); + expect(r.flush).not.toBeNull(); + }); + + test('should initialize', () => { + const r = new Reporter(); + return expect(r.initialize()).resolves.toBe(undefined); + }); + + test('should flush', () => { + const r = new Reporter(); + return expect(r.flush()).resolves.toBe(undefined); + }); + + test('should listenTo', () => { + const r = new Reporter(); + r.listenTo(); + }); +}); diff --git a/test/reporters/screenshot_aggregator/reporter.js b/test/reporters/screenshot_aggregator/reporter.js deleted file mode 100644 index 8e42b40..0000000 --- a/test/reporters/screenshot_aggregator/reporter.js +++ /dev/null @@ -1,231 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0, no-magic-numbers: 0 */ -"use strict"; -const expect = require("chai").expect; -const Reporter = require("../../../src/reporters/screenshot_aggregator/reporter"); - -describe("ScreenshotAggregator Reporter", () => { - it("should initialize incorrectly", (done) => { - const r = new Reporter({ - console: {log: () => {}}, - settings: { - } - }); - r.initialize().catch(() => { - done(); - }); - }); - - it("should initialize correctly", (done) => { - const r = new Reporter({ - console: {log: () => {}}, - request: { - post: (url, cb) => { - expect(url).to.not.be.null; - cb(null, null, JSON.stringify({ - status: "success", - buildURL: "http://foo/bar.png" - })); - } - }, - settings: { - aggregatorURL: "http://foo/" - } - }); - r.initialize().then(() => { - r.listenTo("foo", "bar", { - addListener: () => {} - }); - done(); - }); - }); - - it("should handle random messages", (done) => { - const r = new Reporter({ - console: {log: () => {}}, - request: { - post: (url, cb) => { - expect(url).to.not.be.null; - cb(null, null, JSON.stringify({ - status: "success", - buildURL: "http://foo/bar.png" - })); - } - }, - settings: { - aggregatorURL: "http://foo/" - } - }); - r.initialize().then(() => { - r._handleMessage("foo", "bar", { - }); - r._handleMessage("foo", "bar", { - type: "bar" - }); - r._handleMessage("foo", "bar", { - type: "worker-status", - status: "bar" - }); - done(); - }); - }); - - it("should handle run end messages", (done) => { - const r = new Reporter({ - console: {log: () => {}}, - request: { - post: (url, cb) => { - expect(url).to.not.be.null; - cb(null, null, JSON.stringify({ - status: "success", - buildURL: "http://foo/bar.png" - })); - } - }, - settings: { - aggregatorURL: "http://foo/" - }, - glob: { - sync: (pattern) => { - return pattern.indexOf("png") > -1 ? 
["a"] : []; - } - }, - path: { - resolve: () => { - return "b"; - } - }, - fs: { - unlinkSync: () => {}, - createReadStream: () => {} - } - }); - r.initialize().then(() => { - r._handleMessage({ - tempAssetPath: "./foo" - }, { - attempts: 1, - maxAttempts: 3 - }, { - type: "worker-status", - status: "finished", - passed: false - }); - r.flush(); - - r._handleMessage({ - tempAssetPath: "./foo", - buildId: "asdfasdfadf" - }, { - attempts: 2, - maxAttempts: 3, - browser: { - slug: () => { return "foo"; } - } - }, { - type: "worker-status", - status: "finished", - passed: true, - name: "yaddayadda" - }); - r.flush(); - - done(); - }); - }); - - it("should handle run end messages with single shots", (done) => { - const r = new Reporter({ - console: {log: () => {}}, - request: { - post: (url, cb) => { - expect(url).to.not.be.null; - cb(null, null, JSON.stringify({ - status: "success", - buildURL: "http://foo/bar.png" - })); - } - }, - settings: { - aggregatorURL: "http://foo/" - }, - glob: { - sync: (pattern) => { - return pattern.indexOf("png") > -1 ? ["a"] : []; - } - }, - path: { - resolve: () => { - return "b"; - } - }, - fs: { - unlinkSync: () => {}, - createReadStream: () => {} - } - }); - r.initialize().then(() => { - r._handleMessage({ - tempAssetPath: "./foo", - buildId: "asdfasdfadf" - }, { - attempts: 2, - maxAttempts: 3, - browser: { - slug: () => { return "foo"; } - } - }, { - type: "worker-status", - status: "finished", - passed: true, - name: "yaddayadda" - }); - r.flush(); - - done(); - }); - }); - - it("should handle bad server response", (done) => { - const r = new Reporter({ - console: {log: () => {}}, - request: { - post: (url, cb) => { - expect(url).to.not.be.null; - cb(null, null, "foo"); - } - }, - settings: { - aggregatorURL: "http://foo/" - }, - glob: { - sync: () => { - return ["a", "b/a", "c"]; - } - }, - fs: { - unlinkSync: () => {}, - createReadStream: () => {} - } - }); - r.initialize().then(() => { - r._handleMessage({ - tempAssetPath: "./foo", - buildId: "asdfasdfadf" - }, { - attempts: 2, - maxAttempts: 3, - browser: { - slug: () => { return "foo"; } - } - }, { - type: "worker-status", - status: "finished", - passed: true, - name: "yaddayadda" - }); - r.flush(); - - done(); - }); - }); -}); diff --git a/test/reporters/screenshot_aggregator/settings.js b/test/reporters/screenshot_aggregator/settings.js deleted file mode 100644 index 4e80806..0000000 --- a/test/reporters/screenshot_aggregator/settings.js +++ /dev/null @@ -1,10 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const Settings = require("../../../src/reporters/screenshot_aggregator/settings"); - -describe("ScreenshotAggregator Settings", () => { - it("should initialize", () => { - expect(Settings.hasOwnProperty("aggregatorURL")).to.be.true; - }); -}); diff --git a/test/reporters/slack/settings.js b/test/reporters/slack/settings.js deleted file mode 100644 index 76f14ed..0000000 --- a/test/reporters/slack/settings.js +++ /dev/null @@ -1,10 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const Settings = require("../../../src/reporters/slack/settings"); - -describe("Slack Settings", () => { - it("should initialize", () => { - expect(Settings.hasOwnProperty("enabled")).to.be.true; - }); -}); diff --git a/test/reporters/slack/slack.js b/test/reporters/slack/slack.js deleted file mode 100644 index ef82288..0000000 --- a/test/reporters/slack/slack.js +++ /dev/null @@ -1,124 
+0,0 @@ -/* eslint no-undef: 0 */ -"use strict"; -const Reporter = require("../../../src/reporters/slack/slack"); - -class TestSlack { - constructor() { - } - notify() { - } -} - -describe("Slack Reporter", () => { - it("should initialize correctly", (done) => { - const r = new Reporter({ - account: "a", - key: "a", - channel: "a", - username: "a", - iconURL: "a", - jobName: "a", - buildDisplayName: "a", - buildURL: "a" - }, { - console: { - log: () => {}, - error: () => {} - }, - Slack: TestSlack - }); - r.initialize().then(() => { - r.flush(); - r._addFailure("a", "b", "c"); - r._addFailure("a"); - r.listenTo("a", "b", { - addListener: () => {} - }); - r.flush(); - done(); - }); - }); - - it("should have issues with lack of config", (done) => { - const r = new Reporter({ - account: "a", - key: "a", - username: "a", - iconURL: "a", - jobName: "a", - buildDisplayName: "a", - buildURL: "a" - }, { - console: { - log: () => {}, - error: () => {} - }, - Slack: TestSlack - }); - r.initialize().catch(() => { - done(); - }); - }); - - it("should handle messages", (done) => { - const r = new Reporter({ - account: "a", - key: "a", - channel: "a", - username: "a", - iconURL: "a", - jobName: "a", - buildDisplayName: "a", - buildURL: "a" - }, { - console: { - log: () => {}, - error: () => {} - }, - Slack: TestSlack - }); - r.initialize().then(() => { - r._handleMessage("a", "b", { - type: "worker-status", - status: "finished", - name: "a", - passed: false - }); - r._handleMessage("a", "b", { - type: "worker-status", - status: "finished", - name: "a", - passed: false, - metadata: { - sauceURL: "foo", - buildURL: "baz" - } - }); - r._handleMessage("a", "b", { - type: "worker-status", - status: "finished", - name: "a", - passed: false, - metadata: { - buildURL: "baz" - } - }); - r._handleMessage("a", "b", { - type: "worker-status", - status: "finished", - name: "a", - passed: true, - metadata: { - } - }); - r._handleMessage("a", "b", { - type: "foo" - }); - r._handleMessage("a", "b", { - type: "worker-status", - status: "bar" - }); - done(); - }); - }); -}); diff --git a/test/reporters/stdout/reporter.js b/test/reporters/stdout/reporter.test.js similarity index 55% rename from test/reporters/stdout/reporter.js rename to test/reporters/stdout/reporter.test.js index 151a02c..5fd959c 100644 --- a/test/reporters/stdout/reporter.js +++ b/test/reporters/stdout/reporter.test.js @@ -1,16 +1,19 @@ /* eslint no-undef: 0, no-unused-expressions: 0 */ "use strict"; -const expect = require("chai").expect; + const Reporter = require("../../../src/reporters/stdout/reporter"); const sinon = require("sinon"); describe("STDOUT Reporter", () => { it("should be a listener", () => { + const r = new Reporter(); - expect(r.initialize).to.not.be.null; - expect(r.listenTo).to.not.be.null; - expect(r.flush).to.not.be.null; + expect(r.initialize).not.toBeNull(); + expect(r.listenTo).not.toBeNull(); + expect(r.flush).not.toBeNull(); + const spy = sinon.spy(); + r.listenTo(null, null, { stdout: { pipe: spy @@ -19,16 +22,6 @@ describe("STDOUT Reporter", () => { pipe: spy } }); - expect(spy.called).to.be.true; - r.listenTo(null, null, { - stderr: { - pipe: spy - } - }); - r.listenTo(null, null, { - stdout: { - pipe: spy - } - }); + expect(spy.called).toBeTruthy(); }); }); diff --git a/test/strategies/bail.test.js b/test/strategies/bail.test.js new file mode 100644 index 0000000..51db761 --- /dev/null +++ b/test/strategies/bail.test.js @@ -0,0 +1,58 @@ +'use strict'; + +const Bail = require('../../src/strategies/bail'); + +test('should 
construct with default rule', () => { + const bail = new Bail({}); + + expect(bail.hasBailed).toEqual(false); + expect(bail.name).toEqual('testarmada-magellan-never-bail-strategy'); +}); + +test('should construct with given rule', () => { + const bail = new Bail({ strategy_bail: '../../test/strategies/mockBail' }); + + expect(bail.hasBailed).toEqual(false); + expect(bail.name).toEqual('fake-bail-strategy'); +}); + +test('should throw error if errors in module loading', () => { + try { + new Bail({ strategy_bail: './bail/fake' }); + fail(); + } catch (err) { + expect(err).toBeTruthy(); + } +}); + +test('should get description', () => { + const bail = new Bail({}); + expect(bail.getDescription()).toEqual('Magellan never bails, all tests will be executed at least once'); +}); + +test('should warn if no description', () => { + const bail = new Bail({strategy_bail: '../../test/strategies/mockBail'}); + expect(bail.getDescription()).toEqual(''); +}); + +test('should get bail reason', () => { + const bail = new Bail({}); + expect(bail.getBailReason()).toEqual('Magellan should never bail, it should never reach here'); +}); + +test('should warn if no bail reason', () => { + const bail = new Bail({strategy_bail: '../../test/strategies/mockBail'}); + expect(bail.getBailReason()).toEqual(''); +}); + +test('should tell if never bails', () => { + const bail = new Bail({ }); + expect(bail.shouldBail()).toEqual(false); + expect(bail.shouldBail()).toEqual(false); +}); + +test('should tell if bails', () => { + const bail = new Bail({ strategy_bail: '../../test/strategies/mockBail' }); + expect(bail.shouldBail()).toEqual(true); + expect(bail.shouldBail()).toEqual(true); +}); \ No newline at end of file diff --git a/test/strategies/mockBail.js b/test/strategies/mockBail.js new file mode 100644 index 0000000..cbcd75b --- /dev/null +++ b/test/strategies/mockBail.js @@ -0,0 +1,13 @@ +'use strict'; + +/* istanbul ignore next */ +module.exports = { + name: 'fake-bail-strategy', + + setConfiguration() { }, + + decide() { + // never bail + return true; + } +}; diff --git a/test/strategies/mockResource.js b/test/strategies/mockResource.js new file mode 100644 index 0000000..1af8041 --- /dev/null +++ b/test/strategies/mockResource.js @@ -0,0 +1,8 @@ +'use strict'; + +/* istanbul ignore next */ +module.exports = { + name: 'fake-resource-strategy', + + setConfiguration() { } +}; diff --git a/test/strategies/mockResourceReject.js b/test/strategies/mockResourceReject.js new file mode 100644 index 0000000..1404f96 --- /dev/null +++ b/test/strategies/mockResourceReject.js @@ -0,0 +1,20 @@ +'use strict'; + +/* istanbul ignore next */ +module.exports = { + name: 'fake-resource-reject-strategy', + + setConfiguration() { }, + + // resource format + releaseResourceForTest(profile) { + // never use resource manager + return Promise.reject(profile); + }, + + // resource format + releaseResourcesForSuite(opts) { + // never use resource manager + return Promise.reject(opts); + } +}; diff --git a/test/strategies/resource.test.js b/test/strategies/resource.test.js new file mode 100644 index 0000000..f589dff --- /dev/null +++ b/test/strategies/resource.test.js @@ -0,0 +1,94 @@ +'use strict'; + +const Resource = require('../../src/strategies/resource'); + +test('should construct with default rule', () => { + const resource = new Resource({}); + + expect(resource.name).toEqual('testarmada-magellan-no-resource-strategy'); +}); + +test('should construct with given rule', () => { + const resource = new Resource({ strategy_resource: 
'../../test/strategies/mockResource' }); + + expect(resource.name).toEqual('fake-resource-strategy'); +}); + +test('should throw error if errors in module loading', () => { + try { + new Resource({ strategy_resource: './resource/fake' }); + fail(); + } catch (err) { + expect(err).toBeTruthy(); + } +}); + +test('should get description', () => { + const resource = new Resource({}); + expect(resource.getDescription()).toEqual('Magellan doesn\'t require a resource manager to schedule test run'); +}); + +test('should warn if no description', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResource' }); + expect(resource.getDescription()).toEqual(''); +}); + +test('should get failReason', () => { + const resource = new Resource({}); + expect(resource.getFailReason()).toEqual('Magellan shouldn\'t depend on any resource manager to control test run'); +}); + +test('should warn if no failReason', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResource' }); + expect(resource.getFailReason()).toEqual(''); +}); + +test('should hold test resource', () => { + const resource = new Resource({}); + return expect(resource.holdTestResource('fake resource')).resolves.toEqual('fake resource'); +}); + +test('should return promise.resolve if no hold test resource', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResource' }); + return expect(resource.holdTestResource('fake resource')).resolves.toEqual('fake resource'); +}); + +test('should hold suite resources', () => { + const resource = new Resource({}); + return expect(resource.holdSuiteResources('fake resources')).resolves.toEqual('fake resources'); +}); + +test('should return promise.resolve if no hold suite resources', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResource' }); + return expect(resource.holdSuiteResources('fake resources')).resolves.toEqual('fake resources'); +}); + +test('should release test resource', () => { + const resource = new Resource({}); + return expect(resource.releaseTestResource('fake resource')).resolves.toEqual('fake resource'); +}); + +test('should return promise.resolve if no release test resource', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResource' }); + return expect(resource.releaseTestResource('fake resource')).resolves.toEqual('fake resource'); +}); + +test('should return promise.resolve if release test resource fails', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResourceReject' }); + return expect(resource.releaseTestResource('fake resource')).resolves.toEqual('fake resource'); +}); + +test('should release suite resources', () => { + const resource = new Resource({}); + return expect(resource.releaseSuiteResources('fake resources')).resolves.toEqual('fake resources'); +}); + +test('should return promise.resolve if no release suite resources', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResource' }); + return expect(resource.releaseSuiteResources('fake resources')).resolves.toEqual('fake resources'); +}); + +test('should return promise.resolve if release suite resources fails', () => { + const resource = new Resource({ strategy_resource: '../../test/strategies/mockResourceReject' }); + return expect(resource.releaseSuiteResources('fake resources')).resolves.toEqual('fake resources'); +}); \ No newline at end of file diff --git 
a/test/test.js b/test/test.js deleted file mode 100644 index 7437f78..0000000 --- a/test/test.js +++ /dev/null @@ -1,107 +0,0 @@ -/* eslint no-undef: 0, no-magic-numbers: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const Test = require("../src/test"); - -describe("Test Class", () => { - - it("should act like a class", () => { - expect(new Test()).to.be.an.instanceof(Test); - }); - - it("should use passed in locator", () => { - const locator = {2: "b"}; - const myTest = new Test(locator); - expect(myTest.locator).to.equal(locator); - }); - - it("should get runtime", () => { - const browser = "myBrowser"; - const myTest = new Test("", browser); - myTest.runningTime = 50; - expect(myTest.getRuntime()).to.eql(50); - }); - - it("should convert to a string", () => { - const browser = "myBrowser"; - const myTest = new Test("", browser); - myTest.getRuntime(); - expect(myTest.toString()).to.equal(" @myBrowser"); - }); - - it("should use passed in browser", () => { - const browser = "myBrowser"; - const myTest = new Test("", browser); - myTest.toString(); - myTest.getRuntime(); - expect(myTest.browser).to.equal(undefined); - }); - - it("should use passed in Sauce Browser Settings", () => { - const sauceBrowserSettings = {1: "a"}; - const myTest = new Test("", "", sauceBrowserSettings); - expect(myTest.sauceBrowserSettings).to.equal(undefined); - }); - - it("should use passed in max attempts", () => { - const maxAttempts = {1: "a"}; - const myTest = new Test("", "", "", maxAttempts); - expect(myTest.maxAttempts).to.equal(maxAttempts); - }); - - it("should set canRun correctly for a passed test", () => { - const myTest = new Test(); - myTest.pass(); - expect(myTest.canRun()).to.be.true; - }); - - it("should set canRun correctly for a failed test", () => { - const myTest = new Test("", "", "", 1); - myTest.fail(); - myTest.fail(); - expect(myTest.canRun()).to.be.true; - }); - - it("should set canRun correctly for new test", () => { - const myTest = new Test("", "", "", 0); - expect(myTest.canRun()).to.be.false; - }); - - it("should set test status correctly when new", () => { - const myTest = new Test(); - expect(myTest.status).to.equal(1); - }); - - it("should set test status correctly when failed", () => { - const myTest = new Test(); - myTest.fail(); - expect(myTest.status).to.equal(2); - }); - - it("should set test status correctly when failed", () => { - const myTest = new Test(); - myTest.pass(); - expect(myTest.status).to.equal(3); - }); - - it("should start the clock with a Date", () => { - const myTest = new Test(); - myTest.startClock(); - expect(myTest.runningTime).to.equal(undefined); - expect(new Date(myTest.startTime)).to.be.an.instanceof(Date); - }); - - it("should stop the clock and set running time", () => { - const myTest = new Test(); - myTest.startClock(); - myTest.stopClock(); - expect(new Date(myTest.runningTime)).to.be.an.instanceof(Date); - }); - - it("should compute retries", () => { - const myTest = new Test(); - myTest.fail(); - myTest.pass(); - expect(myTest.getRetries()).to.equal(1); - }); -}); diff --git a/test/test.test.js b/test/test.test.js new file mode 100644 index 0000000..769d617 --- /dev/null +++ b/test/test.test.js @@ -0,0 +1,107 @@ +/* eslint no-undef: 0, no-magic-numbers: 0, no-unused-expressions: 0 */ +'use strict'; + +const Test = require('../src/test'); + +describe('Test Class', () => { + + test('should act like a class', () => { + expect(new Test()).toBeInstanceOf(Test); + }); + + test('should use passed in 
locator', () => { + const locator = { 2: 'b' }; + const myTest = new Test(locator); + expect(myTest.locator).toEqual(locator); + }); + + test('should get runtime', () => { + const browser = 'myBrowser'; + const myTest = new Test('', browser); + myTest.runningTime = 50; + expect(myTest.getRuntime()).toEqual(50); + }); + + test('should convert to a string', () => { + const browser = 'myBrowser'; + const myTest = new Test('', browser); + myTest.getRuntime(); + expect(myTest.toString()).toEqual(' @myBrowser'); + }); + + test('should use passed in browser', () => { + const browser = 'myBrowser'; + const myTest = new Test('', browser); + myTest.toString(); + myTest.getRuntime(); + expect(myTest.browser).toBeUndefined(); + }); + + test('should use passed in Sauce Browser Settings', () => { + const sauceBrowserSettings = { 1: 'a' }; + const myTest = new Test('', '', sauceBrowserSettings); + expect(myTest.sauceBrowserSettings).toBeUndefined(); + }); + + test('should use passed in max attempts', () => { + const maxAttempts = { 1: 'a' }; + const myTest = new Test('', '', '', maxAttempts); + expect(myTest.maxAttempts).toEqual(maxAttempts); + }); + + test('should set canRun correctly for a passed test', () => { + const myTest = new Test(); + myTest.pass(); + expect(myTest.canRun()).toBeTruthy(); + }); + + test('should set canRun correctly for a failed test', () => { + const myTest = new Test('', '', '', 1); + myTest.fail(); + myTest.fail(); + expect(myTest.canRun()).toBeTruthy(); + }); + + test('should set canRun correctly for new test', () => { + const myTest = new Test('', '', '', 0); + expect(myTest.canRun()).not.toBeTruthy(); + }); + + test('should set test status correctly when new', () => { + const myTest = new Test(); + expect(myTest.status).toEqual(1); + }); + + test('should set test status correctly when failed', () => { + const myTest = new Test(); + myTest.fail(); + expect(myTest.status).toEqual(2); + }); + + test('should set test status correctly when failed', () => { + const myTest = new Test(); + myTest.pass(); + expect(myTest.status).toEqual(3); + }); + + test('should start the clock with a Date', () => { + const myTest = new Test(); + myTest.startClock(); + expect(myTest.runningTime).toBeUndefined(); + expect(new Date(myTest.startTime)).toBeInstanceOf(Date); + }); + + test('should stop the clock and set running time', () => { + const myTest = new Test(); + myTest.startClock(); + myTest.stopClock(); + expect(new Date(myTest.runningTime)).toBeInstanceOf(Date); + }); + + test('should compute retries', () => { + const myTest = new Test(); + myTest.fail(); + myTest.pass(); + expect(myTest.getRetries()).toEqual(1); + }); +}); diff --git a/test/test_filter.js b/test/test_filter.js deleted file mode 100644 index 81c5e42..0000000 --- a/test/test_filter.js +++ /dev/null @@ -1,63 +0,0 @@ -/* eslint no-undef: 0 */ -"use strict"; -const expect = require("chai").expect; -const testFilter = require("../src/test_filter"); - -describe("test_filter", () => { - it("should filter", () => { - expect(testFilter.filter( - ["a", "b", "c"], - { - a: true, - b: true - }, - { - settings: { - testFramework: { - filters: { - a: () => { return true; }, - b: () => { return false; } - } - } - } - } - )).to.eql(false); - }); - - it("should filter to true", () => { - expect(testFilter.filter( - ["a", "b", "c"], - { - a: true, - b: true - }, - { - settings: { - testFramework: { - filters: { - } - } - } - } - )).to.eql([ "a", "b", "c" ]); - }); - - it("should detectFromCLI", () => { - expect(testFilter.detectFromCLI( - { 
- a: true, - b: true - }, - { - settings: { - testFramework: { - filters: { - a: () => {}, - c: () => {} - } - } - } - } - )).to.eql({a: true}); - }); -}); diff --git a/test/test_filter.test.js b/test/test_filter.test.js new file mode 100644 index 0000000..b631438 --- /dev/null +++ b/test/test_filter.test.js @@ -0,0 +1,34 @@ +/* eslint no-undef: 0 */ +'use strict'; + +const settings = require('../src/settings'); +const testFilter = require('../src/test_filter'); + +jest.mock('../src/settings', () => { + return { + testFramework: { + iterator: () => ['a', 'b', 'c'], + filters: { + a: () => true, + b: () => true + } + } + }; +}); + +describe('test_filter', () => { + test('should filter from settings', () => { + const tests = testFilter.filter(['a', 'b', 'c'], { + a: () => true, + b: () => true + }); + + expect(tests).toEqual(true); + }); + + test('should detect from cli', () => { + const filters = testFilter.detectFromCLI({ a: 'abcdefg' }); + + expect(filters).toEqual({ a: 'abcdefg' }); + }); +}); diff --git a/test/test_queue.test.js b/test/test_queue.test.js new file mode 100644 index 0000000..0c25645 --- /dev/null +++ b/test/test_queue.test.js @@ -0,0 +1,118 @@ +'use strict'; + +const TestQueue = require('../src/test_queue'); +const Test = require('../src/test'); + +test('should construct', () => { + const tq = new TestQueue({ + tests: ['a', 'b'], + workerAmount: 1, + completeQueueHandler: () => { }, + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + expect(tq).toBeInstanceOf(TestQueue); + expect(tq.workerAmount).toEqual(1); +}); + +test('should be idle with no test and zero test amount ', () => { + const tq = new TestQueue({ + tests: [], + workerAmount: 1, + completeQueueHandler: () => { }, + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + expect(tq.isIdle()).toEqual(true); + expect(tq.getTestAmount()).toEqual(0); +}); + +test('shouldn\'t be idle with tests', () => { + const tq = new TestQueue({ + tests: ['a', 'b'], + workerAmount: 1, + completeQueueHandler: () => { }, + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + expect(tq.isIdle()).toEqual(false); + expect(tq.getTestAmount()).toEqual(2); +}); + +test('should return correct failed and passed tests', () => { + const tq = new TestQueue({ + tests: [ + { name: 'a', status: Test.TEST_STATUS_FAILED }, + { name: 'b', status: Test.TEST_STATUS_SUCCESSFUL } + ], + workerAmount: 1, + completeQueueHandler: () => { }, + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + expect(tq.getFailedTests()).toHaveLength(1); + expect(tq.getPassedTests()).toHaveLength(1); +}); + +test('should enqueue a test', () => { + const tq = new TestQueue({ + tests: [ + { name: 'a', status: Test.TEST_STATUS_FAILED }, + { name: 'b', status: Test.TEST_STATUS_SUCCESSFUL } + ], + workerAmount: 1, + completeQueueHandler: () => { }, + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + tq.enqueue({ name: 'c', status: Test.TEST_STATUS_NEW }, 1); +}); + +test('should terminate a queue early', () => { + const tq = new TestQueue({ + tests: [ + { name: 'a', status: Test.TEST_STATUS_FAILED }, + { name: 'b', status: Test.TEST_STATUS_SUCCESSFUL } + ], + workerAmount: 1, + completeQueueHandler: () => Promise.resolve('aha'), + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + expect(tq.earlyTerminate()).resolves.toEqual('aha'); +}); + +test('should proceed if no test in queue', () => { + const tq = new TestQueue({ + tests: [ + { name: 'a', status: 
Test.TEST_STATUS_FAILED }, + { name: 'b', status: Test.TEST_STATUS_SUCCESSFUL } + ], + workerAmount: 1, + completeQueueHandler: () => { }, + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + tq.proceed(); + + expect(tq.isIdle()).toEqual(false); +}); + +test('should proceed if no test in queue', () => { + const tq = new TestQueue({ + tests: [], + workerAmount: 1, + completeQueueHandler: () => Promise.resolve('aha'), + completeTestHandler: () => { }, + stageTestHandler: () => { } + }); + + expect(tq.proceed()).resolves.toEqual('aha'); +}); \ No newline at end of file diff --git a/test/test_runner.js b/test/test_runner.js deleted file mode 100644 index ee722a1..0000000 --- a/test/test_runner.js +++ /dev/null @@ -1,524 +0,0 @@ -"use strict"; - -const chai = require("chai"); -const chaiAsPromise = require("chai-as-promised"); -const _ = require("lodash"); - -const TestRunner = require("../src/test_runner"); -const logger = require("../src/logger"); - -const BailStrategy = require("../src/bail"); - -const BAIL_FAST = process.cwd() + "/src/strategies/bail_fast"; -const BAIL_NEVER = process.cwd() + "/src/strategies/bail_never"; -const BAIL_EARLY = process.cwd() + "/src/strategies/bail_early"; - -chai.use(chaiAsPromise); - -const expect = chai.expect; -const assert = chai.assert; - -const settings = { - buildId: "FADSFASDF_ASDFSADF2", - gatherTrends: true, - testFramework: { - TestRun: function () { - return { - getEnvironment() { }, - enableExecutor() { } - } - } - } -}; - -const tests = [ - { filename: 'tests/demo-app.js' }, - { filename: 'tests/demo-web.js' } -]; - -const executors = { - "sauce": { - name: "testarmada-magellan-sauce-executor", - shortName: "sauce", - - getPorts(opts) { - return { - seleniumPort: opts.portOffset, - mockingPort: opts.portOffset + 1 - } - }, - getProfiles(opts) { - return new Promise((resolve) => { - resolve(opts.profiles); - }); - }, - getCapabilities(profile, opts) { - return new Promise((resolve) => { - resolve(profile); - }); - }, - setupTest(callback) { - callback(null, "FAKE_EXECUTOR_TOKEN"); - }, - teardownTest(token, callback) { - callback(); - }, - execute() { - return { - on(code, callback) { - if (code === "message") { - callback({ type: "test-meta-data", metadata: { resultURL: "FAKE_URL", sessionId: "FAKE_SESSION" } }) - } - else { - callback(0); - } - }, - send() { }, - removeAllListeners() { }, - stdout: { - on(type, callback) { callback() }, - removeAllListeners() { }, - unpipe() { } - }, - stderr: { - on(type, callback) { callback() }, - removeAllListeners() { }, - unpipe() { } - } - } - }, - summerizeTest(buildid, metadat, callback) { callback(); }, - wrapup(callback) { callback(); } - } -}; - -const profiles = [ - { browser: "chrome", executor: "sauce" }, - { browser: "firefox", executor: "sauce" } -]; - -const allocator = { - get(callback) { callback(null, { token: "FAKE_WORKER_TOKEN" }); }, - release() { } -}; - -const options = { - debug: true, - maxWorkers: 1, - maxTestAttempts: 1, - serial: true, - onFailure() { }, - onSuccess() { }, - allocator: {}, - listeners: [{ - flush() { return new Promise((resolve) => { resolve() }); }, - listenTo() { } - }] -}; - -let optsMock = { - fs: { - readFileSync() { - return "{\"failures\":{\"a\":1}}"; - }, - writeFileSync() { } - }, - setTimeout(callback) { callback(); }, - path: { - resolve() { return "FAKE_TEMP_PATH"; } - }, - mkdirSync() { }, - setInterval(callback) { callback(); } -}; - -let optionsMock = {}; - -describe("test_runner", () => { - beforeEach(() => { - 
optsMock.settings = _.cloneDeep(settings); - optionsMock = _.cloneDeep(options); - optionsMock.profiles = _.cloneDeep(profiles); - optionsMock.executors = _.cloneDeep(executors); - optionsMock.allocator = _.cloneDeep(allocator); - optionsMock.bailStrategy = new BailStrategy(BAIL_NEVER); - }); - - describe("initialize", () => { - it("should pass", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - expect(tr.numTests).to.equal(4); - expect(tr.profiles.length).to.equal(2); - }); - - it("[backward compatibility] should pass with bail fast", () => { - optionsMock.bailStrategy = new BailStrategy(BAIL_FAST); - const tr = new TestRunner(tests, optionsMock, optsMock); - expect(tr.bailStrategy.name).to.equal(optionsMock.bailStrategy.name); - }); - - it("[backward compatibility] should pass with bail early", () => { - optionsMock.bailStrategy = new BailStrategy(BAIL_EARLY); - const tr = new TestRunner(tests, optionsMock, optsMock); - expect(tr.bailStrategy.name).to.equal(optionsMock.bailStrategy.name); - }); - - it("[backward compatibility] should pass with bail never", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - expect(tr.bailStrategy.name).to.equal(optionsMock.bailStrategy.name); - }); - }); - - it("notIdle", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.notIdle(); - expect(tr.busyCount).to.equal(1); - }); - - it("maybeIdle", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.busyCount = 1; - tr.maybeIdle(); - expect(tr.busyCount).to.equal(0); - }); - - it("logFailedTest", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.failedTests = [{ - toString() { }, - attempts: 3, - stdout: "", - stderr: "" - }]; - - tr.logFailedTests(); - }); - - it("gatherTrends", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.trends.failures = { - a: 1 - }; - tr.gatherTrends(); - }); - - describe("summarizeCompletedBuild", () => { - it("no failed test", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - - it("two failed tests, bail", () => { - optionsMock.bailStrategy = new BailStrategy(BAIL_FAST); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.bailStrategy.hasBailed = true; - tr.tests[0].status = 3; - tr.tests[0].getRetries = () => 3; - tr.failedTests = [{ attempts: 3 }]; - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - - it("two failed tests, bail with existing retries", () => { - optionsMock.bailStrategy = new BailStrategy(BAIL_FAST); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.bailStrategy.hasBailed = true; - tr.tests[0].status = 3; - tr.tests[0].getRetries = () => 3; - tr.tests[1].status = 3; - tr.tests[1].getRetries = () => 3; - tr.failedTests = [{ attempts: 3 }]; - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - - it("two failed tests, no bail", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.failedTests = [{ attempts: 3 }, { attempts: 3 }]; - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - - it("listener doesn't flush function", () => { - optionsMock.listeners = [{ flush: "asdf" }]; - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - - it("listener doesn't flush 
promise", () => { - optionsMock.listeners = [{ flush() { } }]; - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - - it("listener doesn't flush promise resolve", () => { - optionsMock.listeners = [{ flush() { return new Promise((resolve, reject) => { reject(); }) } }]; - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.startTime = (new Date()).getTime() - 300000; - return tr.summarizeCompletedBuild(); - }); - }); - - describe("buildFinished", () => { - it("should succeed", (done) => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.onFailure = () => assert(false, "shouldn't be here"); - tr.onSuccess = () => done(); - tr.startTime = (new Date()).getTime() - 300000; - tr.buildFinished(); - }); - - it("should fail", (done) => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.onFailure = () => done(); - tr.onSuccess = () => assert(false, "shouldn't be here"); - tr.failedTests = [{}]; - tr.startTime = (new Date()).getTime() - 300000; - tr.buildFinished(); - }); - }); - - it("checkBuild", () => { - optionsMock.bailStrategy = new BailStrategy(BAIL_EARLY); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.bailStrategy.setConfiguration({ "early_bail_min_attempts": 1 }); - tr.startTime = (new Date()).getTime() - 300000; - tr.checkBuild(); - }); - - describe("onTestComplete", () => { - const failedTest = { - locator: { filename: 'tests/demo-app.js' }, - maxAttempts: 3, - attempts: 0, - status: 2, - profile: { browser: 'chrome' }, - executor: undefined, - workerIndex: -1, - error: undefined, - stdout: '', - stderr: '', - getRetries() { }, - canRun() { return false }, - getRuntime() { } - }; - - const successfulTest = { - locator: { filename: 'tests/demo-app.js' }, - maxAttempts: 1, - attempts: 0, - status: 3, - profile: { browser: 'chrome' }, - executor: executors["sauce"], - workerIndex: -1, - error: undefined, - stdout: '', - stderr: '', - getRetries() { } - }; - - it("has bailed", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.bailStrategy.hasBailed = true; - tr.onTestComplete(null, failedTest); - }); - - it("successful test", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.onTestComplete(null, successfulTest); - }); - - it("successful test without serial", () => { - optionsMock.serial = false; - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.onTestComplete(null, successfulTest); - }); - - it("failed test", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.onTestComplete(null, failedTest); - }); - }); - - describe("start", () => { - it("no test", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.tests = []; - tr.start(); - }); - - it("multi tests without serial", () => { - optionsMock.serial = false; - optionsMock.executors["sauce"].summerizeTest = (buildid, metadat, callback) => callback("wt"); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.start(); - }); - }); - - describe("runTest", () => { - const worker = { portOffset: 1 }; - - it("no bail", () => { - const tr = new TestRunner(tests, optionsMock, optsMock); - return tr.runTest(tr.tests[0], worker).then(); - }); - - it("throws error", () => { - optsMock.settings.testFramework.TestRun = function () { - throw new Error("FAKE_ERROR"); - }; - - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.bailStrategy.hasBailed = false; - return 
tr.runTest(tr.tests[0], worker) - .then() - .catch(err => expect(err.message).to.equal("FAKE_ERROR")); - }); - }); - - describe("execute", () => { - const successfulTest = { - locator: { filename: 'tests/demo-app.js' }, - maxAttempts: 1, - attempts: 0, - status: 3, - profile: { browser: 'chrome', executor: "sauce" }, - executor: executors["sauce"], - workerIndex: -1, - error: undefined, - stdout: '', - stderr: '', - getRetries() { }, - startClock() { }, - getRuntime() { }, - stopClock() { } - }; - - it("getEnvironment failed", () => { - const testRun = { - getEnvironment() { throw new Error("FAKE_ERROR") }, - enableExecutor() { } - }; - - const tr = new TestRunner(tests, optionsMock, optsMock); - return tr.execute(testRun, successfulTest) - .then() - .catch(err => { - expect(err.message).to.equal("FAKE_ERROR"); - }) - }); - - it("bail fast", () => { - const testRun = { - getEnvironment() { }, - enableExecutor() { } - }; - - - optionsMock.executors["sauce"].execute = () => { - return { - on(code, callback) { - if (code === "message") { - callback({ type: "test-meta-data", metadata: { resultURL: "FAKE_URL", sessionId: "FAKE_SESSION" } }); - } - else { - callback(1); - } - }, - send() { }, - removeAllListeners() { }, - stdout: { - on() { }, - removeAllListeners() { }, - unpipe() { } - }, - stderr: { - on() { }, - removeAllListeners() { }, - unpipe() { } - } - } - } - - optionsMock.bailStrategy = new BailStrategy(BAIL_FAST); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.bailStrategy.hasBailed = true; - return tr.execute(testRun, successfulTest) - .then(result => expect(result.error).to.equal("Child test run process exited with code 1")); - }); - - it("no bail", () => { - const testRun = { - getEnvironment() { }, - enableExecutor() { } - }; - - optionsMock.executors["sauce"].execute = () => { - return { - on(code, callback) { - if (code === "message") { - callback({ type: "test-meta-data", metadata: { resultURL: "FAKE_URL", sessionId: "FAKE_SESSION" } }) - } - else { - callback(1); - } - }, - send() { }, - removeAllListeners() { }, - stdout: { - on() { }, - removeAllListeners() { }, - unpipe() { } - }, - stderr: { - on() { }, - removeAllListeners() { }, - unpipe() { } - } - } - }; - - optionsMock.bailStrategy = new BailStrategy(BAIL_NEVER); - const tr = new TestRunner(tests, optionsMock, optsMock); - - return tr.execute(testRun, successfulTest) - .then(result => expect(result.error).to.equal("Child test run process exited with code 1")); - }); - }); - - describe("stageTest", () => { - it("executor stage error", (done) => { - const onTestComplete = () => done(); - optionsMock.executors["sauce"].setupTest = (callback) => callback("error"); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.stageTest(tr.tests[0], onTestComplete); - }); - - it("allocator get error", (done) => { - const onTestComplete = () => done(); - optionsMock.allocator.get = (callback) => callback("error"); - - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.stageTest(tr.tests[0], onTestComplete); - }); - - it("runTestError", () => { - optsMock.settings.testFramework.TestRun = function () { - throw new Error("FAKE_ERROR"); - }; - - const onTestComplete = () => done(); - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.stageTest(tr.tests[0], onTestComplete); - }); - - it("successful", (done) => { - const onTestComplete = () => done(); - - const tr = new TestRunner(tests, optionsMock, optsMock); - tr.stageTest(tr.tests[0], onTestComplete); - }); - }); -}); \ No newline at 
end of file diff --git a/test/test_runner.test.js b/test/test_runner.test.js new file mode 100644 index 0000000..481fd73 --- /dev/null +++ b/test/test_runner.test.js @@ -0,0 +1,753 @@ +"use strict"; + +const _ = require("lodash") + +const analytics = require("../src/global_analytics"); +const fs = require("fs"); +const logger = require("../src/logger"); +const mkdirSync = require("../src/util/mkdir_sync"); +const path = require("path"); +const settings = require("../src/settings"); +const ChildProcessHandler = require("../src/util/childProcess"); +const Reporter = require("../src/reporters/stdout/reporter"); +const TestRunner = require("../src/test_runner"); +const Test = require("../src/test"); +const TestQueue = require("../src/test_queue"); + + +jest.mock("../src/global_analytics", () => { + return { + push: () => { }, + mark: () => { } + }; +}); + +jest.mock("fs"); + +jest.mock("../src/util/childProcess"); + +jest.mock("../src/reporters/stdout/reporter"); + +describe("test_runner", () => { + let tests = []; + let options = {}; + + beforeEach(() => { + tests = [{ filename: "tests/demo-web.js" }]; + + options = { + debug: false, + maxWorkers: 1, + maxTestAttempts: 3, + profiles: + [{ + desiredCapabilities: {}, + nightwatchEnv: "invisible_chrome", + id: "invisible_chrome", + executor: "local" + }], + executors: + { + local: + { + name: "testarmada-magellan-local-executor", + shortName: "local", + execute: () => { } + } + }, + listeners: [], + strategies: { + bail: + { + hasBailed: false, + name: "testarmada-magellan-fast-bail-strategy", + description: "Magellan will bail as long as one test fails", + bailReason: "At least one test has failed", + shouldBail: () => { }, + getBailReason: () => "FAKE_BAIL_REASON", + decide: () => false + }, + resource: { + holdTestResource: (opts) => Promise.resolve(), + releaseTestResource: (opts) => Promise.resolve() + } + }, + serial: true, + allocator: { + get: (cb) => cb(null, { index: 1 }), + release: (worker) => true + }, + onFinish: () => Promise.resolve() + } + }) + + test("constructor", () => { + const testRunner = initTestRunner(tests, options); + }); + + describe("stageTestHandler", () => { + test("should stage test properly", (done) => { + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + testRunner.runTest = (test, worker) => Promise.resolve({ erroror: false }); + + testRunner.stageTestHandler(test, (error, test) => { + expect(error).toBeNull(); + expect(test.erroror).toBe(false); + done(); + }); + }); + + test("should have erroror in cb test setup errors out", (done) => { + const testRunner = initTestRunner(tests, options); + const test = stubFailTest(); + + testRunner.runTest = (test, worker) => Promise.resolve({ erroror: false }); + + testRunner.stageTestHandler(test, (error, test) => { + expect(error).toBe("FAKE_ERROR"); + done(); + }); + }); + + test("test run exception results in test failure", (done) => { + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + testRunner.runTest = (test, worker) => Promise.reject({ erroror: true }); + testRunner.stageTestHandler(test, (error, test) => { + expect(error).toBeDefined(); + expect(test.fail).toHaveBeenCalled(); + done(); + }); + }); + + test("test run error results in test failure", (done) => { + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + testRunner.runTest = (test, worker) => Promise.resolve({ error: true }); + testRunner.stageTestHandler(test, (error, test) => { + 
expect(error).toBeDefined(); + expect(test.fail).toHaveBeenCalled(); + done(); + }); + }); + + test("catch runtime resource error", (done) => { + options.strategies.resource.holdTestResource = () => Promise.reject("FAILURE"); + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + testRunner.stageTestHandler(test, (error, test) => { + expect(error).toBeDefined(); + done(); + }); + }); + + test("worker error results in test failure", (done) => { + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + testRunner.allocator.get = (cb) => cb(true, { index: 1 }); + + testRunner.stageTestHandler(test, (error, test) => { + expect(error).toBeDefined(); + done(); + }); + }); + }); + + describe("completeTestHandler", () => { + test("successful test", (done) => { + const testRunner = initTestRunner(tests, options); + const error = jest.fn(); + + testRunner.completeTestHandler(error, { + status: Test.TEST_STATUS_SUCCESSFUL, + canRun: () => true, + maxAttempts: 3, + attempts: 2, + workerIndex: 1, + getRuntime: () => 3 + }); + + expect(error).not.toHaveBeenCalled(); + done(); + }); + + test("failed test", (done) => { + options.strategies.bail.hasBailed = false; + options.strategies.bail.shouldBail = function () { this.hasBailed = true; }; + const testRunner = initTestRunner(tests, options); + + testRunner.completeTestHandler(null, { + status: Test.TEST_STATUS_FAILED, + canRun: () => true, + maxAttempts: 3, + attempts: 2, + workerIndex: 1, + getRuntime: () => 3 + }) + .then((v) => { + expect(v).toBe(1); + done(); + }); + }); + + test("bailed test", (done) => { + options.strategies.bail.hasBailed = true; + + const testRunner = initTestRunner(tests, options); + const test = { + status: Test.TEST_STATUS_SUCCESSFUL, + canRun: () => true, + maxAttempts: 3, + attempts: 2, + workerIndex: 1, + getRuntime: () => 3 + }; + + testRunner.completeTestHandler(null, test); + + expect(test.status).toBe(Test.TEST_STATUS_SKIPPED) + done(); + }); + + test("new test", (done) => { + const testRunner = initTestRunner(tests, options); + const error = jest.fn(); + + testRunner.completeTestHandler(error, { + status: Test.TEST_STATUS_NEW, + canRun: () => true, + maxAttempts: 3, + attempts: 2, + workerIndex: 1, + getRuntime: () => 3 + }); + + expect(error).not.toHaveBeenCalled(); + done(); + }); + + test("test that fail are enqueued again", (done) => { + const testRunner = initTestRunner(tests, options); + const error = jest.fn(); + const queueEnqueueSpy = jest.spyOn(testRunner.queue, "enqueue"); + + testRunner.completeTestHandler(error, { + status: Test.TEST_STATUS_FAILED, + canRun: () => false, + maxAttempts: 3, + attempts: 2, + workerIndex: 1, + getRuntime: () => 3 + }); + + expect(queueEnqueueSpy).toHaveBeenCalled(); + done(); + }); + }); + + describe("completeQueueHandler", () => { + beforeEach(() => { + jest.useFakeTimers(); + }); + + test("listeners are successfully resolved", (done) => { + const listener = new Reporter(); + listener.flush = () => Promise.resolve(10); + const listenerFlushSpy = jest.spyOn(listener, "flush"); + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + testRunner.completeQueueHandler(); + + jest.runAllTimers(); + + expect(setTimeout).toHaveBeenCalledTimes(1); + expect(listenerFlushSpy).toHaveBeenCalled(); + done(); + }); + + test("listeners are successfully resolved even when flush has error", (done) => { + const listener = new Reporter(); + listener.flush = () => Promise.reject(); + const 
listenerFlushSpy = jest.spyOn(listener, "flush"); + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + testRunner.completeQueueHandler(); + + jest.runAllTimers(); + + expect(setTimeout).toHaveBeenCalledTimes(1); + expect(listenerFlushSpy).toHaveBeenCalled(); + done(); + }); + }); + + describe("run", () => { + test("run is executed successfully", (done) => { + const testRunner = initTestRunner(tests, options); + testRunner.queue.proceed = jest.fn(); + + testRunner.run(); + + expect(testRunner.queue.proceed).toHaveBeenCalled(); + done(); + }); + + test("run in serial is executed successfully", (done) => { + const testRunner = initTestRunner(tests, options); + testRunner.serial = true; + testRunner.queue.proceed = jest.fn(); + + testRunner.run(); + + expect(testRunner.queue.proceed).toHaveBeenCalled(); + done(); + }); + }); + + describe("runTest", () => { + beforeEach(() => { + jest.useFakeTimers(); + }); + + test("test is run successfully", (done) => { + fs.mkdirSync.mockImplementation((p) => { }); + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + let worker = { + index: 1, + occupied: true, + portOffset: 12000 + }; + + testRunner.execute = (testRun, test) => Promise.resolve({ error: false }); + const executeSpy = jest.spyOn(testRunner, "execute"); + + testRunner.runTest(test, worker).then((v) => { + expect(executeSpy).toHaveBeenCalledTimes(1); + done(); + }); + + jest.runAllTimers(); + }); + + test("test in serial is run successfully", (done) => { + fs.mkdirSync.mockImplementation((p) => { }); + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + let worker = { + index: 1, + occupied: true, + portOffset: 12000, + token: "SOME_TOKEN" + }; + + testRunner.execute = (testRun, test) => Promise.resolve({ error: false }); + const executeSpy = jest.spyOn(testRunner, "execute"); + + testRunner.serial = true; + testRunner.runTest(test, worker).then((v) => { + expect(executeSpy).toHaveBeenCalledTimes(1); + done(); + }); + + jest.runAllTimers(); + }); + + test("failed test is rejected", (done) => { + fs.mkdirSync.mockImplementation((p) => { }); + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + let worker = { + index: 1, + occupied: true, + portOffset: 12000, + token: "SOME_TOKEN" + }; + + testRunner.execute = (testRun, test) => Promise.reject("EXECUTION_ERROR"); + const executeSpy = jest.spyOn(testRunner, "execute"); + + testRunner.runTest(test, worker).catch((v) => { + expect(executeSpy).toHaveBeenCalledTimes(1); + done(); + }); + + jest.runAllTimers(); + }); + + test("empty buildId is rejected", (done) => { + fs.mkdirSync.mockImplementation((p) => { }); + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + + let worker = { + index: 1, + occupied: true, + portOffset: 12000, + token: "SOME_TOKEN" + }; + + testRunner.buildId = null; + + testRunner.runTest(test, worker).catch((v) => { + done(); + }); + + jest.runAllTimers(); + }); + }); + + describe("execute", () => { + beforeEach(() => { + jest.useFakeTimers(); + }); + + test("test is executed successfully", (done) => { + const listener = new Reporter(); + const listenerListenToSpy = jest.spyOn(listener, "listenTo"); + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + const testRun = { + guid: "", + getEnvironment: (opts) => "" + }; + + testRunner.execute(testRun, test); + 
jest.runOnlyPendingTimers(); + jest.runOnlyPendingTimers(); + + expect(listenerListenToSpy).toHaveBeenCalled(); + done(); + }); + + test("enable executor", (done) => { + const listener = new Reporter(); + const listenerListenToSpy = jest.spyOn(listener, "listenTo"); + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + testRunner.queue.isIdle = () => false; + const test = stubPassTest(); + const testRun = { + enableExecutor: (executor) => { }, + guid: "", + getEnvironment: (opts) => "" + }; + + testRunner.execute(testRun, test); + jest.runOnlyPendingTimers(); + jest.runOnlyPendingTimers(); + + expect(listenerListenToSpy).toHaveBeenCalled(); + done(); + }); + + test("failed test with time out", (done) => { + const listener = new Reporter(); + const listenerListenToSpy = jest.spyOn(listener, "listenTo"); + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + const test = stubFailTest(); + const testRun = { + guid: "", + getEnvironment: (opts) => "" + }; + + testRunner.execute(testRun, test); + jest.runOnlyPendingTimers(); + jest.runOnlyPendingTimers(); + + expect(listenerListenToSpy).toHaveBeenCalled(); + done(); + }); + + test("bailing a test closes the execution worker", (done) => { + const listener = new Reporter(); + const listenerListenToSpy = jest.spyOn(listener, "listenTo"); + + options.listeners = [listener]; + options.strategies.bail.hasBailed = true; + options.strategies.bail.getBailReason = jest.fn(); + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + const testRun = { + guid: "", + getEnvironment: (opts) => "" + }; + + testRunner.execute(testRun, test); + jest.runOnlyPendingTimers(); + jest.runOnlyPendingTimers(); + + expect(listenerListenToSpy).toHaveBeenCalled(); + expect(options.strategies.bail.getBailReason).toHaveBeenCalled(); + done(); + }); + + test("test environment error results in promise rejection", () => { + const listener = new Reporter(); + const listenerListenToSpy = jest.spyOn(listener, "listenTo"); + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + const testRun = { + guid: "", + getEnvironment: (opts) => { + throw "ENVIRONMENT_ERROR"; + } + }; + + return testRunner.execute(testRun, test).catch(e => expect(e).toMatch("ENVIRONMENT_ERROR")); + }); + + test("child process error results in promise rejection", () => { + const listener = new Reporter(); + const listenerListenToSpy = jest.spyOn(listener, "listenTo"); + + options.listeners = [listener]; + options.executors.local.execute = () => { + throw "CHILD_PROCESS_ERROR"; + }; + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + const testRun = { + guid: "", + getEnvironment: (opts) => "" + }; + + return testRunner.execute(testRun, test).catch(e => expect(e).toMatch("CHILD_PROCESS_ERROR")); + }); + + test("listener error results in promise rejection", () => { + const listener = new Reporter(); + listener.listenTo = () => { + throw "LISTENER_ERROR"; + }; + + options.listeners = [listener]; + + const testRunner = initTestRunner(tests, options); + const test = stubPassTest(); + const testRun = { + guid: "", + getEnvironment: (opts) => "" + }; + + return testRunner.execute(testRun, test).catch(e => expect(e).toMatch("LISTENER_ERROR")); + }); + }); + + describe("logTestsSummary", () => { + test("empty test print no warning", (done) => { + const warnSpy = jest.spyOn(logger, "warn"); + + const testRunner 
= initTestRunner(tests, options); + + testRunner.logTestsSummary(); + + expect(warnSpy).not.toHaveBeenCalled(); + done(); + }); + + test("passing 1 test and failing 1 test prints 4 warnings", (done) => { + const loggerLogSpy = jest.spyOn(logger, "log"); + const loggerWarnSpy = jest.spyOn(logger, "warn"); + const analyticsMarkSpy = jest.spyOn(analytics, "mark"); + + const testRunner = initTestRunner(tests, options); + enqueuePassedTest(testRunner); + enqueueFailedTest(testRunner); + + testRunner.logTestsSummary(); + + expect(loggerLogSpy).toHaveBeenCalled(); + expect(loggerWarnSpy).toHaveBeenCalled(); + expect(analyticsMarkSpy).toHaveBeenCalledWith("magellan-run", "failed"); + done(); + }); + + test("failing 1 test with bail prints 4 warnings with bail reason", (done) => { + options.strategies.bail.hasBailed = true; + options.strategies.bail.getBailReason = () => "Some bail reason"; + + const loggerLogSpy = jest.spyOn(logger, "log"); + const loggerWarnSpy = jest.spyOn(logger, "warn"); + const analyticsMarkSpy = jest.spyOn(analytics, "mark"); + const bailReasonSpy = jest.spyOn(options.strategies.bail, "getBailReason"); + + const testRunner = initTestRunner(tests, options); + + testRunner.queue = new TestQueue({ + tests: [{ + status: Test.TEST_STATUS_SUCCESSFUL, + getRetries: () => 2, + }, { + status: Test.TEST_STATUS_FAILED, + getRetries: () => 2, + }, { + status: Test.TEST_STATUS_NEW, + getRetries: () => 0, + }], + getTestAmount: () => 3, + getPassedTests: () => 1, + getFailedTests: () => 1, + workerAmount: 1, + completeQueueHandler: () => Promise.resolve(1), + stageTestHandler: (test, cb) => cb() + }); + + testRunner.logTestsSummary(); + + expect(loggerLogSpy).toHaveBeenCalled(); + expect(loggerWarnSpy).toHaveBeenCalled(); + expect(bailReasonSpy).toHaveBeenCalled(); + expect(analyticsMarkSpy).toHaveBeenCalledWith("magellan-run", "failed"); + done(); + }); + + }); + + test("gather trends", () => { + const loggerLogSpy = jest.spyOn(logger, "log"); + const testRunner = initTestRunner(tests, options); + + jest.mock("fs", () => { + return { + readFileSync: () => { + console.log("some fake call") + return { + failures: { + "FAKE_FAILURE": 2, + "ANOTHER_FAILURE": 1 + } + } + }, + writeFileSync: () => { } + } + }); + + testRunner.trends = { + failures: { + "FAKE_FAILURE": 1 + } + }; + + testRunner.gatherTrends(); + + expect(loggerLogSpy).toHaveBeenCalled(); + }); +}); + +function initTestRunner(tests, options, numPassedTests, numFailedTests) { + const testRunner = new TestRunner(tests, options, { + settings: { + gatherTrends: true, + debugVerbose: true, + buildId: "FAKE_BUILD_ID", + testFramework: { + iterator: () => ["a", "b", "c"], + filters: { + a: () => true, + b: () => true + }, + TestRun: class { } + }, + BASE_PORT_SPACING: 3 + }, + startTime: (new Date()).getTime() + }); + + testRunner.queue = new TestQueue({ + tests: [], + workerAmount: 1, + completeQueueHandler: () => Promise.resolve(1), + stageTestHandler: (test, cb) => cb() + }); + + return testRunner; +} + +function enqueuePassedTest(testRunner) { + testRunner.queue.tests.push({ + status: Test.TEST_STATUS_SUCCESSFUL, + getRetries: () => 0 + }); +} + +function enqueueFailedTest(testRunner) { + testRunner.queue.tests.push({ + status: Test.TEST_STATUS_FAILED, + getRetries: () => 2 + }); +} + +function stubPassTest() { + return { + executor: { + shortName: "FAKE_EXECUTOR_SHORT_NAME", + setupTest: (cb) => cb(null, "FAKE_TOKEN"), + teardownTest: (token, cb) => cb(), + summerizeTest: () => { }, + getPorts: jest.fn() + }, + profile: { + 
executor: "local" + }, + locator: "", + startClock: () => { }, + stopClock: () => { }, + getRuntime: () => 0, + stdout: () => { }, + pass: jest.fn(), + fail: jest.fn() + } +} + +function stubFailTest() { + return { + executor: { + setupTest: (cb) => cb("FAKE_ERROR", "FAKE_TOKEN"), + teardownTest: (token, cb) => cb(), + summerizeTest: (buildId, metadata, cb) => cb(), + getPorts: jest.fn() + }, + profile: { + executor: "local" + }, + locator: "", + startClock: () => { }, + stopClock: () => { }, + getRuntime: () => 1000000000, + stdout: () => { }, + pass: jest.fn(), + fail: jest.fn() + } +} \ No newline at end of file diff --git a/test/utils/check_ports.js b/test/utils/check_ports.js deleted file mode 100644 index 17da128..0000000 --- a/test/utils/check_ports.js +++ /dev/null @@ -1,36 +0,0 @@ -/* eslint no-undef: 0, no-magic-numbers: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const checkPorts = require("../../src/util/check_ports"); -const sinon = require("sinon"); - -describe("check_ports", () => { - it("should check some ports", () => { - const spy = sinon.spy(); - checkPorts([10, 20, 30], spy, - { - request: (opts, cb) => { - cb(null); - }, - console: {log: () => {}} - }); - expect(spy.called).to.be.true; - }); - - it("should check find ports", () => { - const spy = sinon.spy(); - checkPorts([10, 20, 30], spy, - { - request: (opts, cb) => { - cb({code: "ECONNREFUSED"}); - }, - portscanner: { - checkPortStatus: (port, host, cb) => { - cb(null, "open"); - } - }, - console: {log: () => {}} - }); - expect(spy.called).to.be.true; - }); -}); diff --git a/test/utils/check_ports.test.js b/test/utils/check_ports.test.js new file mode 100644 index 0000000..8ba8dbd --- /dev/null +++ b/test/utils/check_ports.test.js @@ -0,0 +1,58 @@ +/* eslint no-undef: 0, no-magic-numbers: 0, no-unused-expressions: 0 */ +'use strict'; + +const request = require('request'); +const portscanner = require('portscanner'); + +const checkPorts = require('../../src/util/check_ports'); +const sinon = require('sinon'); + +jest.mock('request'); +jest.mock('portscanner'); + +test('port isn\'t available', () => { + request.mockImplementation((opts, cb) => cb(null)); + + const spy = sinon.spy(); + + checkPorts([10], spy); + + expect(spy.called).toEqual(true); + expect(spy.args[0][0]).toEqual([{ 'port': 10, 'available': false }]); +}); + +test('port is occupied by other process', () => { + request.mockImplementation((opts, cb) => { + const err = new Error('fake selenium error'); + err.code = 'ECONNREFUSED'; + + return cb(err); + }); + + portscanner.checkPortStatus.mockImplementation((port, host, cb) => cb(null, 'open')); + + const spy = sinon.spy(); + + checkPorts([10], spy); + + expect(spy.called).toEqual(true); + expect(spy.args[0][0]).toEqual([{ 'port': 10, 'available': false }]); +}); + +test('port is available', () => { + request.mockImplementation((opts, cb) => { + const err = new Error('fake selenium error'); + err.code = 'ECONNREFUSED'; + + return cb(err); + }); + + portscanner.checkPortStatus.mockImplementation((port, host, cb) => cb(null, 'closed')); + + const spy = sinon.spy(); + + checkPorts([10], spy); + + expect(spy.called).toEqual(true); + expect(spy.args[0][0]).toEqual([{ 'port': 10, 'available': true }]); +}); diff --git a/test/utils/child_process.test.js b/test/utils/child_process.test.js new file mode 100644 index 0000000..e48b9bd --- /dev/null +++ b/test/utils/child_process.test.js @@ -0,0 +1,78 @@ +'use strict'; + +const child_process = require('child_process'); 
+const ChildProcess = require('../../src/util/childProcess'); + +jest.mock('child_process'); + +const handler = { + removeAllListeners: () => { }, + stdout: { + on: () => { }, + removeAllListeners: () => { }, + unpipe: () => { } + }, + stderr: { + on: () => { }, + removeAllListeners: () => { }, + unpipe: () => { } + }, + send: (msg) => { }, + on: (msg, cb) => cb(msg) +}; + +describe('Child process', () => { + + test('should construct new object', () => { + const cp = new ChildProcess(handler); + }); + + test('should enable debug message', () => { + const cp = new ChildProcess(handler); + cp.enableDebugMsg(); + }); + + test('should enable onMessage', (done) => { + const cp = new ChildProcess(handler); + + cp.onMessage((msg) => { + expect(msg).toEqual('message'); + done(); + }); + }); + + test('should enable onClose', (done) => { + const cp = new ChildProcess(handler); + + cp.onClose((msg) => { + expect(msg).toEqual('close'); + done(); + }); + }); + + test('should enable send', () => { + const cp = new ChildProcess(handler); + cp.send('fake message'); + }); + + test('should emit message', () => { + const cp = new ChildProcess(handler); + cp.emitMessage('fake message'); + }); + + test('should tearDown', () => { + const cp = new ChildProcess(handler); + cp.teardown(); + }); + + test('should append data to stdout', () => { + const cp = new ChildProcess(handler); + + cp.onDataCallback('fake data'); + cp.onDataCallback('real data'); + cp.onDataCallback(''); + + expect(cp.stdout).toContain('fake data'); + expect(cp.stdout).toContain('real data'); + }); +}); \ No newline at end of file diff --git a/test/utils/guid.js b/test/utils/guid.js deleted file mode 100644 index 75aad02..0000000 --- a/test/utils/guid.js +++ /dev/null @@ -1,10 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const guid = require("../../src/util/guid"); - -describe("guid", () => { - it("should create a guid", () => { - expect(guid()).to.not.be.null; - }); -}); diff --git a/test/utils/guid.test.js b/test/utils/guid.test.js new file mode 100644 index 0000000..d372bde --- /dev/null +++ b/test/utils/guid.test.js @@ -0,0 +1,8 @@ +/* eslint no-undef: 0, no-unused-expressions: 0 */ +'use strict'; + +const guid = require('../../src/util/guid'); + +it('should create a guid', () => { + expect(guid()).not.toBeNull(); +}); diff --git a/test/utils/load_relative_module.js b/test/utils/load_relative_module.js deleted file mode 100644 index 233818b..0000000 --- a/test/utils/load_relative_module.js +++ /dev/null @@ -1,93 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0, no-throw-literal: 0 */ -"use strict"; -const expect = require("chai").expect; -const loadRelativeModule = require("../../src/util/load_relative_module"); - -class TestClass { -} - -describe("loadRelativeModule", () => { - it("should load by name", () => { - const mod = loadRelativeModule("foo", false, { - require: () => { - return TestClass; - } - }); - expect(mod).not.to.be.null; - expect(mod).not.to.eql("foo!"); - }); - - it("should load relatively", () => { - const mod = loadRelativeModule("./foo.js", false, { - require: () => { - return TestClass; - } - }); - expect(mod).not.to.be.null; - expect(mod).not.to.eql("foo!"); - }); - - it("should fail with non-optional module not found", () => { - let thrown = false; - try { - loadRelativeModule("foo.js", false, { - require: () => { - throw {code: "MODULE_NOT_FOUND"}; - }, - console: { - error: () => {} - } - }); - } catch (e) { - thrown = e; - } - 
expect(thrown).to.not.be.null; - }); - - it("should fail with undefined error code", () => { - let thrown = false; - try { - loadRelativeModule("foo.js", true, { - require: () => { - throw {code: undefined}; - }, - console: { - error: () => {} - } - }); - } catch (e) { - thrown = e; - } - expect(thrown).to.not.be.null; - }); - - it("should fail with optional", () => { - const mod = loadRelativeModule("foo.js", true, { - require: () => { - throw {code: "MODULE_NOT_FOUND"}; - }, - console: { - error: () => {} - } - }); - expect(mod).to.be.null; - }); - - it("should not throw error with optional module not found", () => { - let thrown = false; - try { - loadRelativeModule("foo.js", true, { - require: () => { - throw {code: "MODULE_NOT_FOUND"}; - }, - console: { - error: () => {} - } - }); - } catch (e) { - thrown = e; - } - expect(thrown).to.be.false; - }); - -}); diff --git a/test/utils/load_relative_module.test.js b/test/utils/load_relative_module.test.js new file mode 100644 index 0000000..1fc7022 --- /dev/null +++ b/test/utils/load_relative_module.test.js @@ -0,0 +1,68 @@ +/* eslint no-undef: 0, no-unused-expressions: 0, no-throw-literal: 0 */ +'use strict'; + +const loadRelativeModule = require('../../src/util/load_relative_module'); + +class T { }; + +describe('loadRelativeModule', () => { + test('should load by name', () => { + const mod = loadRelativeModule('foo', false, { + require: (m) => T + }); + + expect(mod).toEqual(new T()); + }); + + test('should load relatively', () => { + const mod = loadRelativeModule('./foo', false, { + require: (m) => T + }); + + expect(mod).toEqual(new T()); + }); + + test('should fail with non-optional module not found', () => { + try { + loadRelativeModule('foo.js', false, { + require: () => { + throw { code: 'MODULE_NOT_FOUND' } + } + }); + fail(); + } catch (e) { + expect(e).not.toBeNull(); + } + }); + + test('should fail with undefined error code', () => { + try { + loadRelativeModule('foo.js', true, { + require: () => { + throw { code: undefined }; + } + }); + fail(); + } catch (e) { + expect(e).not.toBeNull(); + } + }); + + // test('should fail with optional', () => { + // const mod = loadRelativeModule('foo.js', true, { + // require: () => { + // throw { code: 'MODULE_NOT_FOUND' }; + // } + // }); + // expect(mod).toBeNull(); + // }); + + test('should not throw error with optional module not found', () => { + const mod = loadRelativeModule('foo.js', true, { + require: () => { + throw { code: 'MODULE_NOT_FOUND' }; + } + }); + expect(mod).toBeNull(); + }); +}); diff --git a/test/utils/logstamp.js b/test/utils/logstamp.js deleted file mode 100644 index 6c82187..0000000 --- a/test/utils/logstamp.js +++ /dev/null @@ -1,10 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const logstamp = require("../../src/util/logstamp"); - -describe("logstamp", () => { - it("should create a logstamp", () => { - expect(logstamp()).to.not.be.null; - }); -}); diff --git a/test/utils/logstamp.test.js b/test/utils/logstamp.test.js new file mode 100644 index 0000000..d085e6b --- /dev/null +++ b/test/utils/logstamp.test.js @@ -0,0 +1,8 @@ +/* eslint no-undef: 0, no-unused-expressions: 0 */ +'use strict'; + +const logstamp = require('../../src/util/logstamp'); + +test('should create a logstamp', () => { + expect(logstamp()).not.toBeNull(); +}); diff --git a/test/utils/mkdir_sync.test.js b/test/utils/mkdir_sync.test.js new file mode 100644 index 0000000..772d34b --- /dev/null +++ 
b/test/utils/mkdir_sync.test.js @@ -0,0 +1,37 @@ +/* eslint no-undef: 0, no-unused-expressions: 0 */ +'use strict'; + +const fs = require('fs'); +const mkdirSync = require('../../src/util/mkdir_sync'); + +jest.mock('fs'); + +test('should create path', () => { + fs.mkdirSync.mockImplementation((p) => { }); + + const r = mkdirSync('fakePath'); + + expect(r).toBeUndefined(); +}); + +test('should throw error', () => { + fs.mkdirSync.mockImplementation((p) => { throw { code: 'FAKE_ERROR' } }); + + try { + mkdirSync('fakePath'); + fail(); + } catch (e) { + expect(e.code).toEqual('FAKE_ERROR'); + } +}); + +test('shouldn\'t throw error if error is EEXIST', () => { + fs.mkdirSync.mockImplementation((p) => { throw { code: 'EEXIST' } }); + + try { + mkdirSync('fakePath'); + + } catch (e) { + fail(); + } +}); diff --git a/test/utils/port_util.js b/test/utils/port_util.js deleted file mode 100644 index 5df79a8..0000000 --- a/test/utils/port_util.js +++ /dev/null @@ -1,43 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0, no-magic-numbers: 0, callback-return: 0 */ -"use strict"; -const expect = require("chai").expect; -const portUtil = require("../../src/util/port_util"); -const sinon = require("sinon"); - -describe("port_util", () => { - it("should get the next port", () => { - expect(portUtil.getNextPort()).to.eql(12000); - expect(portUtil.getNextPort()).to.eql(12003); - }); - - it("should acquire a port", () => { - const spy = sinon.spy(); - portUtil.acquirePort(spy, { - checkPorts: (arr, cb) => { - cb([{ - port: arr[0], - available: true - }]); - } - }); - expect(spy.called).to.be.true; - }); - - it("should acquire a port after a retry", () => { - const spy = sinon.spy(); - let attempt = 0; - portUtil.acquirePort(spy, { - checkPorts: (arr, cb) => { - attempt === 0 ? cb([{ - port: arr[0], - available: false - }]) : cb([{ - port: arr[0], - available: true - }]); - attempt = 1; - } - }); - expect(spy.called).to.be.true; - }); -}); diff --git a/test/utils/port_util.test.js b/test/utils/port_util.test.js new file mode 100644 index 0000000..7c40393 --- /dev/null +++ b/test/utils/port_util.test.js @@ -0,0 +1,61 @@ +/* eslint no-undef: 0, no-unused-expressions: 0, no-magic-numbers: 0, callback-return: 0 */ +'use strict'; + +const sinon = require('sinon'); + +const checkPorts = require('../../src/util/check_ports'); +const portUtil = require('../../src/util/port_util'); + +jest.mock('../../src/util/check_ports'); + +test('should get the next port', () => { + expect(portUtil.getNextPort()).toEqual(12000) + expect(portUtil.getNextPort()).toEqual(12003); +}); + +test('should acquire a port', () => { + checkPorts.mockImplementation((arr, cb) => cb([{ + port: arr[0], + available: true + }])); + + const spy = sinon.spy(); + + portUtil.acquirePort(spy); + + expect(spy.called).toEqual(true); + expect(spy.args[0]).toEqual([null, 12006]); +}); + +test('should acquire a port after several retries', () => { + checkPorts.mockImplementation((arr, cb) => { + arr[0] < 12020 ? 
cb([{ + port: arr[0], + available: false + }]) : cb([{ + port: arr[0], + available: true + }]); + }); + + const spy = sinon.spy(); + + portUtil.acquirePort(spy); + + expect(spy.called).toEqual(true); + expect(spy.args[0]).toEqual([null, 12021]); +}); + +test('should throw exception after maximum retries', () => { + checkPorts.mockImplementation((arr, cb) => cb([{ + port: arr[0], + available: false + }])); + + const spy = sinon.spy(); + + portUtil.acquirePort(spy); + + expect(spy.called).toEqual(true); + expect(spy.args[0][0].message).toEqual('Gave up looking for an available port after 100 attempts.'); +}); diff --git a/test/utils/process_cleanup.js b/test/utils/process_cleanup.js deleted file mode 100644 index 39c4a51..0000000 --- a/test/utils/process_cleanup.js +++ /dev/null @@ -1,87 +0,0 @@ -/* eslint no-undef: 0, no-unused-expressions: 0, no-magic-numbers: 0 */ -"use strict"; -const expect = require("chai").expect; -const processCleanup = require("../../src/util/process_cleanup"); -const sinon = require("sinon"); - -describe("process_cleanup", () => { - it("cleanup no processes", () => { - const spy = sinon.spy(); - processCleanup(spy, { - console: { - log: () => {} - }, - treeUtil: { - getZombieChildren: (a1, a2, cb) => { - expect(a1).to.not.be.null; - expect(a2).to.not.be.null; - cb([]); - } - } - }); - }); - - it("cleanup no processes with debugging", () => { - const spy = sinon.spy(); - processCleanup(spy, { - console: { - log: () => {} - }, - settings: { - debug: true - }, - treeUtil: { - getZombieChildren: (a1, a2, cb) => { - expect(a1).to.not.be.null; - expect(a2).to.not.be.null; - cb([]); - } - } - }); - }); - - it("cleanup processes", () => { - const spy = sinon.spy(); - processCleanup(spy, { - console: { - log: () => {} - }, - treeUtil: { - getZombieChildren: (a1, a2, cb) => { - expect(a1).to.not.be.null; - expect(a2).to.not.be.null; - cb([10, 20, 30]); - }, - kill: (a1, a2, cb) => { - expect(a1).to.not.be.null; - expect(a2).to.not.be.null; - cb(); - } - } - }); - }); - - it("cleanup processes with debug", () => { - const spy = sinon.spy(); - processCleanup(spy, { - console: { - log: () => {} - }, - settings: { - debug: true - }, - treeUtil: { - getZombieChildren: (a1, a2, cb) => { - expect(a1).to.not.be.null; - expect(a2).to.not.be.null; - cb([10, 20, 30]); - }, - kill: (a1, a2, cb) => { - expect(a1).to.not.be.null; - expect(a2).to.not.be.null; - cb(); - } - } - }); - }); -}); diff --git a/test/utils/process_cleanup.test.js b/test/utils/process_cleanup.test.js new file mode 100644 index 0000000..d21d651 --- /dev/null +++ b/test/utils/process_cleanup.test.js @@ -0,0 +1,33 @@ +/* eslint no-undef: 0, no-unused-expressions: 0, no-magic-numbers: 0 */ +'use strict'; + +const treeKill = require('testarmada-tree-kill'); +const processCleanup = require('../../src/util/process_cleanup') + +jest.mock('testarmada-tree-kill'); + +test('cleanup no child process', () => { + treeKill.getZombieChildren.mockImplementation((pid, maxtime, cb) => cb([])); + + return expect(processCleanup(null)).resolves.toBe(undefined); +}); + +test('cleanup 3 child processes', ()=>{ + treeKill.getZombieChildren.mockImplementation((pid, maxtime, cb) => cb([10, 20, 30])); + treeKill.kill.mockImplementation((pid, sig, cb) => cb()); + + return expect(processCleanup(null)).resolves.toBe(undefined); +}); + +test('cleanup no child process with error passing through', ()=>{ + treeKill.getZombieChildren.mockImplementation((pid, maxtime, cb) => cb([])); + + return expect(processCleanup('error')).rejects.toBe('error'); 
+}); + +test('cleanup 3 child processes with error passing through', ()=>{ + treeKill.getZombieChildren.mockImplementation((pid, maxtime, cb) => cb([10, 20, 30])); + treeKill.kill.mockImplementation((pid, sig, cb) => cb()); + + return expect(processCleanup('error')).rejects.toBe('error'); +}); \ No newline at end of file diff --git a/test/worker_allocator.js b/test/worker_allocator.js deleted file mode 100644 index f04c838..0000000 --- a/test/worker_allocator.js +++ /dev/null @@ -1,136 +0,0 @@ -/* eslint no-undef: 0, no-magic-numbers: 0, no-unused-expressions: 0 */ -"use strict"; -const expect = require("chai").expect; -const sinon = require("sinon"); -const WorkerAllocator = require("../src/worker_allocator"); - -describe("WorkerAllocator", () => { - it("should act like a class", () => { - expect(new WorkerAllocator(10, { - console: { - log: () => {} - }, - debug: true - })).to.be.an.instanceof(WorkerAllocator); - }); - - it("should initialize", () => { - const spy = sinon.spy(); - const worker = new WorkerAllocator(10, { - console: { - log: () => {} - } - }); - worker.initialize(spy); - expect(spy.called).to.be.true; - }); - - it("should teardown", () => { - const spy = sinon.spy(); - const worker = new WorkerAllocator(10, { - console: { - log: () => {} - } - }); - worker.teardown(spy); - expect(spy.called).to.be.true; - }); - - it("should release", () => { - const workers = new WorkerAllocator(10, { - console: { - log: () => {} - } - }); - workers.release(workers.workers[0]); - expect(workers.workers[0].occupied).to.be.false; - }); - - it("should get", () => { - const spy = sinon.spy(); - let port = 1; - const workers = new WorkerAllocator(10, { - console: { - log: () => {} - }, - setTimeout: (cb) => { - cb(); - }, - getNextPort: () => { - port++; - return port; - }, - checkPorts: (ports, cb) => { - const out = []; - for (const p in ports) { - out.push({ - port: ports[p], - available: true - }); - } - cb(out); - } - }); - workers.get(spy); - expect(spy.called).to.be.true; - }); - - it("should get and occupy everything", () => { - const spy = sinon.spy(); - let port = 1; - const workers = new WorkerAllocator(1, { - console: { - log: () => {} - }, - setTimeout: (cb) => { - cb(); - }, - getNextPort: () => { - port++; - return port; - }, - checkPorts: (ports, cb) => { - const out = []; - for (const p in ports) { - out.push({ - port: ports[p], - available: true - }); - } - cb(out); - } - }); - workers.get(spy); - expect(spy.called).to.be.true; - workers.get(spy); - }); - - it("should get with nothing available", () => { - const spy = sinon.spy(); - let port = 1; - const workers = new WorkerAllocator(10, { - console: { - log: () => {} - }, - setTimeout: (cb) => { - cb(); - }, - getNextPort: () => { - port++; - return port; - }, - checkPorts: (ports, cb) => { - const out = []; - for (const p in ports) { - out.push({ - port: ports[p], - available: false - }); - } - cb(out); - } - }); - workers.get(spy); - expect(spy.called).to.be.true; - }); -}); diff --git a/test/worker_allocator.test.js b/test/worker_allocator.test.js new file mode 100644 index 0000000..065bce0 --- /dev/null +++ b/test/worker_allocator.test.js @@ -0,0 +1,95 @@ +/* eslint no-undef: 0, no-magic-numbers: 0, no-unused-expressions: 0 */ +'use strict'; + +const settings = require('../src/settings'); +const WorkerAllocator = require('../src/worker_allocator'); + +jest.mock('../src/settings', () => { + return { + BASE_PORT_START: 12000, + BASE_PORT_RANGE: 2000, + BASE_PORT_SPACING: 3, + MAX_WORKERS: 3, + MAX_ALLOCATION_ATTEMPTS: 2, + 
WORKER_START_DELAY: 100
+  }
+});
+
+describe('WorkerAllocator', () => {
+  test('should construct', () => {
+    const w = new WorkerAllocator(10);
+    expect(w.workers).toHaveLength(10);
+  });
+
+  test('should set up', () => {
+    const w = new WorkerAllocator(10);
+    return expect(w.setup()).resolves.toBeUndefined();
+  });
+
+  test('should tear down without error', () => {
+    const w = new WorkerAllocator(10);
+    return expect(w.teardown()).resolves.toBeUndefined();
+  });
+
+  test('should tear down with error', () => {
+    const w = new WorkerAllocator(10);
+    return expect(w.teardown('err')).rejects.toEqual('err');
+  });
+
+  test('should release worker', () => {
+    const w = new WorkerAllocator(10);
+    const worker = { occupied: true };
+    w.release(worker);
+
+    expect(worker.occupied).toEqual(false);
+  });
+
+  test('should get worker when at least one is available', (done) => {
+    const w = new WorkerAllocator(10);
+
+    w.get((err, worker) => {
+      expect(worker.occupied).toEqual(true);
+      done();
+    });
+  });
+
+  test('should return error if no worker is available', (done) => {
+    const w = new WorkerAllocator(1);
+    w.workers = [{ index: 1, occupied: true, portOffset: 12000 }];
+
+    w.get((err, worker) => {
+      expect(err).toEqual('Couldn\'t allocate a worker after 2 attempts');
+      done();
+    });
+  });
+
+  test('should skip current port if occupied already', (done) => {
+    const portUtil = require('../src/util/port_util');
+    jest.mock('../src/util/port_util');
+
+    portUtil.getNextPort.mockImplementation(() => 12000);
+
+    // report the first port set as partially occupied so the allocator has to move on to the next set
+    portUtil.checkPorts.mockImplementation((ports, cb) => {
+      if (ports.includes(12003)) {
+        cb([
+          { port: 12003, available: true },
+          { port: 12004, available: false },
+          { port: 12005, available: true }
+        ]);
+      } else {
+        cb([
+          { port: 12006, available: true },
+          { port: 12007, available: true },
+          { port: 12008, available: true }
+        ]);
+      }
+    });
+
+    const w = new WorkerAllocator(1);
+    // only verifies that allocation completes and the callback fires
+    w.get((err, worker) => {
+      done();
+    });
+  });
+});