diff --git a/.gitignore b/.gitignore index a7ab81621..c04dd5653 100644 --- a/.gitignore +++ b/.gitignore @@ -66,9 +66,13 @@ ltximg auto latexmk-out *.tex - svg-inkscape *.html +tmp +*.tar +*.md +README.tex larsoft-cfg *.dot configure + diff --git a/README.org b/README.org index 980068984..3d16340f9 100644 --- a/README.org +++ b/README.org @@ -1,38 +1,67 @@ #+TITLE: Wire-Cell Toolkit +#+SETUPFILE: setup-readme.org -Welcome to the Wire-Cell Toolkit source repository. +Welcome to the Wire-Cell Toolkit (WCT) source repository at https://github.com/wirecell/wire-cell-toolkit. -See http://wirecell.bnl.gov/ for documentation including user manual and news "blog". +* Overview -[[https://travis-ci.org/WireCell/wire-cell-toolkit][https://api.travis-ci.org/WireCell/wire-cell-toolkit.svg?branch=master]] +The WCT is a multi-faceted, high performance software project developed for liquid argon time projection chamber (LArTPC) simulation and data processing. Some features of wCT include: + +- Layered tool design culminating in a modular execution policy and reference command-line interface program. +- A multi-threaded execution model constructed following the data flow programming paradigm. +- Plugin, factory and configuration subsystems. +- Components providing simulation, signal processing and physics reconstruction algorithms. +- Suite of abstract interface classes. +- Low level utility algorithms, data structures and streaming data I/O formats. + +Additional "README" information is available in the WCT sub packages: + +- [[file:apps/README.org][apps]] +- [[file:aux/README.org][aux]] +- [[file:cfg/README.org][cfg]] +- [[file:cuda/README.org][cuda]] +- [[file:gen/README.org][gen]] +- [[file:iface/README.org][iface]] +- [[file:img/README.org][img]] +- [[file:pgraph/README.org][pgraph]] +- [[file:pytorch/README.org][pytorch]] +- [[file:root/README.org][root]] +- [[file:sigproc/README.org][sigproc]] +- [[file:sio/README.org][sio]] +- [[file:tbb/README.org][tbb]] +- [[file:test/README.org][test]] +- [[file:util/README.org][util]] +- [[file:waft/README.org][waft]] +- [[file:zio/README.org][zio]] + +See http://wirecell.bnl.gov/ for the home of Wire-Cell Toolkit documentation and news "blog". * Installation -Wire-Cell Toolkit provides simple and automatic installation which -gives you, the installer, some options. +Wire-Cell Toolkit provides simple and automated installation while allowing you to adapt it so you may provide the required dependencies in a variety of ways. ** External software dependencies -The WCT dependency tree: +The WCT dependencies are curated and minimized with some required and some optional. Below shows the intra- and inter-package dependency tree: [[file:wct-deps.png]] -Anything that the ~WireCellUtil~ package depends on is required. The -rest are optional. Missing optional dependencies will cause the -dependent WCT package to not be built. +Black arrows are library dependencies, blue are for applications and gray are for testing programs. They represent compile/link time dependencies. + +The dependencies for the ~WireCellUtil~ package are required. The rest are optional. Missing optional dependencies, or ones specifically turned off, will cause the dependent WCT package to not be built. 
Some external dependencies have explicit minimum required versions: - TBB (oneAPI) 2021.1.1 - Boost 1.75.0 -You may provide the necessary external software dependencies in a -manner of your own choosing and some options include: +You may provide the necessary external software dependencies in a manner of your own choosing and some options include: +- Packages provided by your OS or built "by hand". - [[https://github.com/WireCell/wire-cell-spack][Spack-based install]] automatically builds all (non-OS) externals and WCT itself - Some WCT releases are built at FNAL as a UPS product named =wirecell=. -- Exploit the above with a [[https://github.com/WireCell/wire-cell-singularity][Singularity container and CVMFS]] (currently recommended) +- Exploit the above with a [[https://github.com/WireCell/wire-cell-singularity][Singularity container and CVMFS]]. ** Developer Source @@ -44,9 +73,7 @@ Developers check out =master= branch via SSH. ** User Source -Users typically should build a release branch, either the tip or a -tagged release on that branch. Tagged releases are shown on the [[https://github.com/WireCell/wire-cell-toolkit/releases][this -GitHub release page]]. +Users typically should build a release branch, either the tip or a tagged release on that branch. Tagged releases are shown on the [[https://github.com/WireCell/wire-cell-toolkit/releases][this GitHub release page]]. Users may also anonymously clone in the usual way: @@ -62,16 +89,13 @@ On well-behaved systems, configuring the source may be as simple as: $ ./wcb configure --prefix=/path/to/install #+END_EXAMPLE -Software dependencies which can not automatically be located in system -areas or via ~pkg-config~ can be manually specified. For a list of -options run: +Software dependencies which can not automatically be located in system areas or via ~pkg-config~ can be manually specified. For a list of options run: #+BEGIN_EXAMPLE $ ./wcb --help #+END_EXAMPLE -Here is an example where some packages are found automatically and -some need help and others are explicitly turned off: +Here is an example where some packages are found automatically and some need help and others are explicitly turned off: #+begin_example $ ./wcb configure \ @@ -89,69 +113,42 @@ some need help and others are explicitly turned off: ** Building -It is suggested to first build the code before running tests. +The libraries and programs may be built with: #+BEGIN_EXAMPLE - $ ./wcb -p --notests + $ ./wcb #+END_EXAMPLE ** Installing -To install the built toolkit and its configuration support files while -still avoiding the tests do: +To install: #+BEGIN_EXAMPLE - $ ./wcb -p --notests install + $ ./wcb install #+END_EXAMPLE -Optionally, the *reference* configuration and data files for one or more -supported experiments may be installed by giving naming them with the -~--install-config~ option. A name matches a sub-directory under -[[file:cfg/pgrapher/experiment/][cfg/pgrapher/experiment/]] or the special ~all~ name will install all. +Optionally, the *reference* configuration and data files for one or more supported experiments may be installed by giving naming them with the ~--install-config~ option. A name matches a sub-directory under [[file:cfg/pgrapher/experiment/][cfg/pgrapher/experiment/]] or the special ~all~ name will install all. 
#+begin_example - $ ./wcb -p --notests --install-config= install + $ ./wcb --install-config= install #+end_example ** Testing -Running the tests can take a while but should be run on new -installations and after any significant development. The developers -try not to leave broken tests so any test failure should be treated as -important. However, some tests require proper environment to run -successfully. In particular, tests need to find Wire-Cell -configuration and the shared libraries of the external software -dependencies need to be located. Below shows an example: +Running the tests can take a while but are off by default. They may be run with: +#+begin_example + $ ./wcb --tests +#+end_example -#+BEGIN_EXAMPLE - $ export WIRECELL_PATH=$HOME/dev/wct/wire-cell-data:$HOME/dev/wct/wire-cell-toolkit/cfg - $ export LD_LIBRARY_PATH=$HOME/dev/wct/install/lib:$HOME/opt/jsonnet/lib - $ ./wcb -p --alltests - ... - execution summary - tests that pass 83/83 - ... - tests that fail 0/83 - 'build' finished successfully (15.192s) -#+END_EXAMPLE +See [[file:tests/README.org]] for more details on testing. * Release management -To make releases, the above details are baked into two test scripts -[[https://github.com/WireCell/waf-tools/blob/master/make-release.sh][make-release.sh]] and [[https://github.com/WireCell/waf-tools/blob/master/test-release.sh][test-release.sh]]. See comments at the top of each -for how to run them. These scripts can be used by others but are -meant for developers to make official releases. - -* Meta - -A new =wcb= build script is made from [[https://github.com/waf-project/waf][waf source]] via: +WCT uses an ~X.Y.Z~ version string. While ~X=0~, a ~0.Y.0~ version indicates a new release that may extend or break API or ABI compared to ~Y-1~. A ~Z>0~ indicates a bug fix to ~Z-1~ which should otherwise retain the API and ABI. Bug fixes will be made on a branch rooted on ~0.X.0~ called ~0.X.x~. -#+BEGIN_EXAMPLE - $ cd waf-tools - $ ./refresh-wcb -o /path/to/wire-cell-toolkit/wcb - $ cd /path/to/wire-cell-toolkit/ - $ git commit -am "update wcb" && git push -#+END_EXAMPLE +To make releases, the above details are baked into two test scripts [[https://github.com/WireCell/waf-tools/blob/master/make-release.sh][make-release.sh]] and [[https://github.com/WireCell/waf-tools/blob/master/test-release.sh][test-release.sh]]. See comments at the top of each for how to run them. These scripts can be used by others but are meant for developers to make official releases. +* Meta +Prior to 0.25.0, ~wcb~ was a custom version of [[https://waf.io][Waf]] and is now simply a copy of ~waf~. The customized tools are held in the [[file:waft/]] directory. diff --git a/apps/README.md b/apps/README.md deleted file mode 100644 index 513070a06..000000000 --- a/apps/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# Wire Cell Applications - diff --git a/apps/README.org b/apps/README.org new file mode 100644 index 000000000..222e14a3e --- /dev/null +++ b/apps/README.org @@ -0,0 +1,136 @@ +#+title: Wire Cell Toolkit Apps +#+SETUPFILE: ../setup-readme.org + + +* Overview + +This package provides toolkit layers at the top of an application +stack. This includes + +- main command line programs, in particular ~wire-cell~. + +- a ~Main~ class used in CLI and other application interfaces (such as /art/). + +- a few so called "apps" which are WCT components that provide a + high-level execution policy. 
See also ~TbbFlow~ in sub package ~tbb~ + and ~Pgrapher~ in + +* Programs + +** ~wire-cell~ + +The ~wire-cell~ command line program provides a "reference" application +of the toolkit. It is a generic, "policy free" program that is fully +driven by configuration. + +#+begin_src bash :results output + wire-cell --help + wire-cell --version +#+end_src + +#+RESULTS: +#+begin_example +Command line interface to the Wire-Cell Toolkit + +Usage: + wire-cell [options] [configuration ...] + +Options: + -h [ --help ] produce help message + -l [ --logsink ] arg set log sink as or 'stdout' or 'stderr', a + log level for the sink may be given by appending + ':' + -L [ --loglevel ] arg set lowest log level for a log in form 'name:level' or + just give 'level' value for all (level one of: + critical,error,warn,info,debug,trace) + -a [ --app ] arg application component to invoke + -c [ --config ] arg provide a configuration file + -p [ --plugin ] arg specify a plugin as name[:lib] + -V [ --ext-str ] arg specify a Jsonnet external variable= + -C [ --ext-code ] arg specify a Jsonnet external variable= + -A [ --tla-str ] arg specify a Jsonnet top level arguments variable= + --tla-code arg specify a Jsonnet top level arguments variable= + -P [ --path ] arg add to JSON/Jsonnet search path + -t [ --threads ] arg limit number of threads used + -v [ --version ] print the compiled version to stdout + +0.24.0-33-gf9d92c77 +#+end_example + +** ~wcsonnet~ + +The ~wcsonnet~ program is a thin wrapper around the Jsonnet library used to build WCT. It can be preferable to the standard ~jsonnet~ program for the following reasons: + +- It uses the Go Jsonnet library which is substantially faster than the C/C++ library used by ~jsonnet~. +- It honors the ~WIRECELL_PATH~ to locate files. + +#+begin_src shell :results output + wcsonnet --help +#+end_src + +#+RESULTS: +#+begin_example +wcsonnet is a Wire-Cell Toolkit aware Jsonnet compiler +Usage: wcsonnet [OPTIONS] [file] + +Positionals: + file TEXT Jsonnet file to compile + +Options: + -h,--help Print this help message and exit + -o,--output TEXT Output file + -P,--path TEXT ... Search paths to consider in addition to those in WIRECELL_PATH + -V,--ext-str TEXT ... Jsonnet external variable as = + -C,--ext-code TEXT ... Jsonnet external code as = + -A,--tla-str TEXT ... Jsonnet level argument value = + -S,--tla-code TEXT ... Jsonnet level argument code = + +#+end_example + +** ~wcwires~ + +One of the main input configurations to many WCT algorithms is the +"wire geometry". This is typically an exhaustive list of wire (or +strip) endpoints and their channel and other identifiers. In many +cases, the "wires files" are provided with errors. They may not +follow correct ordering conventions or they may have poor precision in +wire endpoints. WCT provides a way to validate and correct the wire +geometry when a "wires file" is read in and ~wcwires~ provides this +functionality in a convenient command line interface. + +#+begin_src shell :results output + wcwires --help +#+end_src + +#+RESULTS: +#+begin_example +wcwires converts and validates Wire-Cell Toolkit wire descriptions +Usage: wcwires [OPTIONS] [file] + +Positionals: + file TEXT wires file + +Options: + -h,--help Print this help message and exit + -P,--path TEXT ... 
Search paths to consider in addition to those in WIRECELL_PATH + -o,--output TEXT Write out a wires file (def=none) + -c,--correction INT Correction level: 1=load,2=order,3=direction,4=pitch (def=4) + -v,--validate Perform input validation (def=false) + -f,--fail-fast Fail on first validation error (def=false) + -e,--epsilon FLOAT Unitless relative error determining imprecision during validation (def=1e-6) + +#+end_example + + +* WCT ~Main~ + +WCT provides a C++ class called ~Main~ which may be used to easily integrate WCT functionality into other applications. The ~wire-cell~ program provides a command line interface to ~Main~. Likewise, the ~WCLS_tool~ in the ~larwirecell~ packages of LArSoft providse an /art/ / FHiCL interface to ~Main~. + +* WCT "apps" + +Finally, this package provides a number of simple WCT "apps" classes. +Typically, one or more "app" instance is used via ~Main~ to provide some +top-level execution. Provided here are ~ConfigDumper~ and ~NodeDumper~ +which are more examples than useful. See ~TbbFlow~ from the ~tbb~ sub +package and ~Pgrapher~ from the ~pgraph~ package for the two most used +apps. diff --git a/apps/apps/wcsonnet.cxx b/apps/apps/wcsonnet.cxx index 76ac70e82..c1392ceda 100644 --- a/apps/apps/wcsonnet.cxx +++ b/apps/apps/wcsonnet.cxx @@ -33,9 +33,11 @@ int main(int argc, char** argv) { CLI::App app{"wcsonnet is a Wire-Cell Toolkit aware Jsonnet compiler"}; - std::string filename; + std::string filename, output="/dev/stdout"; std::vector load_path, extvars, extcode, tlavars, tlacode; + app.add_option("-o,--output", output, + "Output file"); app.add_option("-P,--path", load_path, "Search paths to consider in addition to those in WIRECELL_PATH")->type_size(1)->allow_extra_args(false); app.add_option("-V,--ext-str", extvars, @@ -86,7 +88,8 @@ int main(int argc, char** argv) m_tlavars, m_tlacode); auto jdat = parser.load(filename); - std::cout << jdat << std::endl; + std::ofstream out(output); + out << jdat << std::endl; return 0; } diff --git a/apps/apps/wire-cell.cxx b/apps/apps/wire-cell.cxx index 3370f5e7b..01203005a 100644 --- a/apps/apps/wire-cell.cxx +++ b/apps/apps/wire-cell.cxx @@ -15,11 +15,10 @@ int main(int argc, char* argv[]) try { rc = m.cmdline(argc, argv); - if (rc) { - return rc; + if (rc == 0) { + m.initialize(); + m(); } - m.initialize(); - m(); } catch (Exception& e) { cerr << errstr(e) << endl; diff --git a/apps/inc/WireCellApps/Main.h b/apps/inc/WireCellApps/Main.h index 94239bae6..b9d910e0f 100644 --- a/apps/inc/WireCellApps/Main.h +++ b/apps/inc/WireCellApps/Main.h @@ -30,6 +30,10 @@ namespace WireCell { /// /// Or, one can use subsequent methods for more fine-grained /// setup and execution. 
+ /// + /// Return code rc: + /// rc = 1 : if --help or --version, + /// rc = 0 : if normal running should commence int cmdline(int argc, char* argv[]); /// Individual setup methods called by cmdline() or called diff --git a/apps/src/Main.cxx b/apps/src/Main.cxx index 09ece90a5..68230d36c 100644 --- a/apps/src/Main.cxx +++ b/apps/src/Main.cxx @@ -104,11 +104,16 @@ int Main::cmdline(int argc, char* argv[]) po::notify(opts); - if (opts.count("help")) { + if (argc == 1 or opts.count("help")) { std::cout << desc << "\n"; return 1; } + if (opts.count("version")) { + std::cout << version() << std::endl; + return 1; + } + if (opts.count("config")) { for (auto fname : opts["config"].as >()) { add_config(fname); @@ -195,10 +200,6 @@ int Main::cmdline(int argc, char* argv[]) } #endif - if (opts.count("version")) { - std::cout << version() << std::endl; - } - return 0; } diff --git a/apps/test/test_apps.bats b/apps/test/test_apps.bats new file mode 100644 index 000000000..747f38c63 --- /dev/null +++ b/apps/test/test_apps.bats @@ -0,0 +1,27 @@ +#!/usr/bin/env bats + +bats_load_library wct-bats.sh + +@test "test wct bats in apps" { + usepkg util apps + t=$(topdir) + echo "TOP $t" + [ -f "$t/build/apps/wire-cell" ] + [ -n "$util_src" ] + [ -n "$(wcsonnet)" ] +} + +@test "wire-cell help" { + + # Defined by wcb_env, "run" by bats + run wct --help + + # Bats will only show this if test fails. + echo "$output" + + # Assert no error status code + [[ "$status" -eq 0 ]] + + # Assert expected info + [[ -n "$(echo $output | grep 'Wire-Cell Toolkit')" ]] +} diff --git a/apps/wscript_build b/apps/wscript_build index 1ac032a12..cd9c794af 100644 --- a/apps/wscript_build +++ b/apps/wscript_build @@ -1,4 +1,4 @@ use = ['WireCellIface', 'FFTWTHREADS'] if 'HAVE_TBB' in bld.env: use.append('TBB') -bld.smplpkg('WireCellApps', use=use, app_use='WireCellApps') +bld.smplpkg('WireCellApps', use=use) # , app_use='WireCellApps') diff --git a/aux/README.org b/aux/README.org index 5bcf12f87..d7e1c85db 100644 --- a/aux/README.org +++ b/aux/README.org @@ -1,5 +1,7 @@ - #+title: Wire-Cell Toolkit Auxiliary +#+SETUPFILE: ../setup-readme.org + +* Overview This core WCT package enjoys a special position in the dependency tree. It depends on ~WireCellIface~ as do other "implementation" @@ -13,6 +15,9 @@ Some categories of code appropriate for ~aux~: - ~INode~ implementations that provide general purpose data converters (eg, ~ITensorSet <--> IFrame~) - general utility code that operate on interfaces (eg, ~FrameTools~) +#+include: docs/aux.org + +* Links to more docs :noexport: When describing a class or code outgrows what can easily be expressed in comments a file in [[./docs][docs/]] may be provided. diff --git a/aux/docs/ClusterArrays.org b/aux/docs/ClusterArrays.org index 469d4b6b8..ce864b28f 100644 --- a/aux/docs/ClusterArrays.org +++ b/aux/docs/ClusterArrays.org @@ -1,4 +1,4 @@ -#+title: ~WireCell::Aux::ClusterArrays~ +* Cluster Arrays Provides array representations of ~ICluster~. @@ -10,18 +10,18 @@ leading to an even more complex graph. The ~ClusterArrays~ class This document describes the ~ClusterArrays~ interface and give guidance on how to use the array its produces. -* Low-level array representation +** Low-level array representation The API provides arrays in the form of [[https://www.boost.org/doc/libs/1_79_0/libs/multi_array/doc/user.html][Boost.MultiArray]] types. -* Array schema +** Array schema The array schema closely matches that provided by the python-geometric ~HeteroData~ interface. 
It factors into *node array* and *edge array* schema. -** Node array schema +*** Node array schema An ~ICluster~ graph node of one of a fixed set of *node types* (/channel, wire, blob, slice, measure/). The first letter of the type name is @@ -36,7 +36,7 @@ marked with "(int)". Take care that the enumerated lists below are 1-based counts, one more than the 0-based array indices. -*** Channel +**** Channel A channel represents an amount of signal collected from its attached wire segments over the duration of a time slice. @@ -47,7 +47,7 @@ wire segments over the duration of a time slice. 4. /index/, (int) the channel index 5. /wpid/, (int) the wire plane id -*** Wire +**** Wire The wire array reproduces purely static "geometric" information about physical wire segments. @@ -65,7 +65,7 @@ physical wire segments. 11. /headz/, the z coordinate of the head endpoint of the wire. -*** Blob +**** Blob A blob describes a volume in space bounded in the longitudinal direction by the duration of a time slice and in the transverse @@ -88,7 +88,7 @@ signal contained by this region. 14. /ncorners/, (int) the number of corners 15. 24 columns holding /corners as (y,z) pairs/, 12 pairs, of which /ncorners/ are valid. -*** Slice +**** Slice A slice represents a duration in drift/readout time. @@ -102,7 +102,7 @@ A slice represents a duration in drift/readout time. The ~ISlice~ also holds the "activity map" mapping channels in slice to charge. -*** Measure +**** Measure A measure represents the collection of channels in a given plane connected to a set of wires that span a blob in one wire plane. @@ -113,7 +113,7 @@ Its signal is the sum of channel signals. 3. /uncertainty/, the uncertainty in the value. 4. /wpid/, the wire plane ID -** Edge array schema +*** Edge array schema ~ICluster~ does not associate any data with edges and so only connectivity information is covered by the edge array schema. There @@ -132,7 +132,7 @@ the slice type node array. The rows of edge arrays follow the order of edges in the ~ICluster~ graph. -* Implementation +** Implementation The ~ClusterArrays~ class will convert ~ICluster~ to arrays following above schema. See ~ClusterFileSink::numpify()~ for example usage. diff --git a/aux/docs/aux.org b/aux/docs/aux.org new file mode 100644 index 000000000..676764381 --- /dev/null +++ b/aux/docs/aux.org @@ -0,0 +1,7 @@ +#+include: noise.org +#+include: tensor-data-model.org +#+include: frame-tensor.org +#+include: frame-files.org +#+include: cluster-shadow.org +#+include: ClusterArrays.org + diff --git a/aux/docs/cluster-shadow.org b/aux/docs/cluster-shadow.org index e49783721..1c77c24ca 100644 --- a/aux/docs/cluster-shadow.org +++ b/aux/docs/cluster-shadow.org @@ -1,6 +1,6 @@ -#+title Cluster Shadow +* Cluster Shadow -* Introduction +** Introduction In the context of the ~ClusterShadow~ API, the unqualifed term /cluster/ is short for /geometric cluster/ (GC). A set of GCs is represented by a @@ -14,8 +14,8 @@ sub-package is one example of a GC. A /blob shadow/ (BS) is an overlap between two blobs in a given tomographic view (wire plane). The set of blob shadows for a set of -blobs and over all views is represented as a /blob shadow graph/. See -[[blob-shadow.org]] for more info on blob shadows. +blobs and over all views is represented as a /blob shadow graph/. See the presentation +on Blob Shadows for more info on blob shadows. A /cluster shadow/ (CS) then represents the combination of these two relationships. 
A CS is formed from a b-b GC graph and a b-b BS graph @@ -29,14 +29,14 @@ edge carries the ~WirePlaneId~ from which a view index may be derived. Note, a cluster may posses a /self-shadow/ and this is represented by a loop/buckle edge connecting its vertex to itself. -* Implementation +** Implementation The main method of the ~ClusterShadow~ API produces a CS graph and a connected component property map. The vertex descriptor of the CS graph is merely the component index. Edges provide measures of the shadow. -** Constructing +*** Constructing #+begin_src c++ #include "WireCellAux/ClusterShadow.h" @@ -54,7 +54,7 @@ shadow. If you don't care about the blob content of clusters you may call ~ClusterShadow::shadow(csgraph,bsgraph)~. -** Using +*** Using Each CS graph vertex has one ~Rectangles~ object for each view. You may iterate on the edges of the CS graph, retrieve the vertex object for diff --git a/aux/docs/frame-files.org b/aux/docs/frame-files.org index 7e25b5b8b..66c3cb440 100644 --- a/aux/docs/frame-files.org +++ b/aux/docs/frame-files.org @@ -1,7 +1,6 @@ -#+title: Frame representions -#+setupfile: ../../util/docs/setup-note.org +* Frame files -* Intro +** Intro A WCT "frame" provides a simple representation of data from different types of "readout" of a LArTPC detector with some additional concepts @@ -32,7 +31,7 @@ array spanning the channels and ticks of the readout. -* Frame files +** Frame files WCT ~sio~ subpackage provides support for reading and writing /frame files/. The format is actually a stream or archive of Numpy ~.npy~ @@ -80,7 +79,7 @@ Despite the limitations of being both info-lossy and potentially data-inflating, this format is convenient for producing per-tag dense arrays for quick debugging using Python/Numpy. -* Frame tensor files +** Frame tensor files A second format called /frame tensor files/ improves on the above by mapping the frame data model to the one supported by @@ -145,7 +144,7 @@ A reading is simlar but reversed file -> [TensorFileSource] -> (ITensorSet) -> [TensorFrame] -> (IFrame) #+end_example -** Set-level +*** Set-level The ~ITensorSet~ class and its metadata accepts frame information which is not dependent on trace-level information. To start with, the @@ -168,7 +167,7 @@ are expected to hold tensors and must be contiguous in the stream but otherwise their order is not defined. These tensors are described in the remaining sections. -** Tensors +*** Tensors An ~ITensor~ represents some aspect of an ~IFrame~ not already represented in the set-level metadata. Each tensor provides at least these two @@ -186,7 +185,7 @@ information are assumed to take the forms, respectively The remaining sections describe each accepted type of tensor. -** Trace +*** Trace A trace tensor provides waveform samples from a number of channels. Its array spans a single or an ordered collection of channels. A @@ -211,7 +210,7 @@ appended to this collection. This allows sparse or dense or a hybrid mix of trace information. It also allows a collection of tagged traces to have their associated waveforms represented together. -** Index +*** Index A subset of traces held by the frame is identified by a string ("trace tag") and its associated collection of indices into the full and final @@ -220,7 +219,7 @@ collection of traces. - ~tag="tag"~ :: a unique string ("trace tag") identifying this subset -** Summary +*** Summary A trace summary tensor provides values associated to indexed (tagged) traces. 
The tensor array elements are assumed to map one-to-one with diff --git a/aux/docs/frame-tensor.org b/aux/docs/frame-tensor.org index b9d759672..9c2f23ccf 100644 --- a/aux/docs/frame-tensor.org +++ b/aux/docs/frame-tensor.org @@ -1,6 +1,6 @@ -#+title: Frames and their representations +* Frames and their representations -* Intro +** Intro A WCT "frame" provides a simple representation of data from different types of "readout" of a LArTPC detector with some additional concepts @@ -29,7 +29,7 @@ array spanning the channels and ticks of the readout. At least two persistent representations of ~IFrame~ are supported by WCT. The rest of this d -* Initial I/O with FrameFileSink/FrameFileSource +** Initial I/O with FrameFileSink/FrameFileSource WCT ~sio~ subpackage provides support for sending ~IFrame~ through file I/O based on ~boost::iostreams~ and ~custard~ to support streams through @@ -72,7 +72,7 @@ lossless I/O. And the array ordering requirements of ~FrameFileSource~ make producing compatible files by software other than ~FrameFileSink~ awkward. -* Tag solutions +** Tag solutions To fix the above, it was attempted to modify the ~FF{Sink,Source}~ to work in a new mode, which retaining backward compatibility and to add @@ -94,12 +94,12 @@ relieves the burden on using naming schemes to hold metadata. Second, files and ZeroMQ transport and thus by converting between ~IFrame~ and ~ITensor~ representations, frames get new I/O methods "for free". -* Frame decomposition +** Frame decomposition The ~IFrame~ info is decomposed into an ~ITensorSet/ITensor~ representation. -** Set metadata +*** Set metadata The ~IFrame::ident()~ is mapped directly to ~ITensorSet::ident()~. @@ -120,7 +120,7 @@ are expected to hold tensors and must be contiguous in the stream but otherwise their order is not defined. These tensors are described in the remaining sections. -** Tensors +*** Tensors An ~ITensor~ represents some aspect of an ~IFrame~ not already represented in the set-level metadata. Each tensor provides at least these two @@ -138,7 +138,7 @@ information are assumed to take the forms, respectively The remaining sections describe each accepted type of tensor. -** Trace +*** Trace A trace tensor provides waveform samples from a number of channels. Its array spans a single or an ordered collection of channels. A @@ -163,7 +163,7 @@ appended to this collection. This allows sparse or dense or a hybrid mix of trace information. It also allows a collection of tagged traces to have their associated waveforms represented together. -** Index +*** Index A subset of traces held by the frame is identified by a string ("trace tag") and its associated collection of indices into the collection of @@ -179,7 +179,7 @@ traces - ~tag="tag"~ :: a unique string ("trace tag") identifying this subset - ~traces=~ :: a trace tensor name or the empty string. -** Summary +*** Summary A trace summary tensor provides values associated to indexed (tagged) traces. 
The tensor array elements are assumed to map one-to-one with diff --git a/aux/docs/noise.org b/aux/docs/noise.org index e10c03cd5..44ad562ab 100644 --- a/aux/docs/noise.org +++ b/aux/docs/noise.org @@ -1,54 +1,6 @@ -#+title: Wire-Cell Toolkit Noise -#+options: ':t -#+latex_header: \usepackage[margin=1in]{geometry} -#+LaTeX_HEADER: \lstloadlanguages{[ISO]C++} -#+LaTeX_HEADER: \definecolor{background}{RGB}{255,255,255} -#+LaTeX_HEADER: \definecolor{delim}{RGB}{20,105,176} -#+LaTeX_HEADER: \definecolor{keyword}{RGB}{20,105,176} -#+LaTeX_HEADER: \definecolor{comment}{RGB}{20,200,10} -#+LaTeX_HEADER: \lstdefinelanguage{jsonnet}{ -#+LaTeX_HEADER: basicstyle=\normalfont\ttfamily, -#+LaTeX_HEADER: numbers=left, -#+LaTeX_HEADER: numberstyle=\scriptsize, -#+LaTeX_HEADER: stepnumber=1, -#+LaTeX_HEADER: numbersep=8pt, -#+LaTeX_HEADER: showstringspaces=false, -#+LaTeX_HEADER: breaklines=true, -#+LaTeX_HEADER: frame=lines, -#+LaTeX_HEADER: backgroundcolor=\color{background}, -#+LaTeX_HEADER: keywords = {$}, -#+LaTeX_HEADER: keywords = [2]{self, import, local, true, false, null, function}, -#+LaTeX_HEADER: keywordstyle=[2]\color{keyword}, -#+LaTeX_HEADER: comment=[l]{//}, -#+LaTeX_HEADER: commentstyle=\color{comment}\ttfamily, -#+LaTeX_HEADER: stringstyle=\color{blue}\ttfamily, -#+LaTeX_HEADER: morestring=[b]', -#+LaTeX_HEADER: morestring=[b]", -#+LaTeX_HEADER: otherkeywords={:, =, ==} -#+LaTeX_HEADER: } -#+LaTeX_HEADER: \lstloadlanguages{Jsonnet} -#+LaTeX_HEADER: \lstdefinestyle{note}{ -#+LaTeX_HEADER: basicstyle=\normalfont\ttfamily, -#+LaTeX_HEADER: numbers=none, -#+LaTeX_HEADER: numberstyle=\scriptsize, -#+LaTeX_HEADER: stepnumber=1, -#+LaTeX_HEADER: numbersep=8pt, -#+LaTeX_HEADER: showstringspaces=false, -#+LaTeX_HEADER: breaklines=true, -#+LaTeX_HEADER: frame=lines, -#+LaTeX_HEADER: backgroundcolor=\color{background}, -#+LaTeX_HEADER: keywords = [2]{include, if, for, const, typedef, using, public, private, virtual, class, return, void}, -#+LaTeX_HEADER: keywordstyle=[2]\color{keyword}, -#+LaTeX_HEADER: comment=[l]{//}, -#+LaTeX_HEADER: commentstyle=\color{comment}\ttfamily, -#+LaTeX_HEADER: stringstyle=\color{blue}\ttfamily, -#+LaTeX_HEADER: morestring=[b]', -#+LaTeX_HEADER: morestring=[b]", -#+LaTeX_HEADER: otherkeywords={:, =, ==, ::} -#+LaTeX_HEADER: } -#+LaTeX_HEADER: \lstset{style=note} - -* Overview +* Noise + +** Overview WCT provides support for "noise" in various sub-packages: @@ -64,13 +16,13 @@ WCT provides support for "noise" in various sub-packages: See also this presentation: [[file:noise-presentation.org][org]], [[file:noise-presentation.pdf][pdf]] -* WCT Noise Spectra +** WCT Noise Spectra WCT *noise spectra* defines a dataset describing mean spectral amplitudes and related metadata. They are provided to WCT C++ code as objects following a schema defined here. The datasets are provided via WCT configuration mechanism either directly as Jsonnet configuration data or by naming stand-alone data files (usually as compressed JSON but Jsonnet may also be provided). WCT *noise spectra* are in the form of an array of *spectrum objects*. Each *spectrum object* follows a schema describing what attributes it must or may have. The attributes are categorized as *required*, *optional* or *undefined*. A spectrum object *must* contain all *required attributes*. A consumer of a WCT noise file *must* ignore *undefined attributes*. An *optional attribute* is one that is *required* for some consumers and *undefined* for others. 
Consumers of optional attributes *may* provide default values for use if the attribute is unspecified. -** Required attributes +*** Required attributes A WCT *spectral object* *must* provide these attributes: @@ -82,7 +34,7 @@ A WCT *spectral object* *must* provide these attributes: - ~amps~ :: an array of floating-point numbers providing an estimate of a mean spectral amplitude in units of [voltage] in the WCT system of units. Note that here "amps" an abbreviation of "amplitude" and not "amperage". The value of an element of ~amps~ may be derived from some sub-sampling or interpolation of an original distribution of DFT coefficients. That is, an element of ~amps~ is the an average $\langle|X_k|\rangle,\ k\in [0,N^{(fft)}-1]$ with $N^{(fft)} \ge$ ~nsamples~, over some number of waveforms of size ~nsamples~. The inequality is typically due to zero-padding of the waveform prior to taking the DFT. Note: in preparing ~amps~ the user is recommended to provide a number of waveforms approximately equal to ~nsamples~ in order to co-optimize spectral resolution and statistical stability. User is also recommended to utilize ~NoiseTools::Collector~ for low-level noise modeling code or execute a job using ~NoiseModeler~ for a high-level development. -** Optional attributes required by ~EmpiricalNoiseModel~ +*** Optional attributes required by ~EmpiricalNoiseModel~ The ~EmpiricalNoiseModel~ component requires these optional attributes: @@ -96,13 +48,13 @@ The ~EmpiricalNoiseModel~ component requires these optional attributes: - ~wirelen~ :: a floating-point number giving a wire length expressed in the WCT system of units for [length]. This value should be representative of (eg, binned over) wires for which the associated spectrum applies. -** Optional attributes required by ~GroupNoiseModel~ +*** Optional attributes required by ~GroupNoiseModel~ The ~GroupNoiseModel~ provides a model interface for both coherent and incoherent noise where spectra are grouped in some manner. It requires this optional attribute: - ~group~ :: an integer identifying an abstract group to which channels may be associated. The association to channels may be provided by a WCT *channel groups* array. The use of ~groupID~ as this attribute name is deprecated. -* WCT channel groups +** WCT channel groups The ~GroupNoiseModel~ and potentially other components require information on how to collect channels into distinct groups. The user provides this information in the form of WCT *channel-groups* data structures. These are in the form of an array of WCT *channel-group* objects, each of which has these *required* attributes: @@ -110,7 +62,7 @@ The ~GroupNoiseModel~ and potentially other components require information on ho - ~channels~ :: an array of integer values providing the channel ID numbers to associate as a group. The channel IDs are as used in the WCT *wire object* configuration provided and described elsewhere. -* Providing the above data +** Providing the above data WCT *noise spectra* and *channel group* datasets are sometimes highly structured, even algorithmically generated, and sometimes unstructured and voluminous such as when they are derived from some external analysis. @@ -132,7 +84,7 @@ Developers of WCT components can provide the user this flexibility with just a f } #+end_src -* Round-trip Validation +** Round-trip Validation The WCT noise code supports both modeling and simulating noise. Each is effectively the inverses of the other and so we may check that we get out what we put in. 
The "round-trip" check consists of these steps: @@ -147,7 +99,7 @@ The WCT noise code supports both modeling and simulating noise. Each is effecti - Finally, the grouped spectra are saved to a WCT *noise spectra* file. - Plots are made . -** Input spectra +*** Input spectra The input spectra can be viewed with: @@ -172,7 +124,7 @@ It's arguments are as listed: See below for guidance on how to provide meaningful values for ~peak~ and ~rms~. -** Model details +*** Model details The user requires some understanding of the noise spectral model that is used in this test in order to provide proper values. The spectral shape in the frequency domain is chosen to follow the Rayleigh distribution, \[R(x;\sigma) = \frac{x}{\sigma^2}e^{-x^2/(2\sigma^2)},\ x \ge 0\] @@ -202,7 +154,7 @@ Thus the ~rms~ parameter is identified as providing the desired value of the $\s See ~test-noise-roundtrip.sh~ for exact command. The commands to reproduce such plots are described next. -** Visualize the model +*** Visualize the model The above plot was made with a command like the following: #+begin_example @@ -222,7 +174,7 @@ For comparison, an example of a spectrum modeling real-world noise from the Prot The noise in PDSP is about 4 ADC RMS and its 12 bit ADC sees voltage in the range of 200 to 1600 mV and so expects about 1.3 mV RMS of noise measured in voltage input to the ADC. As a reminder, the simple model above has ~rms =~ 1 mV and achieves a smilar peak of 200 mV in amplitude for similar ~peak~ and same ~nsamples~. -** Performing the round-trip +*** Performing the round-trip A main configuration file for ~wire-cell~ is provided that uses the same ~test-noise-spectra.jsonnet~ described above to provide the input to the round-trip. The round-trip job can be exercised with default parameters like: diff --git a/aux/docs/blob-shadow.org b/aux/docs/talks/blob-shadow.org similarity index 97% rename from aux/docs/blob-shadow.org rename to aux/docs/talks/blob-shadow.org index f4a778b88..d352c8671 100644 --- a/aux/docs/blob-shadow.org +++ b/aux/docs/talks/blob-shadow.org @@ -35,7 +35,7 @@ Notes: * #+begin_center -\includegraphics[height=\textheight,page=9]{../../img/docs/dpf-raytiling.pdf} +\includegraphics[height=\textheight,page=9]{../../../img/docs/dpf-raytiling.pdf} #+end_center diff --git a/aux/docs/noise-presentation.org b/aux/docs/talks/noise-presentation.org similarity index 100% rename from aux/docs/noise-presentation.org rename to aux/docs/talks/noise-presentation.org diff --git a/aux/docs/tensor-data-model.org b/aux/docs/tensor-data-model.org index 7aa36356f..18938531e 100644 --- a/aux/docs/tensor-data-model.org +++ b/aux/docs/tensor-data-model.org @@ -1,7 +1,7 @@ -#+title: Wire-Cell Toolkit Tensor Set Data Model -#+setupfile: ../../util/docs/setup-note.org +* Tensor Set Data Model -* Introduction + +** Introduction This document defines the Wire-Cell Toolkit (WCT) /tensor data model/. It factors into two layers: "generic" and "specific". @@ -18,9 +18,9 @@ generic tensor data model. The next two sections define the generic and specific tensor data models and are written in rfc2119 language. -* Generic tensor data model +** Generic tensor data model -** Tensor +*** Tensor A *tensor* shall be composed of two conceptual parts: an *array* part and a *metadata* (MD) part. @@ -62,12 +62,12 @@ JSON type object but otherwise this tensor data model places no requirements on its contents. The tensor set MD must be faithfully passed through any tensor set converter round trip. 
-* Specific tensor data model +** Specific tensor data model The specific tensor data model defines a number of complex specific data types in terms of the generic tensor data model. -** Common metadata conventions +*** Common metadata conventions A tensor MD shall have an attribute named *datapath* of type string that identifies the tensor in a logical hierarchical structure of multiple @@ -90,7 +90,7 @@ Complex data types may be represented as an *aggregation* of multiple tensors. These shall be defined on a per data type basis below as a set of MD attributes providing *datapath* values. -** Overview of specific types +*** Overview of specific types The following specific types are mapped to the basic tensor data model. Each item in the list gives the *datatype* MD attribute value @@ -109,14 +109,14 @@ The specific requirements for each data type are given in the following sections in terms of their tensor array and metadata and in some cases in terms of other types defined previously. -** pcarray +*** pcarray The *datatype* of *pcarray* indicates a tensor representing one ~PointCloud::Array~. The tensor array information shall map directly to that of ~Array~. A *pcarray* places no additional requirements on its tensor MD. -** pcdataset +*** pcdataset The *datatype* of *pcdataset* indicates a tensor representing on ~PointCloud::Dataset~. @@ -129,7 +129,7 @@ The tensor MD shall have the following attributes: Additional user application ~Dataset~ metadata may reside in the tensor MD. -** pcgraph +*** pcgraph The *datatype* of *pcgraph* indicates a tensor representing a "point cloud graph". This extends a point cloud to include relationships between @@ -147,7 +147,7 @@ shall provide indices into the *nodes* point cloud representing the tail and head endpoint of graph edges. A node or edge dataset may be shared between different *pcgraph* instances. -** trace +*** trace The *datatype* of *trace* indicates a tensor representing a single ~ITrace~ or a collection of ~ITrace~ which have been combined. @@ -169,7 +169,7 @@ The tensor MD may include the attribute *tbin* with integer value and providing the number of sample periods (ticks) between the frame reference time and the first sample (column) in the array. -** tracedata +*** tracedata The *datatype* of *tracedata* provides per-trace information for a subset of. It is similar to a *pcdataset* and in fact may carry that value as @@ -194,7 +194,7 @@ and neither *index* nor *summary* is recognized. If the *tracedata* has a may provide a *chid* array each corresponding to the traces identified by *index*. -** frame +*** frame The *datatype* of *frame* represents an ~IFrame~. @@ -223,22 +223,22 @@ truncated to *type* ~"i2"~. A frame tensor of type ~"i2"~ shall have its sample values inflated to type ~float~ when converted to an ~IFrame~. -** cluster +*** cluster A ~cluster~ is a ~pcgraph~ with convention for how to serialize each of its node types as described here. 
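As a purely illustrative sketch, and not part of the schema definition, the MD of one cluster's *pcgraph* tensor might aggregate its node and edge point clouds along these lines (the aggregation attribute names and every ~datapath~ value here are hypothetical):

#+begin_src jsonnet
// Sketch only: hypothetical MD for a cluster represented as a pcgraph.
{
  datatype: "pcgraph",
  datapath: "clusters/0",
  // datapath of a pcdataset holding the node point cloud
  nodes: "clusters/0/nodes",
  // datapath of a pcdataset whose arrays give the tail and head indices
  // of each edge into the nodes point cloud
  edges: "clusters/0/edges",
}
#+end_src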
-*** wire +**** wire -*** channel +**** channel -*** measure +**** measure -*** blob +**** blob -*** slice +**** slice -* Similarity to HDF5 +** Similarity to HDF5 The data model is intentionally similar to HDF5 abstract data model and there is a conceptual mapping between the two: diff --git a/aux/docs/test-noise-roundtrip-flow-graph.png b/aux/docs/test-noise-roundtrip-flow-graph.png index c162829c8..89e564b8f 100644 Binary files a/aux/docs/test-noise-roundtrip-flow-graph.png and b/aux/docs/test-noise-roundtrip-flow-graph.png differ diff --git a/aux/docs/test-noise-spectra-in-0.png b/aux/docs/test-noise-spectra-in-0.png index 68bcdb5ba..521610e9c 100644 Binary files a/aux/docs/test-noise-spectra-in-0.png and b/aux/docs/test-noise-spectra-in-0.png differ diff --git a/cfg/README.org b/cfg/README.org index 29e6c8398..b638b56d7 100644 --- a/cfg/README.org +++ b/cfg/README.org @@ -1,4 +1,5 @@ -#+TITLE: Wire Cell Toolkit Configuration +#+title: Wire Cell Toolkit Configuration +#+SETUPFILE: ../setup-readme.org * What is here @@ -39,17 +40,17 @@ date with the reality on the ground. Files at top level provide general support and utilities. Primarily there is the file: -- [[./wirecell.jsonnet]] +- [[file:wirecell.jsonnet]] This holds a Jsonnet version of the WCT /system of units/ (which is essentially identical to CLHEP's). It also includes a number of Jsonnet functions to form some common configuration data structures. -- [[./vector.jsonnet]] +- [[file:vector.jsonnet]] This holds some functions to assist in doing vector arithmetic in Jsonnet. -- [[./pgraph.jsonnet]] +- [[file:pgraph.jsonnet]] This holds functions to support building a processing graph for use by the ~Pgrapher~ WCT app component. Some details are [[https://wirecell.github.io/news/posts/pgrapher-configuration-improvements/][here]]. @@ -60,13 +61,13 @@ Across all types of *detectors* and *jobs* which are supported by WCT we define a layered configuration structure. The top layer is most generic and the bottom most specific. An Jsonnet API is defined for each layer. Users defining new "main" Jsonnet files, or improving -legacy forms, should read the document [[layers/README.org]]. +legacy forms, should read the document [[file:layers/README.org]]. ** Legacy forms The original configuration structure which does not allow for detector-independent job configuration definition is held under -[[./pgrapher/]]. See [[./pgrapher/README.org][it's README]] for more info on the conventions it +[[file:pgrapher/]]. See [[file:pgrapher/README.org][it's README]] for more info on the conventions it follows. New detector configuration should follow the *structured layers* form introduced above. In some cases, legacy detector configuration may used in an adapter that provides *structured layers* @@ -76,7 +77,7 @@ forms. Configuration can become obsolete when the C++ changes or when new paradigms of configuration organization are adopted. That -configuration may be moved to [[./obsolete/]] for some time before being +configuration may be moved to [[file:obsolete/]] for some time before being dropped from the tip of the master branch. 
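Whichever form a detector follows, a "main" Jsonnet file ultimately evaluates to a sequence of component configuration objects plus an app such as ~Pgrapher~ wired up with edges. Below is a minimal, hypothetical sketch in the ~pgraph.jsonnet~ style; the node types come from the ~gen~ package, the parameter values are illustrative only, and the required plugins are assumed to be loaded on the ~wire-cell~ command line (e.g. with ~-p~):

#+begin_src jsonnet
local wc = import "wirecell.jsonnet";
local pg = import "pgraph.jsonnet";

// One depo source feeding one sink.  The parameter values are only
// placeholders to keep the example self-contained.
local depos = pg.pnode({
    type: "TrackDepos",
    name: "example",
    data: {
        step_size: 1.0 * wc.millimeter,
        tracks: [ {
            time: 10.0 * wc.ns,
            charge: -1,
            ray: wc.ray(wc.point(10, 0, 0, wc.cm), wc.point(100, 10, 10, wc.cm)),
        } ],
    },
}, nin=0, nout=1);

local sink = pg.pnode({
    type: "DumpDepos",
    name: "example",
    data: {},
}, nin=1, nout=0);

// Wire the nodes into a graph and hand its edges to the Pgrapher app.
local graph = pg.pipeline([depos, sink]);
local app = { type: "Pgrapher", data: { edges: pg.edges(graph) } };

// The final configuration sequence: every component object plus the app.
pg.uses(graph) + [ app ]
#+end_src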
* Tests diff --git a/cfg/layers/high/fileio.jsonnet b/cfg/layers/high/fileio.jsonnet index b74782e0f..562e727c8 100644 --- a/cfg/layers/high/fileio.jsonnet +++ b/cfg/layers/high/fileio.jsonnet @@ -154,7 +154,8 @@ local wc = import "wirecell.jsonnet"; celltree_file_source :: function(filename, recid, branches = ["calibWiener", "calibGaussian"], frame_tags=["gauss"], - trace_tags = ["wiener", "gauss"]) + trace_tags = ["wiener", "gauss"], + extra_params = {}) pg.pnode({ type: "CelltreeSource", name: filename, @@ -164,7 +165,7 @@ local wc = import "wirecell.jsonnet"; frames: frame_tags, "in_branch_base_names": branches, "out_trace_tags": trace_tags, - }, + } + extra_params, }, nin=0, nout=1), diff --git a/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sig.jsonnet b/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sig.jsonnet index d491f3d41..55e26e7b7 100644 --- a/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sig.jsonnet +++ b/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sig.jsonnet @@ -11,7 +11,8 @@ local wc = import "wirecell.jsonnet"; local g = import "pgraph.jsonnet"; -local io = import "pgrapher/common/fileio.jsonnet"; +// local io = import "pgrapher/common/fileio.jsonnet"; +local io = import "layers/high/fileio.jsonnet"; local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; local tools_maker = import "pgrapher/common/tools.jsonnet"; @@ -61,17 +62,18 @@ local anode = tools.anodes[0]; // [sim.ar39(), sim.tracks(tracklist)]); local depos = sim.tracks(tracklist); -local deposio = io.numpy.depos(output); +//local deposio = io.numpy.depos(output); local drifter = sim.drifter; local bagger = sim.make_bagger(); local transform = sim.make_depotransform("nominal", anode, tools.pirs[0]); local digitizer = sim.digitizer(anode); -local frameio = io.numpy.frames(output); -local sink = sim.frame_sink; +// local frameio = io.numpy.frames(output); +// local sink = sim.frame_sink; +local sink = io.frame_file_sink(output); -local graph = g.pipeline([depos, deposio, drifter, bagger, transform, +local graph = g.pipeline([depos, drifter, bagger, transform, digitizer, - frameio, sink]); + sink]); local app = { type: "Pgrapher", diff --git a/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sn-nf-sp.jsonnet b/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sn-nf-sp.jsonnet index 036758608..b90f0d1f1 100644 --- a/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sn-nf-sp.jsonnet +++ b/cfg/pgrapher/experiment/uboone/wct-sim-ideal-sn-nf-sp.jsonnet @@ -13,7 +13,8 @@ local wc = import "wirecell.jsonnet"; local g = import "pgraph.jsonnet"; -local io = import "pgrapher/common/fileio.jsonnet"; +//local io = import "pgrapher/common/fileio.jsonnet"; +local io = import "layers/high/fileio.jsonnet"; local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; local tools_maker = import "pgrapher/common/tools.jsonnet"; @@ -50,7 +51,7 @@ local sim = sim_maker(params, tools); // [sim.ar39(), sim.tracks(tracklist)]); local depos = sim.tracks(tracklist); -local deposio = io.numpy.depos(output); +// local deposio = io.numpy.depos(output); local drifter = sim.drifter; @@ -62,7 +63,11 @@ local noise_model = sim.make_noise_model(anode, sim.miscfg_csdb); local noise = sim.add_noise(noise_model); local digitizer = sim.digitizer(anode, tag="orig"); -local sim_frameio = io.numpy.frames(output, "simframeio", tags="orig"); +//local sim_frameio = io.numpy.frames(output, "simframeio", tags="orig"); +local sim_frameio = io.tap('FrameFanout', + io.frame_file_sink(output, tags=["raw"]), + name="sim"); + local magnifio = g.pnode({ type: 
"MagnifySink", data: { @@ -102,14 +107,18 @@ local noise_epoch = "perfect"; //local noise_epoch = "after"; local chndb = chndb_maker(params, tools).wct(noise_epoch); local nf = nf_maker(params, tools, chndb); -local nf_frameio = io.numpy.frames(output, "nfframeio", tags="raw"); +local nf_frameio = io.tap('FrameFanout', + io.frame_file_sink(output, tags=["raw"]), + name="nf"); local sp = sp_maker(params, tools); -local sp_frameio = io.numpy.frames(output, "spframeio", tags="gauss"); +local sp_frameio = io.tap('FrameFanout', + io.frame_file_sink(output, tags=["gauss"]), + name="sp"); local sink = sim.frame_sink; -local graph = g.pipeline([depos, deposio, drifter, signal, +local graph = g.pipeline([depos, drifter, signal, miscon, noise, digitizer, sim_frameio, magnifio, diff --git a/cfg/test/cfg_diffuser.jsonnet b/cfg/test/cfg_diffuser.jsonnet deleted file mode 100644 index bce3314f9..000000000 --- a/cfg/test/cfg_diffuser.jsonnet +++ /dev/null @@ -1,46 +0,0 @@ -local wc = import "wirecell.jsonnet"; -local p = import "params.jsonnet"; -[ - { - type:"Diffuser", - name:"diffuserU", - data: { - pitch_origin: { x:p.wires.u.x, y:0.0, z:0.0 }, - pitch_direction: { x:0.0, y:-0.866025, z:0.5}, - pitch_distance: p.wires.u.pitch, - timeslice: p.timeslice, - timeoffset: 0.0, - starttime: p.wires.u.x/p.drift_velocity, - drift_velocity: p.drift_velocity, - max_sigma_l: 2.5*p.timeslice - } - }, - { - type:"Diffuser", - name:"diffuserV", - data: { - pitch_origin: { x:p.wires.v.x, y:0.0, z:0.0 }, - pitch_direction: { x:0.0, y:0.866025, z:0.5 }, - pitch_distance: p.wires.v.pitch, - timeslice: p.timeslice, - timeoffset: 0.0, - starttime: p.wires.v.x/p.drift_velocity, - drift_velocity: p.drift_velocity, - max_sigma_l: 2.5*p.timeslice - } - }, - { - type:"Diffuser", - name:"diffuserW", - data: { - pitch_origin: { x:p.wires.w.x, y:0.0, z:0.0 }, - pitch_direction: { x:0.0, y:0.0, z:1.0 }, - pitch_distance: p.wires.w.pitch, - timeslice: p.timeslice, - timeoffset: 0.0, - starttime: p.wires.w.x/p.drift_velocity, - drift_velocity: p.drift_velocity, - max_sigma_l: 2.5*p.timeslice - } - }, -] diff --git a/cfg/test/cfg_drifter.jsonnet b/cfg/test/cfg_drifter.jsonnet deleted file mode 100644 index 6e12fcc6e..000000000 --- a/cfg/test/cfg_drifter.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local wc = import "wirecell.jsonnet"; -local Drifter = {type: "Drifter", name: "", data:{drift_velocity:wc.nominal_drift_velocity}}; -[ - Drifter { name: "drifterU", data: Drifter.data {location: 15*wc.mm} }, - Drifter { name: "drifterV", data: Drifter.data {location: 10*wc.mm} }, - Drifter { name: "drifterW", data: Drifter.data {location: 5*wc.mm} }, -] - diff --git a/cfg/test/cfg_trackdepos.jsonnet b/cfg/test/cfg_trackdepos.jsonnet deleted file mode 100644 index 5475cafa9..000000000 --- a/cfg/test/cfg_trackdepos.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -local wc = import "wirecell.jsonnet"; -[ - { - type:"TrackDepos", - data: { - step_size: 1.0 * wc.millimeter, - tracks: [ - { - time: 10.0*wc.ns, - charge: -1, - ray : wc.ray(wc.point(10,0,0,wc.cm), wc.point(100,10,10,wc.cm)) - }, - { - time: 120.0*wc.ns, - charge: -2, - ray : wc.ray(wc.point(2,0,0,wc.cm), wc.point(3, -100,0,wc.cm)) - }, - { - time: 99.0*wc.ns, - charge: -3, - ray : wc.ray(wc.point(130,50,50,wc.cm), wc.point(11,-50,-30,wc.cm)) - } - ], - } - } -] - - diff --git a/cfg/test/cfg_wire_cell.jsonnet b/cfg/test/cfg_wire_cell.jsonnet deleted file mode 100644 index fea1047af..000000000 --- a/cfg/test/cfg_wire_cell.jsonnet +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - type: "wire-cell", - data: { 
- plugins: ["WireCellGen","WireCellApps", "WireCellTbb"], - apps: ["TbbFlow"] - } - }, -] diff --git a/cfg/test/test-pgrapher-common-funcs.jsonnet b/cfg/test/test-pgrapher-common-funcs.jsonnet index d80f1b65c..0b447ba0f 100644 --- a/cfg/test/test-pgrapher-common-funcs.jsonnet +++ b/cfg/test/test-pgrapher-common-funcs.jsonnet @@ -1,10 +1,15 @@ -local g = import "pgraph.jsonnet"; -local f = import "pgrapher/common/funcs.jsonnet"; +local pg = import "pgraph.jsonnet"; + +local pipes = [ + pg.pnode({ + type:"Test", + name:"test%d"%n, + data:{number:n,index:n-1} + }, nin=1, nout=1) for n in std.range(1,6)]; +local fpg = pg.fan.pipe('TestFanout', pipes, 'TestFanin', name="testpipe", outtags=["testtag"]); +local fpga = { type: "Pgrapher", data: { edges: pg.edges(fpg), }, }; + +// make something looking like a config so dotify works +pg.uses(fpg) + [ fpga ] -{ - local fpg = f.fanpipe([g.pnode({type:"Test",name:"test%d"%n, data:{number:n,index:n-1}}, nin=1, nout=1) - for n in std.range(1,6)], "testpipe", ["testtag"]), - local fpga = { type: "Pgrapher", data: { edges: g.edges(fpg), }, }, - seq: g.uses(fpg) + [ fpga ], // make something looking like a config so dotify works -}.seq diff --git a/cfg/test/test_all.sh b/cfg/test/test_all.sh deleted file mode 100755 index dcb4b7ceb..000000000 --- a/cfg/test/test_all.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -e -set -x - -testdir=$(dirname $(readlink -f $BASH_SOURCE)) - -for what in trackdepos drifter -do - $testdir/test_${what}.sh -done diff --git a/cfg/test/test_diffuser.jsonnet b/cfg/test/test_diffuser.jsonnet deleted file mode 100644 index 99df62485..000000000 --- a/cfg/test/test_diffuser.jsonnet +++ /dev/null @@ -1,41 +0,0 @@ -local wc = import "wirecell.jsonnet"; - -(import "cfg_wire_cell.jsonnet") + -(import "cfg_trackdepos.jsonnet") + -(import "cfg_drifter.jsonnet") + -(import "cfg_diffuser.jsonnet") + -[ - { - type:"TbbFlow", - data: { - graph:[ - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter", name:"drifterU"} - }, - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter", name:"drifterV"} - }, - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter", name:"drifterW"} - }, - { - tail: wc.Node {type:"Drifter", name:"drifterU"}, - head: wc.Node {type:"Diffuser", name:"diffuserU"} - }, - { - tail: wc.Node {type:"Drifter", name:"drifterV"}, - head: wc.Node {type:"Diffuser", name:"diffuserV"} - }, - { - tail: wc.Node {type:"Drifter", name:"drifterW"}, - head: wc.Node {type:"Diffuser", name:"diffuserW"} - }, - - ] - } - }, -] - diff --git a/cfg/test/test_drifter.jsonnet b/cfg/test/test_drifter.jsonnet deleted file mode 100644 index 4a10ab0e9..000000000 --- a/cfg/test/test_drifter.jsonnet +++ /dev/null @@ -1,46 +0,0 @@ -local wc = import "wirecell.jsonnet"; - -(import "cfg_wire_cell.jsonnet") + -(import "cfg_trackdepos.jsonnet") + -(import "cfg_drifter.jsonnet") + -[ - { - type:"TbbFlow", - data: { - graph:[ - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter", name:"drifterU"} - }, - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter", name:"drifterV"} - }, - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter", name:"drifterW"} - }, - - { - tail: wc.Node {type:"Drifter", name:"drifterU"}, - head: wc.Node {type:"DumpDepos"} - //head: wc.Node {type:"DumpDepos", name:"ddU"} - }, - { - tail: wc.Node {type:"Drifter", name:"drifterV"}, - head: wc.Node {type:"DumpDepos"} - //head: wc.Node {type:"DumpDepos", name:"ddV"} - }, - { - tail: 
wc.Node {type:"Drifter", name:"drifterW"}, - head: wc.Node {type:"DumpDepos"} - //head: wc.Node {type:"DumpDepos", name:"ddW"} - }, - - ] - } - }, - -] - - diff --git a/cfg/test/test_drifter.sh b/cfg/test/test_drifter.sh deleted file mode 100755 index 9624ea436..000000000 --- a/cfg/test/test_drifter.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -e -set -x -testdir=$(dirname $(readlink -f $BASH_SOURCE)) - -jpath="$testdir/.." -input="$testdir/test_drifter.jsonnet" -cfg="$testdir/test_drifter.cfg" - -jsonnet -J $jpath $input > $cfg - -wire-cell -p WireCellGen -p WireCellTbb -c $cfg -a TbbFlow diff --git a/cfg/test/test_one.sh b/cfg/test/test_one.sh deleted file mode 100755 index 4aba4ae76..000000000 --- a/cfg/test/test_one.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e -set -x - -testdir=$(dirname $(readlink -f $BASH_SOURCE)) -what="$1" - -jpath="$testdir/.." -input="$testdir/test_${what}.jsonnet" -cfg="$testdir/test_${what}.cfg" - -jsonnet -J $jpath $input > $cfg - -wire-cell -c $cfg - diff --git a/cfg/test/test_p2p.jsonnet b/cfg/test/test_p2p.jsonnet deleted file mode 100644 index 84f4b1d0b..000000000 --- a/cfg/test/test_p2p.jsonnet +++ /dev/null @@ -1,105 +0,0 @@ -local wc = import "wirecell.jsonnet"; -local p = import "params.jsonnet"; -[ - { - type: "wire-cell", - data: { - plugins: ["WireCellGen","WireCellApps", "WireCellTbb"], - apps: ["TbbFlow"] - } - }, - - { - // same as in test_tbb_dfp_diffuser.cxx - type:"TrackDepos", - data: { - step_size: 1.0 * wc.millimeter, - tracks: [ - { - time: 10.0*wc.us, - ray : wc.ray(wc.point(1,0,0,wc.cm), wc.point(2,0,0,wc.cm)) - }, - { - time: 20.0*wc.us, - ray : wc.ray(wc.point(1,0,0,wc.cm), wc.point(1,0,1,wc.cm)) - }, - { - time: 30.0*wc.us, - ray : wc.ray(wc.point(1,-1,1,wc.cm), wc.point(1,1,1,wc.cm)) - } - ], - } - }, - - { - type:"Drifter", - data: { - location: 15*wc.mm, - drift_velocity: 2.0*wc.mm/wc.us, - } - }, - - - { - type:"Diffuser", - name:"diffuserU", - data: { - pitch_origin: { x:p.wires.u.x, y:0.0, z:0.0 }, - pitch_direction: { x:0.0, y:-0.866025, z:0.5}, - pitch_distance: p.wires.u.pitch, - timeslice: p.timeslice, - timeoffset: 0.0, - starttime: p.wires.u.x/p.drift_velocity, - drift_velocity: p.drift_velocity, - max_sigma_l: 2.5*p.timeslice - } - }, - { - type:"Diffuser", - name:"diffuserV", - data: { - pitch_origin: { x:p.wires.v.x, y:0.0, z:0.0 }, - pitch_direction: { x:0.0, y:0.866025, z:0.5 }, - pitch_distance: p.wires.v.pitch, - timeslice: p.timeslice, - timeoffset: 0.0, - starttime: p.wires.v.x/p.drift_velocity, - drift_velocity: p.drift_velocity, - max_sigma_l: 2.5*p.timeslice - } - }, - { - type:"Diffuser", - name:"diffuserW", - data: { - pitch_origin: { x:p.wires.w.x, y:0.0, z:0.0 }, - pitch_direction: { x:0.0, y:0.0, z:1.0 }, - pitch_distance: p.wires.w.pitch, - timeslice: p.timeslice, - timeoffset: 0.0, - starttime: p.wires.w.x/p.drift_velocity, - drift_velocity: p.drift_velocity, - max_sigma_l: 2.5*p.timeslice - } - }, - - - { - type:"TbbFlow", - data: { - graph:[ - { - tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"Drifter"} - }, - { - tail: wc.Node {type:"Drifter"}, - head: wc.Node {type:"Diffuser"} - } - ] - } - }, - -] - - diff --git a/cfg/test/test_trackdepos.jsonnet b/cfg/test/test_trackdepos.jsonnet deleted file mode 100644 index cec6121e0..000000000 --- a/cfg/test/test_trackdepos.jsonnet +++ /dev/null @@ -1,20 +0,0 @@ -local wc = import "wirecell.jsonnet"; - -(import "cfg_wire_cell.jsonnet") + -(import "cfg_trackdepos.jsonnet") + -[ - { - type:"TbbFlow", - data: { - graph:[ - { - 
tail: wc.Node {type:"TrackDepos"}, - head: wc.Node {type:"DumpDepos"} - }, - ] - } - }, - -] - - diff --git a/cfg/test/test_trackdepos.sh b/cfg/test/test_trackdepos.sh deleted file mode 100755 index cf6f6415d..000000000 --- a/cfg/test/test_trackdepos.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -e -set -x -testdir=$(dirname $(readlink -f $BASH_SOURCE)) - -jpath="$testdir/.." -input="$testdir/tbbdfp_trackdepos.jsonnet" -cfg="$testdir/test_trackdepos.cfg" - -jsonnet -J $jpath $input > $cfg - -wire-cell -p WireCellGen -p WireCellTbb -c $cfg -a TbbFlow diff --git a/cfg/test/test_uboone_mains.bats b/cfg/test/test_uboone_mains.bats new file mode 100644 index 000000000..f2eb1fa87 --- /dev/null +++ b/cfg/test/test_uboone_mains.bats @@ -0,0 +1,44 @@ +#!/bin/bash + +# Test some main jsonnet that are expected to compile w/out args + +bats_load_library wct-bats.sh + +@test "compile various main configs" { + + cd_tmp + + local mains=(wct-jsondepo-sim-nf-sp.jsonnet + wct-sim-check.jsonnet + wct-sim-deposplat.jsonnet + wct-sim-zipper-check.jsonnet + wcls-sim-drift.jsonnet + wcls-sim-drift-simchannel.jsonnet + wcls-sim.jsonnet + wcls-sim-nf-sp.jsonnet + wct-sim-ideal-sig.jsonnet + wct-sim-ideal-sn-nf-sp.jsonnet) + + for main in ${mains[*]} + do + local cfgfile="$(config_path pgrapher/experiment/uboone/$main)" + [[ -s "$cfgfile" ]] + local jsonfile="$(basename $cfgfile .jsonnet).json" + + t1=$(date +%s) + compile_jsonnet "$cfgfile" "$jsonfile" + t2=$(date +%s) + dt=$(( $t2 - $t2 )) + echo "$jsonfile took $dt seconds" + [[ $dt -le 2 ]] + + if [ -z "$(wcb_env_value WCPGRAPH)" ] ; then + continue; + fi + + local svgfile="$(basename $jsonfile .json).svg" + dotify_graph "$jsonfile" "$svgfile" + saveout $svgfile + done + +} diff --git a/cfg/test/test_uboone_mains.sh b/cfg/test/test_uboone_mains.sh deleted file mode 100755 index 6195ec164..000000000 --- a/cfg/test/test_uboone_mains.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -testdir=$(dirname $(readlink -f $BASH_SOURCE)) -topdir=$(dirname $testdir) - -failed="" -for try in $topdir/pgrapher/experiment/uboone/{wct,wcls}-*.jsonnet -do - echo $try - time jsonnet -J $topdir $try >/dev/null - if [ "$?" != "0" ] ; then - echo "failed: $try" - failed="$failed $try" - fi -done -if [ -n "$failed" ] ; then - exit 1 -fi diff --git a/cfg/wscript_build b/cfg/wscript_build index a6df415eb..85e88894c 100644 --- a/cfg/wscript_build +++ b/cfg/wscript_build @@ -1,5 +1,5 @@ bld.install_files('${PREFIX}/share/wirecell', - bld.path.ant_glob("*.jsonnet") + + bld.path.ant_glob("*.jsonnet") + bld.path.ant_glob("pgrapher/common/**/*.jsonnet") + bld.path.ant_glob("pgrapher/ui/**/*.jsonnet"), relative_trick=True) @@ -11,3 +11,5 @@ if exps: bld.path.ant_glob("pgrapher/experiment/%s/**/*.jsonnet" % exp) + bld.path.ant_glob("pgrapher/experiment/%s/**/*.fcl" % exp), relative_trick=True) + +bld.smplpkg('WireCellCfg') diff --git a/cuda/README.org b/cuda/README.org index a0cf0a360..0a1e04fb1 100644 --- a/cuda/README.org +++ b/cuda/README.org @@ -1,4 +1,5 @@ #+title: Wire-Cell CUDA +#+SETUPFILE: ../setup-readme.org This package is a mostly empty place holder for now. In the future it may provide Wire-Cell Toolkit components that depend on CUDA. 
diff --git a/cuda/test/test_idft_cufftdft.bats b/cuda/test/test_idft_cufftdft.bats new file mode 100644 index 000000000..82cb1fe45 --- /dev/null +++ b/cuda/test/test_idft_cufftdft.bats @@ -0,0 +1,11 @@ +#!/usr/bin/env bats + +bats_load_library wct-bats.sh + +@test "test_idft with cufftdft" { + usepkg aux # to get test_dft + run test_idft cuFftDFT WireCellCuda + echo "$output" + [[ "$status" -eq 0 ]] +} + diff --git a/gen/README.md b/gen/README.md deleted file mode 100644 index 3d82fdd97..000000000 --- a/gen/README.md +++ /dev/null @@ -1,24 +0,0 @@ -Wire Cell Gen -============== - -This is the Wire Cell Gen package. It provides reference -implementation of Wire Cell interfaces in ways that can be generated -with no outside dependencies. - -Functionality includes: - -- wire and cell geometry, -- drifting charge deposition in the volume of the detector, -- diffusion of these charges as they drift, -- digitization of charge on the wire planes. - -These layers can be used with other Wire Cell implementations. For -example: - - - depositions from detailed simulation can be provided and then Gen's -drifting, diffusing and digitization can be used. - - - your own wire geometry can be used with Gen's cell construction or -Gen's wire generated wire geometry can be used as input to your own -cell construction. - diff --git a/gen/README.org b/gen/README.org index d12e2037c..5c5f7bfa6 100644 --- a/gen/README.org +++ b/gen/README.org @@ -1,4 +1,5 @@ #+title: Wire-Cell Gen +#+SETUPFILE: ../setup-readme.org This is the Wire-Cell "gen" sub-package. It provides components related to generating simulated data. In particular: diff --git a/gen/test/check-issue202.bats b/gen/test/check-issue202.bats new file mode 100644 index 000000000..579adfd56 --- /dev/null +++ b/gen/test/check-issue202.bats @@ -0,0 +1,355 @@ +#!/usr/bin/env bats + +# Test for: +# https://github.com/WireCell/wire-cell-toolkit/issues/202 + +# bats file_tags=issue:202,topic:noise,time:2 + +# Commentary +# +# The way noise generation is supposed to work: For each frequency bin +# we draw 2 normal (mean=0, sigma=1) random numbers and multiply each +# by the input real-valued "mode" (not technically "mean") spectral +# amplitude at that frequency. We then interpret the two numbers as +# the real and imaginary parts of the sampled spectrum at that +# frequency. This is equivalent to a Rayleigh-sampled radius and a +# uniform-sampled phase angle. +# +# The bug shifted the two sampled "normals" from being distributed +# around mean=0 to being distributed around mean=0.04. This shifts +# the center of the complex distribution from 0 to 0.04*(1+i). Thus +# the "mode" of the sampled (real part) distribution should be +# increased by the bug. Right? What is the mean distance from origin +# to a circle that is offset from the origin by less than one radius? +# +# In the process of understanding that bug it was found that a change +# in the recent past introduced round() on the floating point ADC +# value prior to its return by Digitizer. Its effect is also +# checked here. +# +# CAUTION: after fixing 202, this test relies on an undocumented option +# [redacted] to the {Incoherent,Coherent}AddNoise components in order +# to explicitly reinstate this bug. This is to produce plots to show +# the effect which may be useful to experiments with results derived +# from the buggy noise. When/if the [redacted] option is +# removed in the future, this file must be reduced to merely test for +# it instead of reproducing it.
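The commentary above asks how far the offset should move the mean sampled magnitude. A small, self-contained numpy sketch (illustrative only; this is not the toolkit's noise generator, and the unit "mode" amplitude is an arbitrary stand-in) makes the shift concrete by comparing mean=0 draws against the buggy mean=0.04 draws described above:

#+begin_src python
# Monte Carlo illustration of the bug-202 shift described above (not WCT code).
import numpy as np

rng = np.random.default_rng(202)
amp = 1.0            # stand-in for the per-frequency "mode" amplitude
n = 1_000_000

def mean_radius(mu):
    # real and imaginary parts sampled as described in the commentary
    re = rng.normal(mu, 1.0, n) * amp
    im = rng.normal(mu, 1.0, n) * amp
    return np.abs(re + 1j * im).mean()

nobug = mean_radius(0.0)    # Rayleigh mean: amp * sqrt(pi/2) ~ 1.2533
buggy = mean_radius(0.04)   # offset 0.04*(1+i) is well inside one radius
print(f"mean |z|: nobug={nobug:.4f} buggy={buggy:.4f} ratio={buggy/nobug:.4f}")
#+end_src

Because the offset is much smaller than the Rayleigh radius, the mean magnitude barely moves, which is the kind of small per-frequency difference the comp1d comparisons later in this file are meant to expose.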
+# +# The bug fixed in 202 cause the use of a "normal" distribution with +# mean=0.04 instead of mean=0.0. We test for this mean and a "twice +# sized bug" mean. When/if removing the [redacted] option, any use of +# "smlbug" and "bigbug" must be deleted. +declare -A bugmean=( [nobug]=0.0 [smlbug]=0.04 [bigbug]=0.08 ) + +bats_load_library wct-bats.sh + +# Policy helpers +# +# Sub directory name for a variant test. +# +variant_subdir () { + local trunc=$1 ; shift + local bug=${1:-nobug} + echo "${trunc}-${bug}" +} +# +# A file name for a data tier relative to a variant subdir +# +tier_path () { + local ntype="${1}"; shift + local tier=${1:-adc}; + local ext="npz" + if [ "$tier" = "spectra" ] ; then + ext="json.bz2" + fi + echo "test-issue202-${ntype}-${tier}.${ext}" +} + +# Run a wire-cell job to make files that span: +# +# ntype in: +# - inco : parameterized group noise model applied incoherently +# - cohe : parameterized group noise model applied coherently +# - empno : measured empirical noise model applied incoherently +# +# tier in: +# - vin = voltage level +# - adc = ADC counts +# - dac = ADC scaled to voltage +# - spectra = output of NoiseModeler +# +# files like: +# test-issue202-{ntype}-{tier}.npz +# test-issue202-{ntype}-spectra.json.bz2 +# +setup_file () { + + local cfgfile="${BATS_TEST_FILENAME%.bats}.jsonnet" + + # files named commonly in each variant subidr + local jsonfile="$(basename ${cfgfile} .jsonnet).json" + local logfile="$(basename ${cfgfile} .jsonnet).log" + + # explicitly compile cfg to json to pre-check [redacted] is implemented. + for bugsiz in nobug smlbug bigbug + do + for trunc in round floor + do + cd_tmp + + wd="$(variant_subdir $trunc $bugsiz)" + mkdir -p $wd + cd $wd + + if [ -f "${jsonfile}" ] ; then + echo "already have ${wd}/${jsonfile}" + continue + fi + + local bug=${bugmean[$bugsiz]} + run bash -c "wcsonnet -A bug202=$bug -A round=$trunc $cfgfile > ${jsonfile}" + echo "$output" + [[ "$status" -eq 0 ]] + if [ "$trunc" = "round" ] ; then + [[ "$(grep -c '"round" : true' ${jsonfile})" -eq 3 ]] + else + [[ "$(grep -c '"round" : false' ${jsonfile})" -eq 3 ]] + fi + + # representative output file + local adcfile="$(tier_path empno)"; + if [ -f "${adcfile}" ] ; then + echo "Already have ${wd}/${adcfile}" 1>&3 + continue + fi + + # run actual job on pre-compiled config + run wire-cell -l "${logfile}" -L debug "${jsonfile}" + echo "$output" + [[ "$status" -eq 0 ]] + [[ -s "$logfile" ]] + if [ "$trunc" = "round" ] ; then + [[ -n "$(grep round=1 ${logfile})" ]] + else + [[ -n "$(grep round=0 ${logfile})" ]] + fi + echo "$adcfile" + [[ -s "$adcfile" ]] + + done + done + cd_tmp +} + + +@test "no weird endpoints of mean waveform" { + + cd_tmp file + + ## good lines are like: + # U t 4096 0.02282684326171875 0.06892286163071752 4096 1291 185 13 2 0 0 0 0 0 0 + # U c 800 0.02282684326171875 0.4955734777788893 800 376 0 0 0 0 0 0 0 0 0 + # V t 4096 0.01374267578125 0.03856538100586203 4096 1334 192 13 1 0 0 0 0 0 0 + # V c 800 0.01374267578125 0.49734063258678657 800 386 0 0 0 0 0 0 0 0 0 + # W t 4096 0.009838104248046875 0.016252509189760622 4096 1328 177 14 0 0 0 0 0 0 0 + # W c 960 0.009838104248046875 0.49116500530978957 960 463 0 0 0 0 0 0 0 0 0 + ## bad lines are like: + # U t 4096 0.23914672851562502 1.838315680187899 4096 30 22 15 15 14 13 13 13 11 10 + # U c 800 0.239146728515625 0.4333089727023359 800 201 0 0 0 0 0 0 0 0 0 + # V t 4096 0.24620086669921876 1.0176595165745093 4096 30 22 15 15 14 13 13 13 11 10 + # V c 800 0.24620086669921876 0.43083546962103825 
800 198 0 0 0 0 0 0 0 0 0 + # W t 4096 0.23283335367838542 0.20440386815418213 4096 39 26 15 14 14 14 13 11 10 10 + # W c 960 0.23283335367838542 0.4332668683930272 960 249 0 0 0 0 0 0 0 0 0 + + local wcgen=$(wcb_env_value WCGEN) + + # Neither round nor floor must exhibit the bug + for trunc in round floor + do + local wd="$(variant_subdir $trunc)" + + local adcfile="$wd/$(tier_path empno)" + echo "$adcfile" + [[ -s "$adcfile" ]] + + while read line ; do + parts=($line) + + echo "$line" + if [ "${parts[1]}" = "c" ] ; then + continue + fi + + # Bash does not speak floating point. To avoid relying on + # dc/bc to be installed, we do these FP tests lexically. + + # require nothing past 5 sigma + [[ "${parts[10]}" = "0" ]] + + # mean should be small. + [[ -n "$(echo ${parts[3]} | grep '^0\.0')" ]] + + # rms should be small. + [[ -n "$(echo ${parts[4]} | grep '^0\.0')" ]] + + done < <($wcgen frame_stats "$adcfile") + done +} + + +# bats test_tags=implicit,plots +@test "plot adc frame means" { + + local tname="$(basename $BATS_TEST_FILENAME .bats)" + local plotter=$(wcb_env_value WCPLOT) + + for bugsiz in nobug smlbug bigbug + do + for trunc in round floor + do + cd_tmp file + local wd="$(variant_subdir $trunc $bugsiz)" + [[ -d "$wd" ]] + cd "$wd" + for ntype in inco cohe + do + + local adcfile="$(tier_path $ntype)" + local figfile="$(basename $adcfile .npz)-means.png" + + run $plotter frame-means -o "$figfile" "$adcfile" + echo "$output" + [[ "$status" -eq 0 ]] + [[ -s "$figfile" ]] + saveout -c plots "$figfile" + done + done + done +} + + +# bats test_tags=implicit,plots +@test "plot spectra" { + + local wcsigproc="$(wcb_env_value WCSIGPROC)" + + for bugsiz in nobug smlbug bigbug + do + for trunc in round floor + do + + cd_tmp file + local wd="$(variant_subdir ${trunc} ${bugsiz})" + [[ -d "$wd" ]] + cd "$wd" + + for ntype in inco empno + do + local sfile="$(tier_path $ntype spectra)" + + # Plot OUTPUT spectra + local ofile="$(basename $sfile .json.bz2)-output.pdf" + run $wcsigproc plot-noise-spectra -z "${sfile}" "${ofile}" + echo "$output" + [[ "$status" -eq 0 ]] + + + # Plot INPUT spectra. + local ifile="$(basename $sfile .json.bz2)-input.pdf" + # The method depends on the type. + + # Spectra is inside configuration + if [ "$ntype" = "inco" ] ; then + run $wcsigproc plot-configured-spectra -c ac -n "$ntype" test-issue202.json "${ifile}" + echo "$output" + [[ "$status" -eq 0 ]] + fi + # Spectra is in auxiliary file + if [ "$ntype" = "empno" ] ; then + # warning: hard-coding empno spectra out of laziness + # and assuming it's the one used in the Jsonnet!
+ run $wcsigproc plot-noise-spectra -z \ + protodune-noise-spectra-v1.json.bz2 "${ifile}" + echo "$output" + [[ "$status" -eq 0 ]] + fi + + saveout -c plots "$ifile" "$ofile" + + done + done + done +} + +# bats test_tags=implicit,plots +@test "compare round vs floor no bug" { + # cd_tmp file + cd_tmp file + + local fadc="$(variant_subdir floor)/$(tier_path inco)" + local radc="$(variant_subdir round)/$(tier_path inco)" + + for bl in ac none + do + for thing in wave spec + do + for grp in $(seq 10) + do + local num=$(( $grp - 1 )) + local chmin=$(( $num * 256 )) + local chmax=$(( $grp * 256 )) + + local outpng="comp1d-floor+round-nobug-${thing}-${bl}-grp${grp}.png" + + wirecell-plot comp1d \ + --transform $bl \ + --single \ + --tier '*' -n $thing --chmin $chmin --chmax $chmax \ + --output "$outpng" \ + "$fadc" "$radc" + + if [[ $grp -le 3 ]] ; then + saveout -c plots $outpng + fi + done + done + done +} + +# bats test_tags=implicit,plots +@test "compare bug and no bug with round and floor" { + + cd_tmp file + + for trunc in round floor + do + local radcnb="$(variant_subdir $trunc nobug)/$(tier_path inco)" + local radcsb="$(variant_subdir $trunc smlbug)/$(tier_path inco)" + local radcbb="$(variant_subdir $trunc bigbug)/$(tier_path inco)" + + for bl in ac none + do + for thing in wave spec + do + for grp in $(seq 10) + do + local num=$(( $grp - 1 )) + local chmin=$(( $num * 256 )) + local chmax=$(( $grp * 256 )) + + outpng="comp1d-${trunc}-bugs-${thing}-${bl}-grp${grp}.png" + + wirecell-plot comp1d \ + --transform $bl \ + --single \ + --tier '*' -n $thing --chmin $chmin --chmax $chmax \ + --output "$outpng" \ + "$radcnb" "$radcsb" "$radcbb" + if [[ $grp -eq 10 ]] ; then + saveout -c plots $outpng + fi + done + done + done + done +} diff --git a/gen/test/history-addnoise.bats b/gen/test/history-addnoise.bats new file mode 100644 index 000000000..6a26b0d0c --- /dev/null +++ b/gen/test/history-addnoise.bats @@ -0,0 +1,59 @@ +#!/usr/bin/env bats + +# This test consumes historical files produced by test-addnoise.bats +# +# See eg bv-generate-history-haiku for example how to automate +# producing historical files. O.w. collect one or more history/ +# directories and add their parent locations to WCTEST_DATA_PATH. + +bats_load_library wct-bats.sh + +# bats test_tags=time:1 + +@test "historical addnoise comp1d plots" { + + local wcplot=$(wcb_env_value WCPLOT) + + # will cd here to make plots have minimal filename labels + local rundir=$(blddir)/tests/history + # but will deposite plot files to our temp dir + local outdir=$(tmpdir) + + + local inpath="test-addnoise/test-addnoise-empno-6000.tar.gz" + local frame_files=( $(historical_files -v $(version) -l 2 $inpath) ) + # yell "frame files: ${frame_files[@]}" + frame_files=( $(realpath --relative-to=$rundir ${frame_files[*]}) ) + + cd $rundir + + for plot in wave spec + do + local plotfile="$outdir/comp1d-${plot}-history.png" + $wcplot \ + comp1d -n $plot --markers 'o + x .' -t '*' \ + --chmin 0 --chmax 800 -s --transform ac \ + -o $plotfile ${frame_files[*]} + saveout -c reports $plotfile + + done + + local plotfile="$outdir/comp1d-spec-history-zoom1.png" + $wcplot \ + comp1d -n spec --markers 'o + x .' -t '*' \ + --chmin 0 --chmax 800 -s --transform ac \ + --xrange 100 800 \ + -o $plotfile ${frame_files[*]} + saveout -c reports $plotfile + + local plotfile="$outdir/comp1d-spec-history-zoom2.png" + $wcplot \ + comp1d -n spec --markers 'o + x .' 
-t '*' \ + --chmin 0 --chmax 800 -s --transform ac \ + --xrange 1000 2000 \ + -o $plotfile ${frame_files[*]} + saveout -c reports $plotfile + + +} + diff --git a/gen/test/test-addnoise.bats b/gen/test/test-addnoise.bats index 3a099d8dd..8c3d802d9 100644 --- a/gen/test/test-addnoise.bats +++ b/gen/test/test-addnoise.bats @@ -1,85 +1,52 @@ +#!/usr/bin/env bats -file_base () { - local noise="$1" ; shift - local nsamples="${1:-6000}" +# A test to run on current, past and future releases to check for any +# unexepcted change in noise simulation. +# +# The test is meant to be self locating so that one can do: +# +# cd ~/path/to/new-wct/ +# alias bats-$(realpath test/bats/bin/bats) +# cd ~/path/to/old/release +# bats ~/path/to/new-wct/gen/test/test-addnoise.bats - local ver="$(wire-cell --version)" - local base="$(basename ${BATS_TEST_FILENAME} .bats)" - echo "test-addnoise-${ver}-${noise}-${nsamples}" -} +# bats file_tags=noise,history -make_noise () { - local noise="$1" ; shift - local nsamples="${1:-6000}" +bats_load_library wct-bats.sh - local base="$(file_base $noise $nsamples)" +# The intention is to run this test in multiple releases and compare across releases. +# bats test_tags=history,plots,implicit +@test "generate simple noise for comparison with older releases" { - local cfgfile="${BATS_TEST_FILENAME%.bats}.jsonnet" + cd_tmp - local outfile="${base}.tar.gz" - local logfile="${base}.log" + local nsamples=6000 + local noise=empno + local name="test-addnoise-${noise}-${nsamples}" + local adcfile="${name}.tar.gz" # format with support going back the longest + local cfgfile="${BATS_TEST_FILENAME%.bats}.jsonnet" - rm -f "$logfile" # appends otherwise - if [ -s "$outfile" ] ; then - echo "already have $outfile" 1>&3 - return - fi run wire-cell -l "$logfile" -L debug \ - -A nsamples=$nsamples -A noise=$noise -A outfile="$outfile" \ + -A nsamples=$nsamples -A noise=$noise -A outfile="$adcfile" \ -c "$cfgfile" echo "$output" [[ "$status" -eq 0 ]] - [[ -s "$outfile" ]] -} - - -# The intention is to run this test in multiple releases and compare across releases. -@test "generate simple noise for comparison with older releases" { - - - - for nsamples in 6000 - do - for noise in empno - do - make_noise $noise $nsamples - local base="$(file_base $noise $nsamples)" - - wirecell-plot comp1d -o comp1d-addnoise-${ver}-${noise}-${nsamples}.pdf \ - --markers 'o + x .' -t '*' -n spec \ - --chmin 0 --chmax 800 -s --transform ac \ - "${base}.tar.gz" - done - done - # wirecell-plot comp1d -o comp1d-addnoise-${ver}-all-all.pdf \ - # --markers 'o + x .' -t '*' -n spec \ - # --chmin 0 --chmax 800 -s --transform ac \ - # "test-addnoise-${ver}-empno-4096.tar.gz" \ - # "test-addnoise-${ver}-inco-4096.tar.gz" \ - # "test-addnoise-${ver}-empno-6000.tar.gz" \ - # "test-addnoise-${ver}-inco-6000.tar.gz" - -} - - - -@test "inco and empno with external spectra file is identical" { + [[ -s "$adcfile" ]] + saveout -c history "$adcfile" - for nsamples in 4095 4096 6000 + local wcplot=$(wcb_env_value WCPLOT) + for what in spec wave do - for noise in empno inco - do - make_noise $noise $nsamples - done - - wirecell-plot comp1d \ - -o "$(file_base inco+empno $nsamples)-comp1d.pdf" \ - --markers 'o .' 
-t '*' -n spec \ - --chmin 0 --chmax 800 -s --transform ac \ - "$(file_base inco $nsamples).tar.gz" \ - "$(file_base empno $nsamples).tar.gz" - + local pout="${name}-comp1d-${what}.png" + $wcplot comp1d \ + -o $pout \ + -t '*' -n $what \ + --chmin 0 --chmax 800 -s --transform ac \ + "${adcfile}" + echo "$output" + [[ "$status" -eq 0 ]] + [[ -s "$pout" ]] + saveout -c plots "$pout" done - } diff --git a/gen/test/test-issue202.jsonnet b/gen/test/test-issue202.jsonnet new file mode 100644 index 000000000..fbb76665e --- /dev/null +++ b/gen/test/test-issue202.jsonnet @@ -0,0 +1,239 @@ +// Test the "roundtrip": +// +// noise: file -> sim -> modeler -> file + +// CAUTION: this Jsonnet file does not follow good configuration +// style. In order to be be self-contained it is overly verbose, +// underly generic and not at all extensible. DO NOT USE IT AS AN +// EXAMPLE. + +local wc = import "wirecell.jsonnet"; +local pg = import "pgraph.jsonnet"; + +// These define the noise spectra and groups. We may either pass +// JSON/Jsonnet file NAMES or we may provide the data structures +// directly. Since this tests algorithmically generates everything, +// it's easier to bring these inline. Also, we happen to use the same +// spectra for both grouped coherent and grouped incoherent. In real +// world, such a coincidence is very unlikely. +local tns = import "test-noise-spectra.jsonnet"; +local tng = { + inco: import "test-noise-groups-incoherent.jsonnet", + cohe: import "test-noise-groups-coherent.jsonnet", + empno: import "test-noise-groups-incoherent.jsonnet", +}; + +// Size of original (fictional) waveforms +local nsamples_modelin = 4096; +// and how many subsamples to store. +local nsamples_modelinsave = 64; + +// Size of generated waveforms. +//local nsamples_generate = 6000; +local nsamples_generate = 4096; + +// As above but for saving out the model. +local nsamples_modelout = 4096; +local nsamples_modeloutsave = 64; + +// services +local svcs = {rng: { type: 'Random' }, dft: { type: 'FftwDFT' }}; +local wires = { + type: "WireSchemaFile", + data: { + filename: "protodune-wires-larsoft-v4.json.bz2", + }}; +local isnoise = { + type: "NoiseRanker", + data: { + maxdev: 10*wc.mV, + }, +}; + +// pdsp apa 0, dumped from "real" jsonnet +local faces = [{ + "anode": -3578.36, + "cathode": -1.5875, + "response": -3487.8875 +}, { + "anode": -3683.14, + "cathode": -7259.9125, + "response": -3773.6125 +}]; + +local anode = { + type: "AnodePlane", + name: "", + data: { + ident: 0, + wire_schema: wc.tn(wires), + faces: faces, + }, + uses: [wires]}; + +// From nothing comes all. +local absurd = pg.pnode({ + type: 'SilentNoise', + data: { + noutputs: 1, + nchannels: 2560, + }}, nin=0, nout=1); + +local reframer = pg.pnode({ + type: 'Reframer', + data: { + nticks: nsamples_generate, + anode: wc.tn(anode), + }}, nin=1, nout=1, uses=[anode]); + + +// same data for digitizing and undigitizing +local digidata = { + anode: wc.tn(anode), + gain: 1.0, + resolution: 12, + baselines: [wc.volt,wc.volt,wc.volt], + fullscale: [0, 2*wc.volt], +}; + + +local tick = 0.5*wc.us; + +// The graph will have two major pipelines split based on coherent and +// incoherent noise. 
Here are the short nick names to ID each: +local group_nicks = ["inco", "cohe"]; +local adder_types = { + inco: "IncoherentAddNoise", + cohe: "CoherentAddNoise", + empno: "IncoherentAddNoise", +}; +local models = { + [one]: { + type: "GroupNoiseModel", + name: one, + data: { + // This can also be given as a JSON/Jsonnet file + spectra: tns(nsamples=nsamples_modelin, nsave=nsamples_modelinsave, + rms=1*wc.mV), // PDSP is eg 1.3 mV + groups: tng[one], + nsamples: nsamples_generate, + tick: tick, + } + } + for one in group_nicks} + { + empno: { + type: "EmpiricalNoiseModel", + name: "empno", + data: { + anode: wc.tn(anode), + chanstat: "", + spectra_file: "protodune-noise-spectra-v1.json.bz2", + nsamples: nsamples_generate, + period: tick, + wire_length_scale: 1*wc.cm, + }, uses: [anode] + }, + }; + + +local pipes(round=true, bug202 = 0.0) = { + [one]: [ + + // add noise + pg.pnode({ + type: adder_types[one], + name: one, + data: { + dft: wc.tn(svcs.dft), + rng: wc.tn(svcs.rng), + model: wc.tn(models[one]), + nsamples: nsamples_generate, + bug202: bug202, + }}, nin=1, nout=1, uses=[models[one], svcs.dft, svcs.rng]), + + // tap input to modeler + pg.fan.tap('FrameFanout', pg.pnode({ + type: "FrameFileSink", + name: one+'vin', + data: { + outname: "test-issue202-%s-vin.npz"%one, + digitize: false, + }, + }, nin=1, nout=0), one+'vin'), + + // digitize + pg.pnode({ + type: "Digitizer", + name: one, + data: digidata + {round: round} + }, nin=1, nout=1, uses=[anode]), + + // tap generated noise + pg.fan.tap('FrameFanout', pg.pnode({ + type: "FrameFileSink", + name: one+'adc', + data: { + outname: "test-issue202-%s-adc.npz"%one, + digitize: true, + }, + }, nin=1, nout=0), one+'adc'), + + // undigitize + pg.pnode({ + type: "Undigitizer", + name: one, + data: digidata, + }, nin=1, nout=1, uses=[anode]), + + // tap input to modeler + pg.fan.tap('FrameFanout', pg.pnode({ + type: "FrameFileSink", + name: one+'dac', + data: { + outname: "test-issue202-%s-dac.npz"%one, + digitize: false, + }, + }, nin=1, nout=0), one+'dac'), + + // and finally the modeler itself + pg.pnode({ + type: "NoiseModeler", + name: one, + data: { + dft: wc.tn(svcs.dft), + isnoise: wc.tn(isnoise), + threshold: 0.9, + groups: tng[one], + outname: "test-issue202-%s-spectra.json.bz2"%one + }, + }, nin=1, nout=0, uses=[svcs.dft, isnoise])] // one pipe + for one in std.objectFields(models) +}; // pipes + +function(round=true, bug202=0.0) + local bool_round = round == true || round == "true" || round == "round"; + local number_bug202 = if std.type(bug202) == "number" then bug202 else std.parseJson(bug202); + + local nicks_to_use = ["empno", "cohe", "inco"]; + local fanout = pg.fan.fanout('FrameFanout', [ + pg.pipeline(pipes(bool_round, number_bug202)[nick]) for nick in nicks_to_use + ]); + local graph = pg.pipeline([absurd, reframer, fanout]); + + local app = { + type: 'Pgrapher', + data: { + edges: pg.edges(graph), + }, + }; + local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellAux", "WireCellSigProc", "WireCellGen", + "WireCellApps", "WireCellPgraph", "WireCellSio"], + apps: [app.type] + } + }; + [cmdline] + pg.uses(graph) + [app] + + diff --git a/gen/test/test-noise-roundtrip.bats b/gen/test/test-noise-roundtrip.bats index 9cb34f937..615f0d40b 100644 --- a/gen/test/test-noise-roundtrip.bats +++ b/gen/test/test-noise-roundtrip.bats @@ -1,287 +1,128 @@ #!/usr/bin/env bats -# fixme: this is first developed outside of check-test branch. It -# should be refactored to use that Bats support, once merged. 
-# It does assume a new bats such as the one provided by check-test. +# Test noise "round tripo" -# coherent/incoherent -# vin = voltage level -# adc = ADC counts -# dac = ADC scaled to voltage -# spectra output of NoiseModeler -# test-noise-roundtrip-inco-vin.npz -# test-noise-roundtrip-inco-dac.npz -# test-noise-roundtrip-inco-adc.npz -# test-noise-roundtrip-inco-spectra.json.bz2 -# test-noise-roundtrip-cohe-vin.npz -# test-noise-roundtrip-cohe-dac.npz -# test-noise-roundtrip-cohe-adc.npz -# test-noise-roundtrip-cohe-spectra.json.bz2 +# bats file_tags=topic:noise -# The bug 202 used "normal" distribution at mean=0.04 instead of -# mean=0.0. -# -# The way noise generaion is supposed to work: For each frequency bin -# we draw 2 normal (mean=0, sigma=1) random numbers and multiply each -# by the input real-valued "mode" (not technically "mean") spectral -# amplitude at that frequency. We then interpret the two numbers as -# the real and imaginary parts of the sampled spectrum at that -# frequency. This is equivalent to a Rayleigh-sampled radius and a -# uniform-sampled phase angle. -# -# The bug shifted the two sampled "normals" from being distributed -# around mean=0 to being distributed around mean=0.04. This shifts -# the center of the complex distribution from 0 to 0.04*(1+i). Thus -# the "mode" of the sampled (real part) distribution should be -# increased by the bug. Right? What is the mean distance from origin -# to a circle that is offset from the origin by less than one radius? - - -declare -A bugmean=( [nobug]=0.0 [smlbug]=0.04 [bigbug]=0.08 ) +bats_load_library wct-bats.sh setup_file () { - # cd_tmp local cfgfile="${BATS_TEST_FILENAME%.bats}.jsonnet" - local base="$(basename ${BATS_TEST_FILENAME} .bats)" - for bugsiz in nobug smlbug bigbug - do - for trunc in round floor - do - wrkdir="${trunc}-${bugsiz}" - if [ -d "$wrkdir" ] ; then - echo "already have $wrkdir" 1>&3 - continue - else - mkdir -p $wrkdir - cd $wrkdir - local bug=${bugmean[$bugsiz]} - echo "BUG=$bug" 1>&3 - run bash -c "wcsonnet -A bug202=$bug -A round=$trunc $cfgfile > ${base}.json" - echo "$output" - [[ "$status" -eq 0 ]] - if [ "$trunc" = "round" ] ; then - [[ "$(grep -c '"round" : true' ${base}.json)" -eq 3 ]] - else - [[ "$(grep -c '"round" : false' ${base}.json)" -eq 3 ]] - fi - cd .. - fi - done - done + # files named commonly in each variant subidr + local jsonfile="$(basename ${cfgfile} .jsonnet).json" + local logfile="$(basename ${cfgfile} .jsonnet).log" - for bugsiz in nobug smlbug bigbug - do - for trunc in round floor - do - wrkdir="${trunc}-${bugsiz}" - cd $wrkdir - if [ -f "${base}.log" ] ; then - echo "Already ran wire-cell for $wrkdir" 1>&3 - else - run wire-cell -l "${base}.log" -L debug "${base}.json" - echo "$output" - [[ "$status" -eq 0 ]] - if [ "$trunc" = "round" ] ; then - [[ -n "$(grep round=1 ${base}.log)" ]] - else - [[ -n "$(grep round=0 ${base}.log)" ]] - fi - fi - cd .. - done - done -} + cd_tmp -@test "plot adc" { - # cd_tmp file - local mydir="$(dirname ${BATS_TEST_FILENAME})" - local base="$(basename ${BATS_TEST_FILENAME} .bats)" + run bash -c "wcsonnet $cfgfile > ${jsonfile}" + echo "$output" + [[ "$status" -eq 0 ]] - for bugsiz in nobug smlbug bigbug - do - for trunc in round floor - do - wrkdir="${trunc}-${bugsiz}" - cd $wrkdir - for name in inco cohe empno - do - nbase="${base}-${name}-adc" - if [ -f "${nbase}.npz" ] ; then - python $mydir/check_noise_roundtrip.py plot \ - -o "${nbase}-plot.png" "${nbase}.npz" - fi - done - cd .. 
- done - done - # archive these -} + # representative output file + if [ -f "$logfile" ] ; then + echo "Already have $logfile" 1>&3 + continue + fi + # run actual job on pre-compiled config + run wire-cell -l "${logfile}" -L debug "${jsonfile}" + echo "$output" + [[ "$status" -eq 0 ]] + [[ -s "$logfile" ]] -@test "plot spectra" { - # cd_tmp file - local mydir="$(dirname ${BATS_TEST_FILENAME})" - local jsonfile="$(basename ${BATS_TEST_FILENAME} .bats).json" - local base="$(basename ${BATS_TEST_FILENAME} .bats)" - - for bugsiz in nobug smlbug bigbug - do - for trunc in round floor - do - wrkdir="${trunc}-${bugsiz}" - cd $wrkdir - for name in inco empno - do - local nbase="test-noise-roundtrip-${name}-spectra" - if [ ! -f "${nbase}.json.bz2" ] ; then - continue - fi - wirecell-sigproc plot-noise-spectra -z \ - "${nbase}.json.bz2" \ - "${nbase}-output.pdf" - if [ "$name" = "inco" ] ; then - python $mydir/check_noise_roundtrip.py \ - configured-spectra \ - -n $name \ - "$jsonfile" \ - ${nbase}-input.pdf - fi - if [ "$name" = "empno" ] ; then - # warning: hard-coding empno spectra out of laziness - # and assuming it's the one used in the Jsonnet! - wirecell-sigproc plot-noise-spectra -z \ - protodune-noise-spectra-v1.json.bz2 \ - "${nbase}-input.pdf" - fi - - done - cd .. - done - done } -@test "compare round vs floor no bug" { - # cd_tmp file +@test "no weird endpoints of mean waveform" { - local base="$(basename ${BATS_TEST_FILENAME} .bats)" + cd_tmp file - for bl in ac none - do - for thing in wave spec - do - for grp in $(seq 10) - do - local num=$(( $grp - 1 )) - local chmin=$(( $num * 256 )) - local chmax=$(( $grp * 256 )) + ## good lines are like: + # U t 4096 0.02282684326171875 0.06892286163071752 4096 1291 185 13 2 0 0 0 0 0 0 + # U c 800 0.02282684326171875 0.4955734777788893 800 376 0 0 0 0 0 0 0 0 0 + # V t 4096 0.01374267578125 0.03856538100586203 4096 1334 192 13 1 0 0 0 0 0 0 + # V c 800 0.01374267578125 0.49734063258678657 800 386 0 0 0 0 0 0 0 0 0 + # W t 4096 0.009838104248046875 0.016252509189760622 4096 1328 177 14 0 0 0 0 0 0 0 + # W c 960 0.009838104248046875 0.49116500530978957 960 463 0 0 0 0 0 0 0 0 0 + ## bad lines are like: + # U t 4096 0.23914672851562502 1.838315680187899 4096 30 22 15 15 14 13 13 13 11 10 + # U c 800 0.239146728515625 0.4333089727023359 800 201 0 0 0 0 0 0 0 0 0 + # V t 4096 0.24620086669921876 1.0176595165745093 4096 30 22 15 15 14 13 13 13 11 10 + # V c 800 0.24620086669921876 0.43083546962103825 800 198 0 0 0 0 0 0 0 0 0 + # W t 4096 0.23283335367838542 0.20440386815418213 4096 39 26 15 14 14 14 13 11 10 10 + # W c 960 0.23283335367838542 0.4332668683930272 960 249 0 0 0 0 0 0 0 0 0 - wirecell-plot comp1d \ - --transform $bl \ - --single \ - --tier '*' -n $thing --chmin $chmin --chmax $chmax \ - --output comp1d-floor+round-nobug-${thing}-${bl}-grp${grp}.png \ - floor-nobug/test-noise-roundtrip-inco-adc.npz \ - round-nobug/test-noise-roundtrip-inco-adc.npz - done - done - done -} + local wcgen=$(wcb_env_value WCGEN) -@test "compare bug and no bug with round" { + for noise in empno inco + do - # cd_tmp file - local base="$(basename ${BATS_TEST_FILENAME} .bats)" + local adcfile="test-noise-roundtrip-${noise}-adc.npz" + [[ -s "$adcfile" ]] - for bl in ac none - do - for thing in wave spec - do - for grp in $(seq 10) - do - local num=$(( $grp - 1 )) - local chmin=$(( $num * 256 )) - local chmax=$(( $grp * 256 )) + while read line ; do + parts=($line) - wirecell-plot comp1d \ - --transform $bl \ - --single \ - --tier '*' -n $thing --chmin $chmin 
--chmax $chmax \ - --output comp1d-round-bugs-${thing}-${bl}-grp${grp}.png \ - round-bigbug/test-noise-roundtrip-inco-adc.npz \ - round-smlbug/test-noise-roundtrip-inco-adc.npz \ - round-nobug/test-noise-roundtrip-inco-adc.npz - done - done - done -} + echo "$line" + if [ "${parts[1]}" = "c" ] ; then + continue + fi -@test "compare bug and no bug with floor" { + # Bash does not speak floating point. To avoid relying on + # dc/bc to be installed, we do these FP tests lexically. - # cd_tmp file - local base="$(basename ${BATS_TEST_FILENAME} .bats)" + # require nothing past 5 sigma + [[ "${parts[10]}" = "0" ]] - for bl in ac none - do - for thing in wave spec - do - for grp in $(seq 10) - do - local num=$(( $grp - 1 )) - local chmin=$(( $num * 256 )) - local chmax=$(( $grp * 256 )) + # mean should be small. + [[ -n "$(echo ${parts[3]} | grep '^0\.0')" ]] - wirecell-plot comp1d \ - --transform $bl \ - --single \ - --tier '*' -n $thing --chmin $chmin --chmax $chmax \ - --output comp1d-floor-bugs-${thing}-${bl}-grp${grp}.png \ - floor-bigbug/test-noise-roundtrip-inco-adc.npz \ - floor-smlbug/test-noise-roundtrip-inco-adc.npz \ - floor-nobug/test-noise-roundtrip-inco-adc.npz - done - done + # rms should be small. + [[ -n "$(echo ${parts[4]} | grep '^0\.0')" ]] + + done < <($wcgen frame_stats "$adcfile") done } -# @test "no weird endpoints of mean waveform" { -# local mydir="$(dirname ${BATS_TEST_FILENAME})" -# declare -a line -# ## good lines are: -# # U t 4096 0.02282684326171875 0.06892286163071752 4096 1291 185 13 2 0 0 0 0 0 0 -# # U c 800 0.02282684326171875 0.4955734777788893 800 376 0 0 0 0 0 0 0 0 0 -# # V t 4096 0.01374267578125 0.03856538100586203 4096 1334 192 13 1 0 0 0 0 0 0 -# # V c 800 0.01374267578125 0.49734063258678657 800 386 0 0 0 0 0 0 0 0 0 -# # W t 4096 0.009838104248046875 0.016252509189760622 4096 1328 177 14 0 0 0 0 0 0 0 -# # W c 960 0.009838104248046875 0.49116500530978957 960 463 0 0 0 0 0 0 0 0 0 -# ## bad lines are like: -# # U t 4096 0.23914672851562502 1.838315680187899 4096 30 22 15 15 14 13 13 13 11 10 -# # U c 800 0.239146728515625 0.4333089727023359 800 201 0 0 0 0 0 0 0 0 0 -# # V t 4096 0.24620086669921876 1.0176595165745093 4096 30 22 15 15 14 13 13 13 11 10 -# # V c 800 0.24620086669921876 0.43083546962103825 800 198 0 0 0 0 0 0 0 0 0 -# # W t 4096 0.23283335367838542 0.20440386815418213 4096 39 26 15 14 14 14 13 11 10 10 -# # W c 960 0.23283335367838542 0.4332668683930272 960 249 0 0 0 0 0 0 0 0 0 +# bats test_tags=implicit,plots +@test "plot spectra" { -# while read line ; do -# parts=($line) + local wcsigproc="$(wcb_env_value WCSIGPROC)" + + cd_tmp file -# echo "$line" -# if [ "${parts[1]}" = "c" ] ; then -# continue -# fi -# # require nothing past 5 sigma -# [[ "${parts[10]}" = "0" ]] - -# # mean should be small. dumb subst for floating point check. -# [[ -n "$(echo ${parts[3]} | grep '^0\.0')" ]] + for ntype in inco empno + do + local sfile="test-noise-roundtrip-${ntype}-spectra.json.bz2" + + # Plot OUTPUT spectra + local ofile="$(basename $sfile .json.bz2)-output.pdf" + run $wcsigproc plot-noise-spectra -z "${sfile}" "${ofile}" + echo "$output" + [[ "$status" -eq 0 ]] -# # rms should be small. dumb subst for floating point check. -# [[ -n "$(echo ${parts[4]} | grep '^0\.0')" ]] + # Plot INPUT spectra. + local ifile="$(basename $sfile .json.bz2)-input.pdf" + # The method depends on the type. 
-# done < <(python $mydir/check_noise_roundtrip.py stats test-noise-roundtrip-inco-adc.npz) - - -# } + # Spectra is inside configuration + if [ "$ntype" = "inco" ] ; then + run $wcsigproc plot-configured-spectra -c ac -n "$ntype" test-noise-roundtrip.json "${ifile}" + echo "$output" + [[ "$status" -eq 0 ]] + fi + # Spectra is in auxiliary file + if [ "$ntype" = "empno" ] ; then + # warning: hard-coding empno spectra out of laziness + # and assuming it's the one used in the Jsonnet! + run $wcsigproc plot-noise-spectra -z protodune-noise-spectra-v1.json.bz2 "${ifile}" + echo "$output" + [[ "$status" -eq 0 ]] + fi + + saveout -c plots "$ifile" "$ofile" + done +} diff --git a/gen/test/test-noise-roundtrip.jsonnet b/gen/test/test-noise-roundtrip.jsonnet index 22766c30b..a8b2e30fd 100644 --- a/gen/test/test-noise-roundtrip.jsonnet +++ b/gen/test/test-noise-roundtrip.jsonnet @@ -20,7 +20,7 @@ local tns = import "test-noise-spectra.jsonnet"; local tng = { inco: import "test-noise-groups-incoherent.jsonnet", cohe: import "test-noise-groups-coherent.jsonnet", - empno: import "test-noise-groups-incoherent.jsonnet", + empno: import "test-noise-groups-pdsp.jsonnet", }; // Size of original (fictional) waveforms diff --git a/gen/test/test_noise.py b/gen/test/test_noise.py deleted file mode 100755 index b8fefb342..000000000 --- a/gen/test/test_noise.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -# this holds a few ugly hacks. nicer stuff in wire-cell-python. - -import io -import sys -import numpy -import pathlib -import tarfile -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages - -try: - tfpath = sys.argv[1] -except IndexError: - p = pathlib.Path(__file__) - tfpath = p.parent.parent.parent / "build" / "gen" / "test_noise.tar" -assert(tfpath.exists()) - - -tf = tarfile.open(tfpath) -def get(name): - dat = tf.extractfile(name + ".npy").read() - a = io.BytesIO() - a.write(dat) - a.seek(0) - return numpy.load(a) - - - -ss_freq = get("ss_freq") -ss_spec = get("ss_spec") - -freqs = get("freqs") -spec = get("true_spectrum") -aspec = get("average_spectrum") - -pdfpath = tfpath.parent / (tfpath.stem + ".pdf") -with PdfPages(pdfpath) as pdf: - - fig,ax = plt.subplots(1,1) - ax.plot(freqs, spec) - ax.plot(ss_freq, ss_spec) - pdf.savefig(fig) - plt.close() - - fig,ax = plt.subplots(1,1) - for ind in range(5): - w = get(f'example_wave{ind}') - plt.plot(w) - pdf.savefig(fig) - plt.close() - - fig,ax = plt.subplots(1,1) - ax.plot(freqs, aspec, label="mean") - ax.plot(freqs, spec, label="true") - fig.legend() - pdf.savefig(fig) - plt.close() - - -print(str(pdfpath)) diff --git a/iface/README.org b/iface/README.org index cf76f4926..aa5c9b438 100644 --- a/iface/README.org +++ b/iface/README.org @@ -1,4 +1,5 @@ #+title: Wire Cell Interfaces +#+SETUPFILE: ../setup-readme.org This package provides a set of abstract base classes aka interface classes. In general, there should be no implementation code in this diff --git a/img/README.org b/img/README.org index e7c71dcc2..66be5d499 100644 --- a/img/README.org +++ b/img/README.org @@ -1,9 +1,10 @@ #+title: Wire-Cell Imaging +#+SETUPFILE: ../setup-readme.org -This sub-package holds Wire-Cell Toolkit 3D imaging components. +* Overeview -- Some notes and docs are in [[doc/]]. +This sub-package holds Wire-Cell Toolkit 3D imaging components. 
-- An informal note on some of the algorithms is in [[https://www.phy.bnl.gov/~bviren/tmp/wctimg/raygrid.pdf][raygrid.pdf]] +An informal note on some of the algorithms is in [[https://www.phy.bnl.gov/~bviren/tmp/wctimg/raygrid.pdf][raygrid.pdf]] - +#+include: docs/BlobDepoFill.org diff --git a/img/docs/BlobDepoFill.org b/img/docs/BlobDepoFill.org index 3b82bf78b..e757c0af8 100644 --- a/img/docs/BlobDepoFill.org +++ b/img/docs/BlobDepoFill.org @@ -1,8 +1,8 @@ -#+title: ~WireCell::Img::BlobDepoFill~ +* Blob Depo Fill -Replaces charge in blobs with that from overlapping depos. +This replaces charge in blobs with that from overlapping depos. -* Overview +** Overview The ~BlobDepoFill~ WCT flow graph node is in the "join" DFP category. It accepts an ~ICluster~ on input port 0 and an ~IDepoSet~ on input port 1 @@ -16,10 +16,10 @@ blobs. See section [[Selection]] for details on which depos are considered and section [[Alignment]] for details on how blobs and depos are correlated. -See also presentation [[blob-depo-fill.pdf]]. +See also presentation blob-depo-fill.pdf -* Algorithm +** Algorithm The goal is to integrate the depo charge distribution over the volume of each blob. This ends up being rather complex for a few reasons. @@ -72,7 +72,7 @@ boundary rays from the other planes. The three integrals from each dimension then scale the depo charge and the result is added to a sum associated with the current blob. -* Selection +** Selection The ~Drifter~ produces depos at a fixed location along the drift direction which is near to anode plane faces. However, the set of @@ -88,7 +88,7 @@ context where it receives an ~ICluster~ with blobs from a single context where the ~ICluster~ spans multiple anode planes (eg, after some form of cross-anode "stitching"). -* Alignment +** Alignment In order to correlate portions of the 3D Gaussian extent of a depo with a blob, it is likely necessary to apply a time offset to match diff --git a/img/docs/notes.org b/img/docs/notes.org index 107522e0d..aed9d6c76 100644 --- a/img/docs/notes.org +++ b/img/docs/notes.org @@ -129,7 +129,7 @@ The direction of $p^l$ defines an axis for ROCS $l$ and its magnitude defines a A second ROCS $m$ is now considered which has rays at some non-zero angle with ROCS $l$. The set of crossing points of rays from each form a regular, non-orthogonal 2D grid. We define: -- $r^{lm}_{ij}$ :: a vector giving the location (expressed in global Cartesian coordinates) of the crossing point of the $i^{th}$ ray in ROCS $l$ and the $j^{th$ ray in ROCS $m$, $l \ne m$ and with one set of rays having a non-zero angle w.r.t. the other. +- $r^{lm}_{ij}$ :: a vector giving the location (expressed in global Cartesian coordinates) of the crossing point of the $i^{th}$ ray in ROCS $l$ and the $j^{th}$ ray in ROCS $m$, $l \ne m$ and with one set of rays having a non-zero angle w.r.t. the other. - $w^{lm}$ :: a relative vector giving the displacement between the intersections of a pair of neighboring rays of ROCS $m$ with a ray in ROCS $l$. That is, this vector allows one to "hop" along a ray in $l$ from one crossing point of a ray in $m$ to the crossing point of its neighbor. @@ -138,7 +138,7 @@ Without loss of generality one ray in each ROCS is given index $i = 0$ and it is In forming "blobs" it is typical that one must test if a particular point is "inside" some boundary defined by a pair of rays. This is equivalent to calculating the pitch location of the point in the ROCS coordinate system. 
If the only points which may be tested are themselves calculated as $r^{lm}_{ij}$ then one evaluate the pitch location in a third ROCS in terms of the original crossings of two other ROCS, $P^{lmn}_{ij} = (r^{lm}_{ij} - c^n) \cdot \hat{p}^n$. Expanding, one arrives at, $P^{lmn}_{ij} = r^{lm}_{00}\cdot \hat{p}^n + jw^{lm} \cdot \hat{p}^n + iw^{ml} \cdot \hat{p}^n - c^n \cdot \hat{p}^n$. Note, that this decomposes into the a form of two three dimensional tensors $P^{lmn}_{ij} = ja^{lmn} + ia^{mln} + b^{lmn}$ where $b^{lmn}$ is symmetric about the exchange of $l$ and $m$. The size of each tensor dimension is that of the number of ROCS considered. -Finally, one may divide this tensor by the magnitude of the pitch of the third ROCS in order to find the index of the closest ray. This pitch index is $I^{lmn}_ij} = P^{lmn}_{ij}/|p^n|$. +Finally, one may divide this tensor by the magnitude of the pitch of the third ROCS in order to find the index of the closest ray. This pitch index is $I^{lmn}_{ij} = P^{lmn}_{ij}/|p^n|$. *** Slice Overlaps @@ -233,7 +233,7 @@ A per-slice blobs can be viewed: paraview test-pdsp-748-blobs.vtu #+END_EXAMPLE -[[file:x.svg]] +[[file:x.pdf]] On Mate desktop, paraview renders tool tips with white text on yellow background which is unreadable. [[https://community.slickedit.com/index.php/topic,14388.0.html][This solution]] works. Install ~qtconfig~, "Appearance" tab, "Tune Palette..." button "ToolTipBase" and "ToolTipText" under "Central color roles". Pick nicer color (I just made text black) and save. diff --git a/img/test/test-pdsp-6apas-bee.sh b/img/test/test-pdsp-6apas-bee.sh deleted file mode 100755 index 3206e971d..000000000 --- a/img/test/test-pdsp-6apas-bee.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# Leave this script in place. -# -# Run PDSP sig+noise sim + sigproc + imaging using one or more input -# depo files in Bee JSON format. -# -# Example JSON files provided: -# -# $ unzip sio/test/test_beedeposource.zip -# $ ls -R data/ -# -# Then run this script like: -# -# $ ./img/test/test-pdsp-6apas-bee.sh data/?/?-truthDepo.json -# -# or for just one "event": -# -# $ ./img/test/test-pdsp-6apas-bee.sh data/0/0-truthDepo.json - -if [ -z "$WIRECELL_PATH" ] ; then - echo "This test requires WIRECELL_PATH to be set" - exit 1 -fi - -testdir=$(dirname $(realpath $BASH_SOURCE)) -cfgfile="$testdir/in.$(basename $BASH_SOURCE .sh).jsonnet" -if [ ! -f "$cfgfile" ] ; then - echo "expect to find $cfgfile" - exit 1 -fi - -depofiles="\"$testdir/0-truthDepo.json.bz2\"" -if [ -n "$1" ] ; then - depofiles=$(for n in $@; do echo -n '"'$n'"',; done) -fi - -set -x -wire-cell -L debug -l stdout -C "depofiles=["$depofiles"]" -c $cfgfile diff --git a/img/test/test-wct-uboone-img.bats b/img/test/test-wct-uboone-img.bats new file mode 100644 index 000000000..6765a47d6 --- /dev/null +++ b/img/test/test-wct-uboone-img.bats @@ -0,0 +1,43 @@ +#!/usr/bin/env bats + +# Run tests related to applying imaging to a rich uboone event. 
+ +bats_load_library wct-bats.sh + +setup_file () { + skip_if_no_input + + cd_tmp file + + # was: uboone/celltree/celltreeOVERLAY-event6501.tar.bz2 + local infile="$(input_file frames/celltreeOVERLAY-event6501.tar.bz2)" + [[ -s "$infile" ]] + + local cfgfile="$(relative_path test-wct-uboone-img.jsonnet)" + [[ -s "$cfgfile" ]] + local jsonfile="test-wct-uboone-img.json" + local outfile="test-wct-uboone-img.tar.gz" + + # explicit compilation to JSON to make subsequent tests faster + if [ -f "$jsonfile" ] ; then + echo "reusing $jsonfile" + else + compile_jsonnet "$cfgfile" "$jsonfile" -A infile="$infile" -A outfile="$outfile" + fi + + if [ -f "$outfile" ] ; then + echo "reusing $outfile" + else + wct "$jsonfile" + fi +} + +@test "create graph" { + cd_tmp file + + local jsonfile="test-wct-uboone-img.json" + [[ -f "$jsonfile" ]] + local svgfile="test-wct-uboone-img-graph.svg" + dotify_graph "$jsonfile" "$svgfile" + saveout -c plots "$svgfile" +} diff --git a/img/test/test-wct-uboone-img.jsonnet b/img/test/test-wct-uboone-img.jsonnet new file mode 100644 index 000000000..fc7cfd985 --- /dev/null +++ b/img/test/test-wct-uboone-img.jsonnet @@ -0,0 +1,406 @@ +// +// This is a modified version of: +// https://github.com/HaiwangYu/wcp-porting-img/blob/main/wct-celltree-img.jsonnet +// +// It is changed in these ways: +// +// 1) Produces a top-level function with all default TLAs +// +// 2) Reads a WCT tar stream file with a single event instead of the +// "celltreeOVERLAY.root" ROOT file with 100 events. +// +// The default WCT tar stream file used was produced with: +// +// wire-cell -A infile=celltreeOVERLAY.root \ +// -A outfile=celltreeOVERLAY-event6501.tar.bz2 \ +// root/test/test-celltree-to-framefile.jsonnet +// +// This file is provided by the wire-cell-test-data repo. +// +// 3) The ancillary img.jsonnet file at the URL above is incorporated +// directly into this file. +// +// 4) Some previous options-set-via-commenting are exposed as TLAs. +// +// 5) Some unused code is deleted. +// +// 6) Slight variable rename. + + +local wc = import "wirecell.jsonnet"; +local pg = import "pgraph.jsonnet"; +local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anodes = tools.anodes; + +local img = { + // A function that sets up slicing for an APA. + slicing :: function(anode, aname, tag="", span=4, active_planes=[0,1,2], masked_planes=[], dummy_planes=[]) { + ret: pg.pnode({ + type: "MaskSlices", + name: "slicing-"+aname, + data: { + tag: tag, + tick_span: span, + anode: wc.tn(anode), + min_tbin: 0, + max_tbin: 9592, + active_planes: active_planes, + masked_planes: masked_planes, + dummy_planes: dummy_planes, + }, + }, nin=1, nout=1, uses=[anode]), + }.ret, + + // A function that sets up tiling for an APA including a per-face split.
+ tiling :: function(anode, aname) { + + local slice_fanout = pg.pnode({ + type: "SliceFanout", + name: "slicefanout-" + aname, + data: { multiplicity: 2 }, + }, nin=1, nout=2), + + local tilings = [pg.pnode({ + type: "GridTiling", + name: "tiling-%s-face%d"%[aname, face], + data: { + anode: wc.tn(anode), + face: face, + } + }, nin=1, nout=1, uses=[anode]) for face in [0,1]], + + local blobsync = pg.pnode({ + type: "BlobSetSync", + name: "blobsetsync-" + aname, + data: { multiplicity: 2 } + }, nin=2, nout=1), + + // ret: pg.intern( + // innodes=[slice_fanout], + // outnodes=[blobsync], + // centernodes=tilings, + // edges= + // [pg.edge(slice_fanout, tilings[n], n, 0) for n in [0,1]] + + // [pg.edge(tilings[n], blobsync, 0, n) for n in [0,1]], + // name='tiling-' + aname), + ret : tilings[0], + }.ret, + + // + multi_active_slicing_tiling :: function(anode, name, tag="gauss", span=4) { + local active_planes = [[0,1,2],[0,1],[1,2],[0,2],], + local masked_planes = [[],[2],[0],[1]], + local iota = std.range(0,std.length(active_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, tag, span, active_planes[n], masked_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [pg.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: pg.fan.pipe("FrameFanout", multipass, "BlobSetMerge", "multi_active_slicing_tiling"), + }.ret, + + // + multi_masked_2view_slicing_tiling :: function(anode, name, tag="gauss", span=109) { + local dummy_planes = [[2],[0],[1]], + local masked_planes = [[0,1],[1,2],[0,2]], + local iota = std.range(0,std.length(dummy_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, tag, span, + active_planes=[],masked_planes=masked_planes[n], dummy_planes=dummy_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [pg.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: pg.fan.pipe("FrameFanout", multipass, "BlobSetMerge", "multi_masked_slicing_tiling"), + }.ret, + // + multi_masked_slicing_tiling :: function(anode, name, tag="gauss", span=109) { + local active_planes = [[2],[0],[1],[]], + local masked_planes = [[0,1],[1,2],[0,2],[0,1,2]], + local iota = std.range(0,std.length(active_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, tag, span, active_planes[n], masked_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [pg.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: pg.fan.pipe("FrameFanout", multipass, "BlobSetMerge", "multi_masked_slicing_tiling"), + }.ret, + + // Just clustering + clustering :: function(anode, aname, spans=1.0) { + ret : pg.pnode({ + type: "BlobClustering", + name: "blobclustering-" + aname, + data: { spans : spans } + }, nin=1, nout=1), + }.ret, + + // this bundles clustering, grouping and solving. Other patterns + // should be explored. Note, anode isn't really needed, we just + // use it for its ident and to keep similar calling pattern to + // above.. 
+ solving :: function(anode, aname, spans=1.0, threshold=0.0) { + local bc = pg.pnode({ + type: "BlobClustering", + name: "blobclustering-" + aname, + data: { spans : spans } + }, nin=1, nout=1), + local bg = pg.pnode({ + type: "BlobGrouping", + name: "blobgrouping-" + aname, + data: { + } + }, nin=1, nout=1), + local bs = pg.pnode({ + type: "BlobSolving", + name: "blobsolving-" + aname, + data: { threshold: threshold } + }, nin=1, nout=1), + local cs0 = pg.pnode({ + type: "ChargeSolving", + name: "chargesolving0-" + aname, + data: { + weighting_strategies: ["uniform"], //"uniform", "simple" + } + }, nin=1, nout=1), + local cs1 = pg.pnode({ + type: "ChargeSolving", + name: "chargesolving1-" + aname, + data: { + weighting_strategies: ["uniform"], //"uniform", "simple" + } + }, nin=1, nout=1), + local lcbr = pg.pnode({ + type: "LCBlobRemoval", + name: "lcblobremoval-" + aname, + data: { + blob_value_threshold: 1e6, + blob_error_threshold: 0, + } + }, nin=1, nout=1), + local cs = pg.intern( + innodes=[cs0], outnodes=[cs1], centernodes=[], + edges=[pg.edge(cs0,cs1)], + name="chargesolving-" + aname), + local csp = pg.intern( + innodes=[cs0], outnodes=[cs1], centernodes=[lcbr], + edges=[pg.edge(cs0,lcbr), pg.edge(lcbr,cs1)], + name="chargesolving-" + aname), + local solver = cs0, + ret: pg.intern( + innodes=[bc], outnodes=[solver], centernodes=[bg], + edges=[pg.edge(bc,bg), pg.edge(bg,solver)], + name="solving-" + aname), + // ret: bc, + }.ret, + + dump(outfile) :: { + + local cs = pg.pnode({ + type: "ClusterFileSink", + name: outfile, + data: { + outname: outfile, + format: "json", + } + }, nin=1, nout=0), + ret: cs + }.ret, + +}; +// end img. + +// local celltreesource = pg.pnode({ +// type: "CelltreeSource", +// name: "celltreesource", +// data: { +// filename: "celltreeOVERLAY.root", +// EventNo: 6501, +// // in_branch_base_names: raw [default], calibGaussian, calibWiener +// in_branch_base_names: ["calibWiener", "calibGaussian"], +// out_trace_tags: ["wiener", "gauss"], // orig, gauss, wiener +// in_branch_thresholds: ["channelThreshold", ""], +// }, +// }, nin=0, nout=1); +local filesource(infile, tags=[]) = pg.pnode({ + type: "FrameFileSource", + name: infile, + data: { + inname: infile, + tags: tags, + }, +}, nin=0, nout=1); + +local dumpframes = pg.pnode({ + type: "DumpFrames", + name: 'dumpframe', +}, nin=1, nout=0); + +local magdecon = pg.pnode({ + type: 'MagnifySink', + name: 'magdecon', + data: { + output_filename: "mag.root", + root_file_mode: 'UPDATE', + frames: ['gauss', 'wiener', 'gauss_error'], + cmmtree: [['bad','bad']], + summaries: ['gauss', 'wiener', 'gauss_error'], + trace_has_tag: true, + anode: wc.tn(anodes[0]), + }, +}, nin=1, nout=1, uses=[anodes[0]]); + +local waveform_map = { + type: 'WaveformMap', + name: 'wfm', + data: { + filename: "microboone-charge-error.json.bz2", + }, + uses: [], +}; + +local charge_err = pg.pnode({ + type: 'ChargeErrorFrameEstimator', + name: 'cefe', + data: { + intag: "gauss", + outtag: 'gauss_error', + anode: wc.tn(anodes[0]), + rebin: 4, // this number should be consistent with the waveform_map choice + fudge_factors: [2.31, 2.31, 1.1], // fudge factors for each plane [0,1,2] + time_limits: [12, 800], // the unit of this is in ticks + errors: wc.tn(waveform_map), + }, +}, nin=1, nout=1, uses=[waveform_map, anodes[0]]); + +local cmm_mod = pg.pnode({ + type: 'CMMModifier', + name: '', + data: { + cm_tag: "bad", + trace_tag: "gauss", + anode: wc.tn(anodes[0]), + start: 0, // start veto ... 
+ end: 9592, // end of veto + ncount_cont_ch: 2, + cont_ch_llimit: [296, 2336+4800 ], // veto if continues bad channels + cont_ch_hlimit: [671, 2463+4800 ], + ncount_veto_ch: 1, + veto_ch_llimit: [3684], // direct veto these channels + veto_ch_hlimit: [3699], + dead_ch_ncount: 10, + dead_ch_charge: 1000, + ncount_dead_ch: 2, + dead_ch_llimit: [2160, 2080], // veto according to the charge size for dead channels + dead_ch_hlimit: [2176, 2096], + ncount_org: 5, // organize the dead channel ranges according to these boundaries + org_llimit: [0 , 1920, 3840, 5760, 7680], // must be ordered ... + org_hlimit: [1919, 3839, 5759, 7679, 9592], // must be ordered ... + }, +}, nin=1, nout=1, uses=[anodes[0]]); + +local frame_quality_tagging = pg.pnode({ + type: 'FrameQualityTagging', + name: '', + data: { + trace_tag: 'gauss', + anode: wc.tn(anodes[0]), + nrebin: 4, // rebin count ... + length_cut: 3, + time_cut: 3, + ch_threshold: 100, + n_cover_cut1: 12, + n_fire_cut1: 14, + n_cover_cut2: 6, + n_fire_cut2: 6, + fire_threshold: 0.22, + n_cover_cut3: [1200, 1200, 1800 ], + percent_threshold: [0.25, 0.25, 0.2 ], + threshold1: [300, 300, 360 ], + threshold2: [150, 150, 180 ], + min_time: 3180, + max_time: 7870, + flag_corr: 1, + }, +}, nin=1, nout=1, uses=[anodes[0]]); + +local frame_masking = pg.pnode({ + type: 'FrameMasking', + name: '', + data: { + cm_tag: "bad", + trace_tags: ['gauss','wiener'], + anode: wc.tn(anodes[0]), + }, +}, nin=1, nout=1, uses=[anodes[0]]); + +local anode = anodes[0]; +// multi slicing includes 2-view tiling and dead tiling +local active_planes = [[0,1,2],[0,1],[1,2],[0,2],]; +local masked_planes = [[],[2],[0],[1]]; +// single, multi, active, masked +function(infile="celltreeOVERLAY-event6501.tar.bz2", + outfile="clusters-event6501.tar.gz", + slicing = "single") + + local multi_slicing = slicing; +local imgpipe = + if multi_slicing == "single" then + pg.pipeline([ + img.slicing(anode, anode.name, "gauss", 109, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + img.tiling(anode, anode.name), + img.solving(anode, anode.name), + // img.clustering(anode, anode.name), + ], "img-" + anode.name) + else if multi_slicing == "active" then + pg.pipeline([ + img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", "gauss", 4), + img.solving(anode, anode.name+"-ms-active") + ]) + else if multi_slicing == "masked" then + pg.pipeline([ + // img.multi_masked_slicing_tiling(anode, anode.name+"-ms-masked", "gauss", 109), + img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", "gauss", 109), + img.clustering(anode, anode.name+"-ms-masked"), + ]) + else { + local active_fork = pg.pipeline([ + img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", "gauss", 4), + img.solving(anode, anode.name+"-ms-active"), + ]), + local masked_fork = pg.pipeline([ + // img.multi_masked_slicing_tiling(anode, anode.name+"-ms-masked", "gauss", 109), + img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", "gauss", 109), + img.clustering(anode, anode.name+"-ms-masked"), + ]), + ret: pg.fan.fanout("FrameFanout",[active_fork,masked_fork], "fan_active_masked"), + }.ret; +local graph = pg.pipeline([ + filesource(infile, tags=["gauss","wiener"]), + // frame_quality_tagging, // event level tagging + // cmm_mod, // CMM modification + // frame_masking, // apply CMM + charge_err, // calculate charge error + // magdecon, // magnify out + // dumpframes, + imgpipe, + img.dump(outfile), +], "main"); + +local app = { + type: 'Pgrapher', + data: { + edges: 
pg.edges(graph), + }, +}; + +local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellRoot", "WireCellImg"], + apps: ["Pgrapher"] + } +}; + +[cmdline] + pg.uses(graph) + [app] diff --git a/pgraph/README.org b/pgraph/README.org new file mode 100644 index 000000000..e28177151 --- /dev/null +++ b/pgraph/README.org @@ -0,0 +1,8 @@ +#+TITLE: Wire-Cell Toolkit Pgraph +#+SETUPFILE: ../setup-readme.org + +* Overview + +Pgraph provides a WCT "app" that implements the /data flow programming +paradigm/. It executes WCT "flow graphs" following a single-threaded, +low-memory policy. See also ~TbbFlow~ from sub-package ~tbb~. diff --git a/pytorch/README.org b/pytorch/README.org index b9e4181b5..e6b49a362 100644 --- a/pytorch/README.org +++ b/pytorch/README.org @@ -1,4 +1,5 @@ #+title: WCT usages of libtorch +#+SETUPFILE: ../setup-readme.org We call this package "pytorch" but really it is C++. diff --git a/pytorch/test/test_idft_torchdft.bats b/pytorch/test/test_idft_torchdft.bats new file mode 100644 index 000000000..8e367d347 --- /dev/null +++ b/pytorch/test/test_idft_torchdft.bats @@ -0,0 +1,10 @@ +#!/usr/bin/env bats + +bats_load_library wct-bats.sh + +@test "test_idft with torchdft" { + usepkg aux # to get test_dft + run test_idft TorchDFT WireCellPytorch + echo "$output" + [[ "$status" -eq 0 ]] +} diff --git a/pytorch/wscript_build b/pytorch/wscript_build index 26088720d..599f70fbc 100644 --- a/pytorch/wscript_build +++ b/pytorch/wscript_build @@ -1,4 +1,5 @@ bld.smplpkg('WireCellPytorch', use='WireCellAux LIBTORCH') +# p.rule("./${SRC}", source=["test_idft", "TorchDFT", "WireCellPytorch"]) ## when building with __DEBUG__ defined in DNNROIFinding you need to ## temporarily add HDF5 as a dependency. diff --git a/root/README.org b/root/README.org index 5a7172331..9ad0fcc53 100644 --- a/root/README.org +++ b/root/README.org @@ -1,4 +1,5 @@ #+title: wire-cell-root +#+SETUPFILE: ../setup-readme.org This packages houses dependencies on ROOT. diff --git a/root/test/test-celltree-to-framefile.jsonnet b/root/test/test-celltree-to-framefile.jsonnet index f8d5a3bd8..e089ee94b 100644 --- a/root/test/test-celltree-to-framefile.jsonnet +++ b/root/test/test-celltree-to-framefile.jsonnet @@ -11,9 +11,10 @@ function(infile="celltree.root", outfile="frames.npz", local source = high.fio.celltree_file_source( infile, eventid, branches = ["calibWiener", "calibGaussian"], - frame_tags = ["gauss"], trace_tags = trace_tags); + frame_tags = ["gauss","weiner"], + trace_tags = trace_tags); local target = high.fio.frame_file_sink(outfile, trace_tags); local graph = high.pg.pipeline([source, target]); - high.main(graph) + high.main(graph, extra_plugins=['WireCellRoot']) diff --git a/setup-readme.org b/setup-readme.org new file mode 100644 index 000000000..a21bc41a6 --- /dev/null +++ b/setup-readme.org @@ -0,0 +1,3 @@ +#+latex_header: \usepackage[margin=1in]{geometry} +#+options: ':t toc:t +#+SETUPFILE: https://fniessen.github.io/org-html-themes/org/theme-readtheorg.setup diff --git a/sigproc/README.org b/sigproc/README.org index 9252c21ba..4decc7985 100644 --- a/sigproc/README.org +++ b/sigproc/README.org @@ -1,10 +1,13 @@ -* Wire Cell Signal Processing +#+title: Wire Cell Signal Processing +#+SETUPFILE: ../setup-readme.org + +* Overview This is a Wire Cell Toolkit package which provides signal processing functionality. 
The algorithm and performance of this package (0.6.2) have been summarized in [[https://arxiv.org/abs/1802.08709][arXiv:1802.08709]] (MicroBooNE collaboration).

-It also contains Python code to help prepare a Wire Cell Toolkit [[./docs/field-response-data-file.org][field response data file]]. See the main [[https://github.com/wirecell/wire-cell-toolkit][wire-cell-toolkit]] package for entry points to the toolkit.
+#+include: docs/field-response-data-file.org
diff --git a/sigproc/docs/adc-noise-sig.org b/sigproc/docs/talks/adc-noise-sig.org
similarity index 100%
rename from sigproc/docs/adc-noise-sig.org
rename to sigproc/docs/talks/adc-noise-sig.org
diff --git a/sigproc/test/_history_pdsp_sim_sp.bats b/sigproc/test/_history_pdsp_sim_sp.bats
new file mode 100644
index 000000000..2880754d1
--- /dev/null
+++ b/sigproc/test/_history_pdsp_sim_sp.bats
@@ -0,0 +1,54 @@
+#!/usr/bin/env bats
+
+# This test consumes historical files produced by check_pdsp_sim_sp.bats
+
+bats_load_library wct-bats.sh
+
+# bats test_tags=time:1
+
+@test "historical pdsp_sim_sp comp1d plots" {
+
+    local wcplot=$(wcb_env_value WCPLOT)
+
+    # will cd here to make plots have minimal filename labels
+    local rundir=$(blddir)/tests/history
+    # but will deposit plot files to our temp dir
+    local outdir=$(tmpdir)
+
+
+    local inpath="check_pdsp_sim_sp/check_pdsp_sim_sp.tar.bz2"
+    local frame_files=( $(historical_files -v $(version) -l 2 $inpath) )
+    yell "frame files: ${frame_files[@]}"
+    frame_files=( $(realpath --relative-to=$rundir ${frame_files[*]}) )
+
+    cd $rundir
+
+    for plot in wave
+    do
+        local plotfile="$outdir/comp1d-${plot}-history.png"
+        $wcplot \
+            comp1d -n $plot --markers 'o + x .' -t 'orig' \
+            --chmin 700 --chmax 701 -s \
+            -o $plotfile ${frame_files[*]}
+        saveout -c reports $plotfile
+
+        local plotfile="$outdir/comp1d-${plot}-history-zoom1.png"
+        $wcplot \
+            comp1d -n $plot --markers 'o + x .' -t 'orig' \
+            --chmin 700 --chmax 701 -s \
+            --xrange 2000 3200 \
+            -o $plotfile ${frame_files[*]}
+        saveout -c reports $plotfile
+
+        local plotfile="$outdir/comp1d-${plot}-history-zoom2.png"
+        $wcplot \
+            comp1d -n $plot --markers 'o + x .' -t 'orig' \
+            --chmin 700 --chmax 701 -s \
+            --xrange 4400 5200 \
+            -o $plotfile ${frame_files[*]}
+        saveout -c reports $plotfile
+    done
+
+
+}
+
diff --git a/sigproc/test/check_pdsp_sim_sp.bats b/sigproc/test/check_pdsp_sim_sp.bats
new file mode 100644
index 000000000..2514ddedd
--- /dev/null
+++ b/sigproc/test/check_pdsp_sim_sp.bats
@@ -0,0 +1,45 @@
+#!/usr/bin/env bats
+
+# composited check for sim and sigproc using PDSP
+
+# bats file_tags=sim,sigproc,PDSP,history
+
+bats_load_library wct-bats.sh
+
+# The intention is to run this test in multiple releases and compare across releases.
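+#
+# For quick local iteration this check can also be run on its own with the
+# bats runner vendored under test/bats/.  This is an illustrative sketch
+# (not part of the test): it assumes the toolkit has been built with tests
+# enabled and that wire-cell and the wct-bats.sh helpers are reachable from
+# the calling environment.
+#
+#   ./test/bats/bin/bats sigproc/test/check_pdsp_sim_sp.bats
+#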
+# bats test_tags=history,plots,implicit
+@test "composited check for sim and sigproc using PDSP" {
+
+    cd_tmp
+
+    local name="check_pdsp_sim_sp"
+    local outfile="${name}.tar.bz2"  # format with support going back the longest
+    local cfgfile="${BATS_TEST_FILENAME%.bats}.jsonnet"
+    local depofile=( $(input_file depos/cosmic-500-1.npz) )
+    yell "depofile: ${depofile}"
+
+    run wire-cell -l "$logfile" -L debug \
+        -V input=$depofile \
+        -V output="$outfile" \
+        -c "$cfgfile"
+    yell "outfile: ${outfile}"
+    [[ "$status" -eq 0 ]]
+    [[ -s "$outfile" ]]
+    saveout -c history "$outfile"
+
+    local wcplot=$(wcb_env_value WCPLOT)
+    for what in wave
+    do
+        local pout="${name}-comp1d-${what}.png"
+        run $wcplot comp1d \
+                -o $pout \
+                -t 'orig' -n $what \
+                --chmin 700 --chmax 701 -s \
+                "${outfile}"
+        echo "$output"
+        [[ "$status" -eq 0 ]]
+        [[ -s "$pout" ]]
+        saveout -c plots "$pout"
+    done
+}
+
diff --git a/sio/README.org b/sio/README.org
index 598f30e01..86b262e2b 100644
--- a/sio/README.org
+++ b/sio/README.org
@@ -1,4 +1,5 @@
-* Simple I/O for Wire Cell Toolkit
+#+title: Simple I/O for Wire Cell Toolkit
+#+SETUPFILE: ../setup-readme.org

This package provides some WCT "simple" input/ouput file support.  It
does not depend on ROOT and some ROOT based I/O components may be
diff --git a/tbb/README.org b/tbb/README.org
index 14de72346..e8c3981ba 100644
--- a/tbb/README.org
+++ b/tbb/README.org
@@ -1,4 +1,5 @@
-#+TITLE: Wire Cell Interface to TBB flow graph
+#+title: Wire Cell Interface to TBB flow graph
+#+SETUPFILE: ../setup-readme.org

Note, TBB docs seem to be under constant churn so expect links here to
become dead.
diff --git a/test/README.org b/test/README.org
new file mode 100644
index 000000000..a4c1aaa0a
--- /dev/null
+++ b/test/README.org
@@ -0,0 +1,20 @@
+#+title: Wire-Cell Toolkit Testing
+#+SETUPFILE: ../setup-readme.org
+
+The Wire-Cell Toolkit (WCT) includes a large number of tests.  Developers and users are strongly encouraged to contribute even more.
+
+The tests are *not* built nor run by default.  Typically, *normal users* do not need to expend the processing time to build and run them.  Tests are most needed by *developers* as they create and fix bugs, by *release managers* preparing a release and by *expert users* porting WCT to new platforms.
+
+If you wish to write tests (and, developers, this means you!) read the first topics on the framework and the general document on how to write tests.  You may then skip to a specific language.  The topics on the data repository
+will be of interest for basic tests that need input and for the historical tests, which are explained afterwards.
+
+#+include: docs/testing.org
+
+* Links to more docs :noexport:
+
+
+- [[file:docs/framework.org][WCT test framework]]
+- [[file:docs/writing.org][How to write tests]]
+- [[file:docs/languages.org][Programming language support]]
+- [[file:docs/datarepo.org][Test data repository]]
+- [[file:docs/history.org][Historical testing]]
diff --git a/test/bats/.devcontainer/Dockerfile b/test/bats/.devcontainer/Dockerfile
new file mode 100644
index 000000000..f3c3ed9a6
--- /dev/null
+++ b/test/bats/.devcontainer/Dockerfile
@@ -0,0 +1,15 @@
+ARG bashver=latest
+
+FROM bash:${bashver}
+
+# Install parallel and accept the citation notice (we aren't using this in a
+# context where it make sense to cite GNU Parallel).
+RUN echo "@edgecomm http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \ + apk update && \ + apk add --no-cache parallel ncurses shellcheck@edgecomm && \ + mkdir -p ~/.parallel && touch ~/.parallel/will-cite + +RUN ln -s /opt/bats/bin/bats /usr/sbin/bats +COPY . /opt/bats/ + +ENTRYPOINT ["bash", "/usr/sbin/bats"] diff --git a/test/bats/.devcontainer/devcontainer.json b/test/bats/.devcontainer/devcontainer.json new file mode 100644 index 000000000..2b81e3f20 --- /dev/null +++ b/test/bats/.devcontainer/devcontainer.json @@ -0,0 +1,5 @@ +{ + "name": "Bats core development environment", + "dockerFile": "Dockerfile", + "build": {"args": {"bashver": "4.3"}} +} \ No newline at end of file diff --git a/test/bats/.editorconfig b/test/bats/.editorconfig new file mode 100644 index 000000000..457107ead --- /dev/null +++ b/test/bats/.editorconfig @@ -0,0 +1,35 @@ +root = true + +[*] +end_of_line = lf +indent_style = space +indent_size = 2 +insert_final_newline = true +max_line_length = 80 +trim_trailing_whitespace = true + +# The JSON files contain newlines inconsistently +[*.json] +indent_size = 2 +insert_final_newline = ignore + +# YAML +[*.{yml,yaml}] +indent_style = space +indent_size = 2 + +# Makefiles always use tabs for recipe indentation +[{Makefile,*.mak}] +indent_style = tab + +# Markdown +[*.{md,rmd,mkd,mkdn,mdwn,mdown,markdown,litcoffee}] +max_line_length = 80 +# tabs behave as if they were replaced by spaces with a tab stop of 4 characters +tab_width = 4 +# trailing spaces indicates word wrap +trim_trailing_spaces = false +trim_trailing_whitespace = false + +[test/fixtures/bats/*_no_shellcheck.bats] +ignore = true \ No newline at end of file diff --git a/test/bats/.gitattributes b/test/bats/.gitattributes new file mode 100755 index 000000000..20cad1f8b --- /dev/null +++ b/test/bats/.gitattributes @@ -0,0 +1,3 @@ +* text=auto +*.sh eol=lf +libexec/* eol=lf diff --git a/test/bats/.github/ISSUE_TEMPLATE/bug_report.md b/test/bats/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..8c6ea2148 --- /dev/null +++ b/test/bats/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,29 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: 'Priority: NeedsTriage, Type: Bug' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Environment (please complete the following information):** + - Bats Version [e.g. 1.4.0 or commit hash] + - OS: [e.g. Linux, FreeBSD, MacOS] + - Bash version: [e.g. 5.1] + +**Additional context** +Add any other context about the problem here. diff --git a/test/bats/.github/ISSUE_TEMPLATE/feature_request.md b/test/bats/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..506b98beb --- /dev/null +++ b/test/bats/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: 'Priority: NeedsTriage, Type: Enhancement' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. 
+ +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context about the feature request here. diff --git a/test/bats/.github/workflows/check_pr_label.sh b/test/bats/.github/workflows/check_pr_label.sh new file mode 100755 index 000000000..2ff5723c7 --- /dev/null +++ b/test/bats/.github/workflows/check_pr_label.sh @@ -0,0 +1,10 @@ +#!/usr/bin/bash + +get_pr_json() { + curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/bats-core/bats-core/pulls/$1" +} + +PR_NUMBER="$1" +LABEL="$2" + +get_pr_json "$PR_NUMBER" | jq .labels[].name | grep "$LABEL" diff --git a/test/bats/.github/workflows/release.yml b/test/bats/.github/workflows/release.yml new file mode 100644 index 000000000..b38f8201d --- /dev/null +++ b/test/bats/.github/workflows/release.yml @@ -0,0 +1,30 @@ +name: Release + +on: + release: { types: [published] } + workflow_dispatch: + +jobs: + npmjs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + with: + registry-url: "https://registry.npmjs.org" + - run: npm publish --ignore-scripts + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + github-npm: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + with: + registry-url: "https://npm.pkg.github.com" + - name: scope package name as required by GitHub Packages + run: npm init -y --scope ${{ github.repository_owner }} + - run: npm publish --ignore-scripts + env: + NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/test/bats/.github/workflows/release_dockerhub.yml b/test/bats/.github/workflows/release_dockerhub.yml new file mode 100644 index 000000000..e5faec1c8 --- /dev/null +++ b/test/bats/.github/workflows/release_dockerhub.yml @@ -0,0 +1,47 @@ +name: Release to docker hub + +on: + release: { types: [published] } + workflow_dispatch: + inputs: + version: + description: 'Version to simulate for deploy' + required: true + +jobs: + dockerhub: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - id: version + run: | + EXPECTED_VERSION=${{ github.event.inputs.version }} + TAG_VERSION=${GITHUB_REF#refs/tags/v} # refs/tags/v1.2.3 -> 1.2.3 + echo ::set-output name=version::${EXPECTED_VERSION:-$TAG_VERSION} + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + + - uses: docker/build-push-action@v2 + with: + platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6 + tags: ${{ secrets.DOCKER_USERNAME }}/bats:${{ steps.version.outputs.version }},${{ secrets.DOCKER_USERNAME }}/bats:latest + push: true + + - uses: docker/build-push-action@v2 + with: + platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6 + tags: ${{ secrets.DOCKER_USERNAME }}/bats:${{ steps.version.outputs.version }}-no-faccessat2,${{ secrets.DOCKER_USERNAME }}/bats:latest-no-faccessat2 + push: true + build-args: bashver=5.1.4 diff --git a/test/bats/.github/workflows/set_nounset.bash b/test/bats/.github/workflows/set_nounset.bash new file mode 100644 index 000000000..8925c2d81 --- /dev/null +++ b/test/bats/.github/workflows/set_nounset.bash @@ -0,0 +1 @@ +set -u diff --git 
a/test/bats/.github/workflows/tests.yml b/test/bats/.github/workflows/tests.yml new file mode 100644 index 000000000..448c9bdcb --- /dev/null +++ b/test/bats/.github/workflows/tests.yml @@ -0,0 +1,265 @@ +name: Tests + +# Controls when the action will run. +on: [push, pull_request, workflow_dispatch] + +jobs: + changelog: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - name: Check that PR is mentioned in Changelog + run: | + if ! ./.github/workflows/check_pr_label.sh "${{github.event.pull_request.number}}" "no changelog"; then + grep "#${{github.event.pull_request.number}}" docs/CHANGELOG.md + fi + if: ${{github.event.pull_request}} + + shfmt: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - run: | + curl https://github.com/mvdan/sh/releases/download/v3.5.1/shfmt_v3.5.1_linux_amd64 -o shfmt + chmod a+x shfmt + - run: ./shfmt --diff . + + shellcheck: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - name: Run shellcheck + run: | + sudo apt-get update -y + sudo apt-get install shellcheck + ./shellcheck.sh + + linux: + strategy: + matrix: + os: ['ubuntu-20.04', 'ubuntu-22.04'] + env_vars: + - '' + # allow for some parallelity without GNU parallel, since it is not installed by default + - 'BATS_NO_PARALLELIZE_ACROSS_FILES=1 BATS_NUMBER_OF_PARALLEL_JOBS=2' + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Run test on OS ${{ matrix.os }} + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + run: | + bash --version + bash -c "time ${{ matrix.env_vars }} bin/bats --print-output-on-failure --formatter tap test" + + unset_variables: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Check unset variables + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + BASH_ENV: ${GITHUB_WORKSPACE}/.github/workflows/set_nounset.bash + run: bin/bats test --print-output-on-failure + + npm_on_linux: + strategy: + matrix: + os: ['ubuntu-20.04', 'ubuntu-22.04'] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + - name: Run test on OS ${{ matrix.os }} + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + run: | + npm pack ./ + sudo npm install -g ./bats-*.tgz + bats test --print-output-on-failure + + windows: + runs-on: windows-2019 + steps: + - uses: actions/checkout@v2 + - run: | + bash --version + bash -c "time bin/bats --print-output-on-failure --formatter tap test" + + npm_on_windows: + strategy: + matrix: + os: ['windows-2019'] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + - run: npm pack ./ + - run: npm install -g (get-item .\bats-*.tgz).FullName + - run: bats -T --print-output-on-failure test + + macos: + strategy: + matrix: + os: ['macos-11', 'macos-12'] + env_vars: + - '' + # allow for some parallelity without GNU parallel, since it is not installed by default + - 'BATS_NO_PARALLELIZE_ACROSS_FILES=1 BATS_NUMBER_OF_PARALLEL_JOBS=2' + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Install unbuffer via expect + run: brew install expect + - name: Run test on OS ${{ matrix.os }} + shell: 'unbuffer bash {0}' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + run: | + bash --version + bash -c "time ${{ matrix.env_vars }} bin/bats 
--print-output-on-failure --formatter tap test" + + npm_on_macos: + strategy: + matrix: + os: ['macos-11', 'macos-12'] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + - name: Install unbuffer via expect + run: brew install expect + - name: Run test on OS ${{ matrix.os }} + shell: 'unbuffer bash {0}' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + run: | + npm pack ./ + # somehow there is already an installed bats version around + npm install --force -g ./bats-*.tgz + bats --print-output-on-failure test + + bash-version: + strategy: + matrix: + version: ['3.2', '4.0', '4.1', '4.2', '4.3', '4.4', '4', '5.0', '5.1', '5', 'rc'] + env_vars: + - '' + # also test running (recursively!) in parallel + - '-e BATS_NUMBER_OF_PARALLEL_JOBS=2' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run test on bash version ${{ matrix.version }} + shell: 'script -q -e -c "bash {0}"' # work around tty issues + run: | + set -e + docker build --build-arg bashver="${{ matrix.version }}" --tag "bats/bats:bash-${{ matrix.version }}" . + docker run -it "bash:${{ matrix.version }}" --version + time docker run -it ${{ matrix.env_vars }} "bats/bats:bash-${{ matrix.version }}" --print-output-on-failure --tap /opt/bats/test + + alpine: + runs-on: ubuntu-latest + container: alpine:latest + steps: + - uses: actions/checkout@v2 + - name: Install dependencies + run: apk add bash ncurses util-linux + - name: Run test on bash version ${{ matrix.version }} + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + run: + time ./bin/bats --print-output-on-failure test/ + + freebsd: + runs-on: macos-12 + strategy: + matrix: + packages: + - flock + - "" + steps: + - uses: actions/checkout@v2 + - uses: vmactions/freebsd-vm@v0 + with: + prepare: pkg install -y bash parallel ${{ matrix.packages }} + run: | + time ./bin/bats --print-output-on-failure test/ + + find_broken_symlinks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + # list symlinks that are broken and force non-zero exit if there are any + - run: "! find . -xtype l | grep ." 
+ + rpm: + runs-on: ubuntu-latest + container: almalinux:8 + steps: + - uses: actions/checkout@v2 + - run: dnf install -y rpm-build rpmdevtools + - name: Build and install RPM and dependencies + run: | + rpmdev-setuptree + version=$(rpmspec -q --qf '%{version}' contrib/rpm/bats.spec) + tar --transform "s,^,bats-core-${version}/," -cf /github/home/rpmbuild/SOURCES/v${version}.tar.gz ./ + rpmbuild -v -bb ./contrib/rpm/bats.spec + ls -al /github/home/rpmbuild/RPMS/noarch/ + dnf install -y /github/home/rpmbuild/RPMS/noarch/bats-*.rpm + dnf -y install procps-ng # avoid timeout failure + - name: Run tests + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + run: bats --print-output-on-failure --filter-tags !dep:install_sh test/ + + dockerfile: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + + - uses: docker/build-push-action@v2 + with: + platforms: linux/amd64 + tags: bats:test + load: true + + - run: docker run -itv "$PWD":/code bats:test --tap --print-output-on-failure test/ + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around + + - uses: actions/checkout@v2 + with: + repository: bats-core/bats-assert + path: bats-assert + + - uses: actions/checkout@v2 + with: + repository: bats-core/bats-support + path: bats-support + + - uses: actions/checkout@v2 + with: + repository: bats-core/bats-file + path: bats-file + + - run: | + <test.sh + apk add sudo python3 # install bats-file's dependencies + ln -sf python3 /usr/bin/python # bats-file uses python without version + bats --tap --print-output-on-failure bats-*/test/ + EOF + docker run -itv "$PWD":/code --entrypoint bash bats:test test.sh + shell: 'script -q -e -c "bash {0}"' # work around tty issues + env: + TERM: linux # fix tput for tty issue work around \ No newline at end of file diff --git a/test/bats/.gitignore b/test/bats/.gitignore new file mode 100644 index 000000000..9895e9ffe --- /dev/null +++ b/test/bats/.gitignore @@ -0,0 +1,10 @@ +/docker-compose.override.yml +/docs/build + +# npm +/bats-*.tgz +# we don't have any deps; un-ignore if that changes +/package-lock.json +test/.bats/run-logs/ +# scratch file that should never be committed +/test.bats \ No newline at end of file diff --git a/test/bats/.gitrepo b/test/bats/.gitrepo new file mode 100644 index 000000000..8ffad77c2 --- /dev/null +++ b/test/bats/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. 
See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = https://github.com/bats-core/bats-core.git + branch = master + commit = a710639259917292afd3a997390586f126a4b9dc + parent = bd90b968876af2fd3d53c93d5404cb22c55296d9 + method = merge + cmdver = 0.4.5 diff --git a/test/bats/.readthedocs.yml b/test/bats/.readthedocs.yml new file mode 100644 index 000000000..4959a3ac4 --- /dev/null +++ b/test/bats/.readthedocs.yml @@ -0,0 +1,9 @@ +version: 2 + +sphinx: + configuration: docs/source/conf.py + +python: + version: 3.7 + install: + - requirements: docs/source/requirements.txt \ No newline at end of file diff --git a/test/bats/AUTHORS b/test/bats/AUTHORS new file mode 100644 index 000000000..71df331b1 --- /dev/null +++ b/test/bats/AUTHORS @@ -0,0 +1,4 @@ +Andrew Martin (https://control-plane.io/) +Bianca Tamayo (https://biancatamayo.me/) +Jason Karns (http://jasonkarns.com/) +Mike Bland (https://mike-bland.com/) diff --git a/test/bats/Dockerfile b/test/bats/Dockerfile new file mode 100644 index 000000000..85f3c5fc5 --- /dev/null +++ b/test/bats/Dockerfile @@ -0,0 +1,43 @@ +ARG bashver=latest + +FROM bash:${bashver} +ARG TINI_VERSION=v0.19.0 +ARG TARGETPLATFORM +ARG LIBS_VER_SUPPORT=0.3.0 +ARG LIBS_VER_FILE=0.3.0 +ARG LIBS_VER_ASSERT=2.1.0 +ARG LIBS_VER_DETIK=1.1.0 +ARG UID=1001 +ARG GID=115 + + +# https://github.com/opencontainers/image-spec/blob/main/annotations.md +LABEL maintainer="Bats-core Team" +LABEL org.opencontainers.image.authors="Bats-core Team" +LABEL org.opencontainers.image.title="Bats" +LABEL org.opencontainers.image.description="Bash Automated Testing System" +LABEL org.opencontainers.image.url="https://hub.docker.com/r/bats/bats" +LABEL org.opencontainers.image.source="https://github.com/bats-core/bats-core" +LABEL org.opencontainers.image.base.name="docker.io/bash" + +COPY ./docker /tmp/docker +# default to amd64 when not running in buildx environment that provides target platform +RUN /tmp/docker/install_tini.sh "${TARGETPLATFORM-linux/amd64}" +# Install bats libs +RUN /tmp/docker/install_libs.sh support ${LIBS_VER_SUPPORT} +RUN /tmp/docker/install_libs.sh file ${LIBS_VER_FILE} +RUN /tmp/docker/install_libs.sh assert ${LIBS_VER_ASSERT} +RUN /tmp/docker/install_libs.sh detik ${LIBS_VER_DETIK} + +# Install parallel and accept the citation notice (we aren't using this in a +# context where it make sense to cite GNU Parallel). +RUN apk add --no-cache parallel ncurses && \ + mkdir -p ~/.parallel && touch ~/.parallel/will-cite \ + && mkdir /code + +RUN ln -s /opt/bats/bin/bats /usr/local/bin/bats +COPY . /opt/bats/ + +WORKDIR /code/ + +ENTRYPOINT ["/tini", "--", "bash", "bats"] diff --git a/test/bats/LICENSE.md b/test/bats/LICENSE.md new file mode 100644 index 000000000..0c7429978 --- /dev/null +++ b/test/bats/LICENSE.md @@ -0,0 +1,53 @@ +Copyright (c) 2017 bats-core contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--- + +* [bats-core] is a continuation of [bats]. Copyright for portions of the + bats-core project are held by Sam Stephenson, 2014 as part of the project + [bats], licensed under MIT: + +Copyright (c) 2014 Sam Stephenson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +For details, please see the [version control history][commits]. + +[bats-core]: https://github.com/bats-core/bats-core +[bats]:https://github.com/sstephenson/bats +[commits]:https://github.com/bats-core/bats-core/commits/master diff --git a/test/bats/README.md b/test/bats/README.md new file mode 100644 index 000000000..b41b3c0e3 --- /dev/null +++ b/test/bats/README.md @@ -0,0 +1,127 @@ +# Bats-core: Bash Automated Testing System + +[![Latest release](https://img.shields.io/github/release/bats-core/bats-core.svg)](https://github.com/bats-core/bats-core/releases/latest) +[![npm package](https://img.shields.io/npm/v/bats.svg)](https://www.npmjs.com/package/bats) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/bats-core/bats-core/blob/master/LICENSE.md) +[![Continuous integration status](https://github.com/bats-core/bats-core/workflows/Tests/badge.svg)](https://github.com/bats-core/bats-core/actions?query=workflow%3ATests) +[![Read the docs status](https://readthedocs.org/projects/bats-core/badge/)](https://bats-core.readthedocs.io) + +[![Join the chat in bats-core/bats-core on gitter](https://badges.gitter.im/bats-core/bats-core.svg)][gitter] + +Bats is a [TAP](https://testanything.org/)-compliant testing framework for Bash. It provides a simple +way to verify that the UNIX programs you write behave as expected. + +A Bats test file is a Bash script with special syntax for defining test cases. +Under the hood, each test case is just a function with a description. 
+ +```bash +#!/usr/bin/env bats + +@test "addition using bc" { + result="$(echo 2+2 | bc)" + [ "$result" -eq 4 ] +} + +@test "addition using dc" { + result="$(echo 2 2+p | dc)" + [ "$result" -eq 4 ] +} +``` + +Bats is most useful when testing software written in Bash, but you can use it to +test any UNIX program. + +Test cases consist of standard shell commands. Bats makes use of Bash's +`errexit` (`set -e`) option when running test cases. If every command in the +test case exits with a `0` status code (success), the test passes. In this way, +each line is an assertion of truth. + +## Table of contents + +**NOTE** The documentation has moved to + + + +- [Testing](#testing) +- [Support](#support) +- [Contributing](#contributing) +- [Contact](#contact) +- [Version history](#version-history) +- [Background](#background) + * [What's the plan and why?](#whats-the-plan-and-why) + * [Why was this fork created?](#why-was-this-fork-created) +- [Copyright](#copyright) + + + +## Testing + +```sh +bin/bats --tap test +``` + +See also the [CI](./.github/workflows/tests.yml) settings for the current test environment and +scripts. + +## Support + +The Bats source code repository is [hosted on +GitHub](https://github.com/bats-core/bats-core). There you can file bugs on the +issue tracker or submit tested pull requests for review. + +For real-world examples from open-source projects using Bats, see [Projects +Using Bats](https://github.com/bats-core/bats-core/wiki/Projects-Using-Bats) on +the wiki. + +To learn how to set up your editor for Bats syntax highlighting, see [Syntax +Highlighting](https://github.com/bats-core/bats-core/wiki/Syntax-Highlighting) +on the wiki. + +## Contributing + +For now see the [`docs`](docs) folder for project guides, work with us on the wiki +or look at the other communication channels. + +## Contact + +- You can find and chat with us on our [Gitter]. + +## Version history + +See `docs/CHANGELOG.md`. + +## Background + + +### What's the plan and why? + + +**Tuesday, September 19, 2017:** This was forked from [Bats][bats-orig] at +commit [0360811][]. It was created via `git clone --bare` and `git push +--mirror`. + +[bats-orig]: https://github.com/sstephenson/bats +[0360811]: https://github.com/sstephenson/bats/commit/03608115df2071fff4eaaff1605768c275e5f81f + +This [bats-core repo](https://github.com/bats-core/bats-core) is the community-maintained Bats project. + + +### Why was this fork created? + + +There was an initial [call for maintainers][call-maintain] for the original Bats repository, but write access to it could not be obtained. With development activity stalled, this fork allowed ongoing maintenance and forward progress for Bats. + +[call-maintain]: https://github.com/sstephenson/bats/issues/150 + +## Copyright + +© 2017-2022 bats-core organization + +© 2011-2016 Sam Stephenson + +Bats is released under an MIT-style license; see `LICENSE.md` for details. + +See the [parent project](https://github.com/bats-core) at GitHub or the +[AUTHORS](AUTHORS) file for the current project maintainer team. 
+ +[gitter]: https://gitter.im/bats-core/bats-core diff --git a/test/bats/bin/bats b/test/bats/bin/bats new file mode 100755 index 000000000..892470c1e --- /dev/null +++ b/test/bats/bin/bats @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +set -euo pipefail + +if command -v greadlink >/dev/null; then + bats_readlinkf() { + greadlink -f "$1" + } +else + bats_readlinkf() { + readlink -f "$1" + } +fi + +fallback_to_readlinkf_posix() { + bats_readlinkf() { + [ "${1:-}" ] || return 1 + max_symlinks=40 + CDPATH='' # to avoid changing to an unexpected directory + + target=$1 + [ -e "${target%/}" ] || target=${1%"${1##*[!/]}"} # trim trailing slashes + [ -d "${target:-/}" ] && target="$target/" + + cd -P . 2>/dev/null || return 1 + while [ "$max_symlinks" -ge 0 ] && max_symlinks=$((max_symlinks - 1)); do + if [ ! "$target" = "${target%/*}" ]; then + case $target in + /*) cd -P "${target%/*}/" 2>/dev/null || break ;; + *) cd -P "./${target%/*}" 2>/dev/null || break ;; + esac + target=${target##*/} + fi + + if [ ! -L "$target" ]; then + target="${PWD%/}${target:+/}${target}" + printf '%s\n' "${target:-/}" + return 0 + fi + + # `ls -dl` format: "%s %u %s %s %u %s %s -> %s\n", + # , , , , + # , , , + # https://pubs.opengroup.org/onlinepubs/9699919799/utilities/ls.html + link=$(ls -dl -- "$target" 2>/dev/null) || break + target=${link#*" $target -> "} + done + return 1 + } +} + +if ! BATS_PATH=$(bats_readlinkf "${BASH_SOURCE[0]}" 2>/dev/null); then + fallback_to_readlinkf_posix + BATS_PATH=$(bats_readlinkf "${BASH_SOURCE[0]}") +fi + +export BATS_ROOT=${BATS_PATH%/*/*} +export -f bats_readlinkf +exec env BATS_ROOT="$BATS_ROOT" "$BATS_ROOT/libexec/bats-core/bats" "$@" diff --git a/test/bats/contrib/release.sh b/test/bats/contrib/release.sh new file mode 100755 index 000000000..2e4805e1f --- /dev/null +++ b/test/bats/contrib/release.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +# +# bats-core git releaser +# +## Usage: %SCRIPT_NAME% [options] +## +## Options: +## --major Major version bump +## --minor Minor version bump +## --patch Patch version bump +## +## -v, --version Print version +## --debug Enable debug mode +## -h, --help Display this message +## + +set -Eeuo pipefail + +DIR=$(cd "$(dirname "${0}")" && pwd) +THIS_SCRIPT="${DIR}/$(basename "${0}")" +BATS_VERSION=$( + # shellcheck disable=SC1090 + source <(grep '^export BATS_VERSION=' libexec/bats-core/bats) + echo "${BATS_VERSION}" +) +declare -r DIR +declare -r THIS_SCRIPT +declare -r BATS_VERSION + +BUMP_INTERVAL="" +NEW_BATS_VERSION="" + +main() { + handle_arguments "${@}" + + if [[ "${BUMP_INTERVAL:-}" == "" ]]; then + echo "${BATS_VERSION}" + exit 0 + fi + + local NEW_BATS_VERSION + NEW_BATS_VERSION=$(semver bump "${BUMP_INTERVAL}" "${BATS_VERSION}") + declare -r NEW_BATS_VERSION + + local BATS_RELEASE_NOTES="/tmp/bats-release-${NEW_BATS_VERSION}" + + echo "Releasing: ${BATS_VERSION} to ${NEW_BATS_VERSION}" + echo + + echo "Ensure docs/CHANGELOG.md is correctly updated" + + replace_in_files + + write_changelog + + git diff --staged + + cat </dev/null + +get_version() { + echo "${THIS_SCRIPT_VERSION:-0.1}" +} + +main "${@}" diff --git a/test/bats/contrib/rpm/bats.spec b/test/bats/contrib/rpm/bats.spec new file mode 100644 index 000000000..91c383026 --- /dev/null +++ b/test/bats/contrib/rpm/bats.spec @@ -0,0 +1,66 @@ +%global provider github.com +%global project bats-core +%global repo bats-core + +Name: bats +Version: 1.9.0 +Release: 1%{?dist} +Summary: Bash Automated Testing System + +Group: Development/Libraries +License: MIT +URL: 
https://%{provider}/%{project}/%{repo} +Source0: https://%{provider}/%{project}/%{repo}/archive/v%{version}.tar.gz + +BuildArch: noarch + +Requires: bash + +%description +Bats is a TAP-compliant testing framework for Bash. +It provides a simple way to verify that the UNIX programs you write behave as expected. +Bats is most useful when testing software written in Bash, but you can use it to test any UNIX program. + +%prep +%setup -q -n %{repo}-%{version} + +%install +mkdir -p ${RPM_BUILD_ROOT}%{_prefix} ${RPM_BUILD_ROOT}%{_libexecdir} ${RPM_BUILD_ROOT}%{_mandir} +./install.sh ${RPM_BUILD_ROOT}%{_prefix} + +%clean +rm -rf $RPM_BUILD_ROOT + +%check + +%files +%doc README.md LICENSE.md +%{_bindir}/%{name} +%{_libexecdir}/%{repo} +%{_mandir}/man1/%{name}.1.gz +%{_mandir}/man7/%{name}.7.gz +/usr/lib/%{repo}/common.bash +/usr/lib/%{repo}/formatter.bash +/usr/lib/%{repo}/preprocessing.bash +/usr/lib/%{repo}/semaphore.bash +/usr/lib/%{repo}/test_functions.bash +/usr/lib/%{repo}/tracing.bash +/usr/lib/%{repo}/validator.bash +/usr/lib/%{repo}/warnings.bash + +%changelog +* Wed Sep 07 2022 Marcel Hecko - 1.2.0-1 +- Fix and test RPM build on Rocky Linux release 8.6 + +* Sun Jul 08 2018 mbland - 1.1.0-1 +- Increase version to match upstream release + +* Mon Jun 18 2018 pixdrift - 1.0.2-1 +- Increase version to match upstream release +- Relocate libraries to bats-core subdirectory + +* Sat Jun 09 2018 pixdrift - 1.0.1-1 +- Increase version to match upstream release + +* Fri Jun 08 2018 pixdrift - 1.0.0-1 +- Initial package build of forked (bats-core) github project diff --git a/test/bats/contrib/semver b/test/bats/contrib/semver new file mode 100755 index 000000000..ab75586cc --- /dev/null +++ b/test/bats/contrib/semver @@ -0,0 +1,358 @@ +#!/usr/bin/env bash + +# v3.0.0 +# https://github.com/fsaintjacques/semver-tool + +set -o errexit -o nounset -o pipefail + +NAT='0|[1-9][0-9]*' +ALPHANUM='[0-9]*[A-Za-z-][0-9A-Za-z-]*' +IDENT="$NAT|$ALPHANUM" +FIELD='[0-9A-Za-z-]+' + +SEMVER_REGEX="\ +^[vV]?\ +($NAT)\\.($NAT)\\.($NAT)\ +(\\-(${IDENT})(\\.(${IDENT}))*)?\ +(\\+${FIELD}(\\.${FIELD})*)?$" + +PROG=semver +PROG_VERSION="3.0.0" + +USAGE="\ +Usage: + $PROG bump (major|minor|patch|release|prerel |build ) + $PROG compare + $PROG get (major|minor|patch|release|prerel|build) + $PROG --help + $PROG --version + +Arguments: + A version must match the following regular expression: + \"${SEMVER_REGEX}\" + In English: + -- The version must match X.Y.Z[-PRERELEASE][+BUILD] + where X, Y and Z are non-negative integers. + -- PRERELEASE is a dot separated sequence of non-negative integers and/or + identifiers composed of alphanumeric characters and hyphens (with + at least one non-digit). Numeric identifiers must not have leading + zeros. A hyphen (\"-\") introduces this optional part. + -- BUILD is a dot separated sequence of identifiers composed of alphanumeric + characters and hyphens. A plus (\"+\") introduces this optional part. + + See definition. + + A string as defined by PRERELEASE above. + + A string as defined by BUILD above. + +Options: + -v, --version Print the version of this tool. + -h, --help Print this help message. + +Commands: + bump Bump by one of major, minor, patch; zeroing or removing + subsequent parts. \"bump prerel\" sets the PRERELEASE part and + removes any BUILD part. \"bump build\" sets the BUILD part. + \"bump release\" removes any PRERELEASE or BUILD parts. + The bumped version is written to stdout. 
+ + compare Compare with , output to stdout the + following values: -1 if is newer, 0 if equal, 1 if + older. The BUILD part is not used in comparisons. + + get Extract given part of , where part is one of major, minor, + patch, prerel, build, or release. + +See also: + https://semver.org -- Semantic Versioning 2.0.0" + +function error { + echo -e "$1" >&2 + exit 1 +} + +function usage-help { + error "$USAGE" +} + +function usage-version { + echo -e "${PROG}: $PROG_VERSION" + exit 0 +} + +function validate-version { + local version=$1 + if [[ "$version" =~ $SEMVER_REGEX ]]; then + # if a second argument is passed, store the result in var named by $2 + if [ "$#" -eq "2" ]; then + local major=${BASH_REMATCH[1]} + local minor=${BASH_REMATCH[2]} + local patch=${BASH_REMATCH[3]} + local prere=${BASH_REMATCH[4]} + local build=${BASH_REMATCH[8]} + eval "$2=(\"$major\" \"$minor\" \"$patch\" \"$prere\" \"$build\")" + else + echo "$version" + fi + else + error "version $version does not match the semver scheme 'X.Y.Z(-PRERELEASE)(+BUILD)'. See help for more information." + fi +} + +function is-nat { + [[ "$1" =~ ^($NAT)$ ]] +} + +function is-null { + [ -z "$1" ] +} + +function order-nat { + [ "$1" -lt "$2" ] && { + echo -1 + return + } + [ "$1" -gt "$2" ] && { + echo 1 + return + } + echo 0 +} + +function order-string { + [[ $1 < $2 ]] && { + echo -1 + return + } + [[ $1 > $2 ]] && { + echo 1 + return + } + echo 0 +} + +# given two (named) arrays containing NAT and/or ALPHANUM fields, compare them +# one by one according to semver 2.0.0 spec. Return -1, 0, 1 if left array ($1) +# is less-than, equal, or greater-than the right array ($2). The longer array +# is considered greater-than the shorter if the shorter is a prefix of the longer. +# +function compare-fields { + local l="$1[@]" + local r="$2[@]" + local leftfield=("${!l}") + local rightfield=("${!r}") + local left + local right + + local i=$((-1)) + local order=$((0)) + + while true; do + [ $order -ne 0 ] && { + echo $order + return + } + + : $((i++)) + left="${leftfield[$i]}" + right="${rightfield[$i]}" + + is-null "$left" && is-null "$right" && { + echo 0 + return + } + is-null "$left" && { + echo -1 + return + } + is-null "$right" && { + echo 1 + return + } + + is-nat "$left" && is-nat "$right" && { + order=$(order-nat "$left" "$right") + continue + } + is-nat "$left" && { + echo -1 + return + } + is-nat "$right" && { + echo 1 + return + } + { + order=$(order-string "$left" "$right") + continue + } + done +} + +# shellcheck disable=SC2206 # checked by "validate"; ok to expand prerel id's into array +function compare-version { + local order + validate-version "$1" V + validate-version "$2" V_ + + # compare major, minor, patch + + local left=("${V[0]}" "${V[1]}" "${V[2]}") + local right=("${V_[0]}" "${V_[1]}" "${V_[2]}") + + order=$(compare-fields left right) + [ "$order" -ne 0 ] && { + echo "$order" + return + } + + # compare pre-release ids when M.m.p are equal + + local prerel="${V[3]:1}" + local prerel_="${V_[3]:1}" + local left=(${prerel//./ }) + local right=(${prerel_//./ }) + + # if left and right have no pre-release part, then left equals right + # if only one of left/right has pre-release part, that one is less than simple M.m.p + + [ -z "$prerel" ] && [ -z "$prerel_" ] && { + echo 0 + return + } + [ -z "$prerel" ] && { + echo 1 + return + } + [ -z "$prerel_" ] && { + echo -1 + return + } + + # otherwise, compare the pre-release id's + + compare-fields left right +} + +function command-bump { + local new + local version + local 
sub_version + local command + + case $# in + 2) case $1 in + major | minor | patch | release) + command=$1 + version=$2 + ;; + *) usage-help ;; + esac ;; + 3) case $1 in + prerel | build) + command=$1 + sub_version=$2 version=$3 + ;; + *) usage-help ;; + esac ;; + *) usage-help ;; + esac + + validate-version "$version" parts + # shellcheck disable=SC2154 + local major="${parts[0]}" + local minor="${parts[1]}" + local patch="${parts[2]}" + local prere="${parts[3]}" + local build="${parts[4]}" + + case "$command" in + major) new="$((major + 1)).0.0" ;; + minor) new="${major}.$((minor + 1)).0" ;; + patch) new="${major}.${minor}.$((patch + 1))" ;; + release) new="${major}.${minor}.${patch}" ;; + prerel) new=$(validate-version "${major}.${minor}.${patch}-${sub_version}") ;; + build) new=$(validate-version "${major}.${minor}.${patch}${prere}+${sub_version}") ;; + *) usage-help ;; + esac + + echo "$new" + exit 0 +} + +function command-compare { + local v + local v_ + + case $# in + 2) + v=$(validate-version "$1") + v_=$(validate-version "$2") + ;; + *) usage-help ;; + esac + + set +u # need unset array element to evaluate to null + compare-version "$v" "$v_" + exit 0 +} + +# shellcheck disable=SC2034 +function command-get { + local part version + + if [[ "$#" -ne "2" ]] || [[ -z "$1" ]] || [[ -z "$2" ]]; then + usage-help + exit 0 + fi + + part="$1" + version="$2" + + validate-version "$version" parts + local major="${parts[0]}" + local minor="${parts[1]}" + local patch="${parts[2]}" + local prerel="${parts[3]:1}" + local build="${parts[4]:1}" + local release="${major}.${minor}.${patch}" + + case "$part" in + major | minor | patch | release | prerel | build) echo "${!part}" ;; + *) usage-help ;; + esac + + exit 0 +} + +case $# in +0) + echo "Unknown command: $*" + usage-help + ;; +esac + +case $1 in +--help | -h) + echo -e "$USAGE" + exit 0 + ;; +--version | -v) usage-version ;; +bump) + shift + command-bump "$@" + ;; +get) + shift + command-get "$@" + ;; +compare) + shift + command-compare "$@" + ;; +*) + echo "Unknown arguments: $*" + usage-help + ;; +esac diff --git a/test/bats/docker-compose.override.dist b/test/bats/docker-compose.override.dist new file mode 100644 index 000000000..f2b1ed8fc --- /dev/null +++ b/test/bats/docker-compose.override.dist @@ -0,0 +1,8 @@ +# Copy this file to docker-compose.override.yml +version: '3.6' +services: + bats: + entrypoint: + - "bash" +networks: + default: diff --git a/test/bats/docker-compose.yml b/test/bats/docker-compose.yml new file mode 100644 index 000000000..21cd99481 --- /dev/null +++ b/test/bats/docker-compose.yml @@ -0,0 +1,13 @@ +version: '3.6' +services: + bats: + build: + context: "." 
+ dockerfile: "Dockerfile" + networks: + - "default" + user: "root" + volumes: + - "./:/opt/bats" +networks: + default: diff --git a/test/bats/docker/install_libs.sh b/test/bats/docker/install_libs.sh new file mode 100755 index 000000000..455865375 --- /dev/null +++ b/test/bats/docker/install_libs.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset + + +LIBNAME="${1:-support}" +LIVERSION="${2:-0.3.0}" +BASEURL='https://github.com/bats-core' +DESTDIR="${BATS_LIBS_DEST_DIR:-/usr/lib/bats}" +TMPDIR=$(mktemp -d -t bats-libs-XXXXXX) +USAGE="Please provide the bats libe name and version \nFor example: install_libs.sh support 2.0.0\n" + +trap 'test -d "${TMPDIR}" && rm -fr "${TMPDIR}"' EXIT ERR SIGINT SIGTERM + +[[ $# -ne 2 ]] && { _log FATAL "$USAGE"; exit 1; } + +_log() { + printf "$(date "+%Y-%m-%d %H:%M:%S") - %s - %s\n" "${1}" "${2}" +} + +create_temp_dirs() { + mkdir -p "${TMPDIR}/${1}" + if [[ ${LIBNAME} != "detik" ]]; then + mkdir -p "${DESTDIR}/bats-${1}/src" + else + _log INFO "Skipping src 'cause Detik does not need it" + fi +} + +download_extract_source() { + wget -qO- ${BASEURL}/bats-"${1}"/archive/refs/tags/v"${2}".tar.gz | tar xz -C "${TMPDIR}/${1}" --strip-components 1 +} + +install_files() { + if [[ ${LIBNAME} != "detik" ]]; then + install -Dm755 "${TMPDIR}/${1}/load.bash" "${DESTDIR}/bats-${1}/load.bash" + for fn in "${TMPDIR}/${1}/src/"*.bash; do install -Dm755 "$fn" "${DESTDIR}/bats-${1}/src/$(basename "$fn")"; done + else + for fn in "${TMPDIR}/${1}/lib/"*.bash; do install -Dm755 "$fn" "${DESTDIR}/bats-${1}/$(basename "$fn")"; done + fi +} + +_log INFO "Starting to install ${LIBNAME} ver ${LIVERSION}" +_log INFO "Creating directories" +create_temp_dirs "${LIBNAME}" +_log INFO "Downloading" +download_extract_source "${LIBNAME}" "${LIVERSION}" +_log INFO "Installation" +install_files "${LIBNAME}" +_log INFO "Done, cleaning.." 
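The ~install_libs.sh~ script above installs each helper library under ~${BATS_LIBS_DEST_DIR:-/usr/lib/bats}~, which matches the default ~$BATS_LIB_PATH~ searched by ~bats_load_library~ (see the bats-core changelog entries for #548 and #628 further below).  As a minimal, illustrative sketch of how a test file consumes libraries installed this way (the chosen libraries and the trivial test body are assumptions for illustration only; the WCT tests load ~wct-bats.sh~ the same way):

#+begin_example
  #!/usr/bin/env bats

  # Helper libraries are found via $BATS_LIB_PATH, e.g. /usr/lib/bats.
  # Assumes install_libs.sh (or equivalent) placed them there.
  bats_load_library bats-support
  bats_load_library bats-assert

  @test "addition using bc" {
      run bash -c 'echo 2+2 | bc'
      assert_success
      assert_output "4"
  }
#+end_example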
diff --git a/test/bats/docker/install_tini.sh b/test/bats/docker/install_tini.sh new file mode 100755 index 000000000..8d98da14c --- /dev/null +++ b/test/bats/docker/install_tini.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -e + +case ${1#linux/} in +386) + TINI_PLATFORM=i386 + ;; +arm/v7) + TINI_PLATFORM=armhf + ;; +arm/v6) + TINI_PLATFORM=armel + ;; +*) + TINI_PLATFORM=${1#linux/} + ;; +esac + +echo "Installing tini for $TINI_PLATFORM" + +wget "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${TINI_PLATFORM}" -O /tini +wget "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${TINI_PLATFORM}.asc" -O /tini.asc + +chmod +x /tini + +apk add gnupg +gpg --import ` (without space) (#657) + +## [1.8.0] - 2022-09-15 + +### Added + +* using external formatters via `--formatter ` (also works for + `--report-formatter`) (#602) +* running only tests that failed in the last run via `--filter-status failed` (#483) +* variable `BATS_TEST_RETRIES` that specifies how often a test should be + reattempted before it is considered failed (#618) +* Docker tags `latest-no-faccessat2` and `-no-faccessat2` for + avoiding `bash: bats: No such file or directory` on `docker<20.10` (or + `runc`/`# bats file_tags=` and + `--filter-tags ` for tagging tests for execution filters (#642) +* warning BW03: inform about `setup_suite` in wrong file (`.bats` instead of `setup_suite.bash`) (#652) + +#### Documentation + +* update gotcha about negated statements: Recommend using `run !` on Bats + versions >=1.5.0 (#593) +* add documentation for `bats_require_minimum_version` (#595) +* improve documentation about `setup_suite` (#652) + +### Fixed + +* added missing shebang (#597) +* remaining instances of `run -` being incorrectly documented as `run =` (#599) +* allow `--gather-test-outputs-in ` to work with existing, empty + directories (#603) + * also add `--clean-and-gather-test-outputs-in ` for improved UX +* double slashes in paths derived from TMPDIR on MacOS (#607) +* fix `load` in `teardown` marking failed tests as not run (#612) +* fix unset variable errors (with set -u) and add regression test (#621) +* `teardown_file` errors don't swallow `setup_file` errors anymore, the behavior + is more like `teardown`'s now (only `return`/last command can trigger `teardown` + errors) (#623) +* upgraded from deprecated CI envs for MacOS (10 -> 11,12) and Ubuntu + (18.04 -> 22.04) (#630) +* add `/usr/lib/bats` as default value for `BATS_LIB_PATH` (#628) +* fix unset variable in `bats-formatter-junit` when `setup_file` fails (#632) +* unify error behavior of `teardown`/`teardown_file`/`teardown_suite` functions: + only fail via return code, not via ERREXIT (#633) +* fix unbound variable errors with `set -u` on `setup_suite` failures (#643) +* fix `load` not being available in `setup_suite` (#644) +* fix RPM spec, add regression test (#648) +* fix handling of `IFS` by `run` (#650) +* only print `setup_suite`'s stderr on errors (#649) + +#### Documentation + +* fix typos, spelling and links (#596, #604, #619, #627) +* fix redirection order of an example in the tutorial (#617) + +## [1.7.0] - 2022-05-14 + +### Added + +* Pretty formatter print filename when entering file (#561) +* BATS_TEST_NAME_PREFIX allows prefixing test names on stdout and in reports (#561) +* setup_suite and teardown_suite (#571, #585) +* out-of-band warning infrastructure, with following warnings: + * BW01: run command not found (exit code 127) (#586) + * BW02: run uses flags without proper 
`bats_require_minimum_version` guard (#587) +* `bats_require_minimum_version` to guard code that would not run on older + versions (#587) + +#### Documentation + +* document `$BATS_VERSION` (#557) +* document new warning infrastructure (#589, #587, #586) + +### Fixed + +* unbound variable errors in formatters when using `SHELLOPTS=nounset` (`-u`) (#558) +* don't require `flock` *and* `shlock` for parallel mode test (#554) +* print name of failing test when using TAP13 with timing information (#559, #555) +* removed broken symlink, added regression test (#560) +* don't show empty lines as `#` with pretty formatter (#561) +* prevent `teardown`, `teardown_file`, and `teardown_suite` from overriding bats' + exit code by setting `$status` (e.g. via calling `run`) (#581, #575) + * **CRITICAL**: this can return exit code 0 despite failed tests, thus preventing + your CI from reporting test failures! The regression happened in version 1.6.0. +* `run --keep-empty-lines` now reports 0 lines on empty `$output` (#583) + +#### Documentation + +* remove 2018 in title, update copyright dates in README.md (#567) +* fix broken links (#568) +* corrected invalid documentation of `run -N` (had `=N` instead) (#579) + * **CRITICAL**: using the incorrect form can lead to silent errors. See + [issue #578](https://github.com/bats-core/bats-core/issues/578) for more + details and how to find out if your tests are affected. + +## [1.6.1] - 2022-05-14 + +### Fixed + +* prevent `teardown`, `teardown_file`, and `teardown_suite` from overriding bats' + exit code by setting `$status` (e.g. via calling `run`) (#581, #575) + * **CRITICAL**: this can return exit code 0 despite failed tests, thus preventing + your CI from reporting test failures! The regression happened in version 1.6.0. + +#### Documentation + +* corrected invalid documentation of `run -N` (had `=N` instead) (#579) + * **CRITICAL**: using the incorrect form can lead to silent errors. See + [issue #578](https://github.com/bats-core/bats-core/issues/578) for more + details and how to find out if your tests are affected. 
+ +## [1.6.0] - 2022-02-24 + +### Added + +* new flag `--code-quote-style` (and `$BATS_CODE_QUOTE_STYLE`) to customize +quotes around code blocks in error output (#506) +* an example/regression test for running background tasks without blocking the + test run (#525, #535) +* `bats_load_library` for loading libraries from the search path + `$BATS_LIB_PATH` (#548) + +### Fixed + +* improved error trace for some broken cases (#279) +* removed leftover debug file `/tmp/latch` in selftest suite + (single use latch) (#516) +* fix recurring errors on CTRL+C tests with NPM on Windows in selftest suite (#516) +* fixed leaking of local variables from debug trap (#520) +* don't mark FD3 output from `teardown_file` as `` in junit output (#532) +* fix unbound variable error with Bash pre 4.4 (#550) + +#### Documentation + +* remove links to defunct freenode IRC channel (#515) +* improved grammar (#534) +* fixed link to TAP spec (#537) + +## [1.5.0] - 2021-10-22 + +### Added + +* new command line flags (#488) + * `--verbose-run`: Make `run` print `$output` by default + * `-x`, `--trace`: Print test commands as they are executed (like `set -x`)` + * `--show-output-of-passing-tests`: Print output of passing tests + * `--print-output-on-failure`: Automatically print the value of `$output` on + failed tests + * `--gather-test-outputs-in `: Gather the output of failing **and** + passing tests as files in directory +* Experimental: add return code checks to `run` via `!`/`-` (#367, #507) +* `install.sh` and `uninstall.sh` take an optional second parameter for the lib + folder name to allow for multilib install, e.g. into lib64 (#452) +* add `run` flag `--keep-empty-lines` to retain empty lines in `${lines[@]}` (#224, + a894fbfa) +* add `run` flag `--separate-stderr` which also fills `$stderr` and + `$stderr_lines` (#47, 5c9b173d, #507) + +### Fixed + +* don't glob `run`'s `$output` when splitting into `${lines[@]}` + (#151, #152, #158, #156, #281, #289) +* remove empty line after test with pretty formatter on some terminals (#481) +* don't run setup_file/teardown_file on files without tests, e.g. due to + filtering (#484) +* print final line without newline on Bash 3.2 for midtest (ERREXIT) failures + too (#495, #145) +* abort with error on missing flock/shlock when running in parallel mode (#496) +* improved `set -u` test and fixed some unset variable accesses (#498, #501) +* shorten suite/file/test temporary folder paths to leave enough space even on + restricted systems (#503) + +#### Documentation + +* minor edits (#478) + +## [1.4.1] - 2021-07-24 + +### Added + +* Docker image architectures amd64, 386, arm64, arm/v7, arm/v6, ppc64le, s390x (#438) + +### Fixed + +* automatic push to Dockerhub (#438) + +## [1.4.0] - 2021-07-23 + +### Added + +* added BATS_TEST_TMPDIR, BATS_FILE_TMPDIR, BATS_SUITE_TMPDIR (#413) +* added checks and improved documentation for `$BATS_TMPDIR` (#410) +* the docker container now uses [tini](https://github.com/krallin/tini) as the + container entrypoint to improve signal forwarding (#407) +* script to uninstall bats from a given prefix (#400) +* replace preprocessed file path (e.g. `/tmp/bats-run-22908-NP0f9h/bats.23102.src`) + with original filename in stdout/err (but not FD3!) 
(#429) +* print aborted command on SIGINT/CTRL+C (#368) +* print error message when BATS_RUN_TMPDIR could not be created (#422) + +#### Documentation + +* added tutorial for new users (#397) +* fixed example invocation of docker container (#440) +* minor edits (#431, #439, #445, #463, #464, #465) + +### Fixed + +* fix `bats_tap_stream_unknown: command not found` with pretty formatter, when + writing non compliant extended output (#412) +* avoid collisions on `$BATS_RUN_TMPDIR` with `--no-tempdir-cleanup` and docker + by using `mktemp` additionally to PID (#409) +* pretty printer now puts text that is printed to FD 3 below the test name (#426) +* `rm semaphores/slot-: No such file or directory` in parallel mode on MacOS + (#434, #433) +* fix YAML blocks in TAP13 formatter using `...` instead of `---` to start + a block (#442) +* fixed some typos in comments (#441, #447) +* ensure `/code` exists in docker container, to make examples work again (#440) +* also display error messages from free code (#429) +* npm installed version on Windows: fix broken internal LIBEXEC paths (#459) + +## [1.3.0] - 2021-03-08 + +### Added + +* custom test-file extension via `BATS_FILE_EXTENSION` when searching for test + files in a directory (#376) +* TAP13 formatter, including millisecond timing (#337) +* automatic release to NPM via GitHub Actions (#406) + +#### Documentation + +* added documentation about overusing `run` (#343) +* improved documentation of `load` (#332) + +### Changed + +* recursive suite mode will follow symlinks now (#370) +* split options for (file-) `--report-formatter` and (stdout) `--formatter` (#345) + * **WARNING**: This changes the meaning of `--formatter junit`. + stdout will now show unified xml instead of TAP. From now on, please use + `--report-formatter junit` to obtain the `.xml` report file! 
+* removed `--parallel-preserve-environment` flag, as this is the default + behavior (#324) +* moved CI from Travis/AppVeyor to GitHub Actions (#405) +* preprocessed files are no longer removed if `--no-tempdir-cleanup` is + specified (#395) + +#### Documentation + +* moved documentation to [readthedocs](https://bats-core.readthedocs.io/en/latest/) + +### Fixed + +#### Correctness + +* fix internal failures due to unbound variables when test files use `set -u` (#392) +* fix internal failures due to changes to `$PATH` in test files (#387) +* fix test duration always being 0 on busybox installs (#363) +* fix hangs on CTRL+C (#354) +* make `BATS_TEST_NUMBER` count per file again (#326) +* include `lib/` in npm package (#352) + +#### Performance + +* don't fork bomb in parallel mode (#339) +* preprocess each file only once (#335) +* avoid running duplicate files n^2 times (#338) + +#### Documentation + +* fix documentation for `--formatter junit` (#334) +* fix documentation for `setup_file` variables (#333) +* fix link to examples page (#331) +* fix link to "File Descriptor 3" section (#301) + +## [1.2.1] - 2020-07-06 + +### Added + +* JUnit output and extensible formatter rewrite (#246) +* `load` function now reads from absolute and relative paths, and $PATH (#282) +* Beginner-friendly examples in /docs/examples (#243) +* @peshay's `bats-file` fork contributed to `bats-core/bats-file` (#276) + +### Changed + +* Duplicate test names now error (previous behaviour was to issue a warning) (#286) +* Changed default formatter in Docker to pretty by adding `ncurses` to + Dockerfile, override with `--tap` (#239) +* Replace "readlink -f" dependency with Bash solution (#217) + +## [1.2.0] - 2020-04-25 + +Support parallel suite execution and filtering by test name. + +### Added + +* docs/CHANGELOG.md and docs/releasing.md (#122) +* The `-f, --filter` flag to run only the tests matching a regular expression (#126) +* Optimize stack trace capture (#138) +* `--jobs n` flag to support parallel execution of tests with GNU parallel (#172) + +### Changed + +* AppVeyor builds are now semver-compliant (#123) +* Add Bash 5 as test target (#181) +* Always use upper case signal names to avoid locale dependent err… (#215) +* Fix for tests reading from stdin (#227) +* Fix wrong line numbers of errors in bash < 4.4 (#229) +* Remove preprocessed source after test run (#232) + +## [1.1.0] - 2018-07-08 + +This is the first release with new features relative to the original Bats 0.4.0. 
+ +### Added + +* The `-r, --recursive` flag to scan directory arguments recursively for + `*.bats` files (#109) +* The `contrib/rpm/bats.spec` file to build RPMs (#111) + +### Changed + +* Travis exercises latest versions of Bash from 3.2 through 4.4 (#116, #117) +* Error output highlights invalid command line options (#45, #46, #118) +* Replaced `echo` with `printf` (#120) + +### Fixed + +* Fixed `BATS_ERROR_STATUS` getting lost when `bats_error_trap` fired multiple + times under Bash 4.2.x (#110) +* Updated `bin/bats` symlink resolution, handling the case on CentOS where + `/bin` is a symlink to `/usr/bin` (#113, #115) + +## [1.0.2] - 2018-06-18 + +* Fixed sstephenson/bats#240, whereby `skip` messages containing parentheses + were truncated (#48) +* Doc improvements: + * Docker usage (#94) + * Better README badges (#101) + * Better installation instructions (#102, #104) +* Packaging/installation improvements: + * package.json update (#100) + * Moved `libexec/` files to `libexec/bats-core/`, improved `install.sh` (#105) + +## [1.0.1] - 2018-06-09 + +* Fixed a `BATS_CWD` bug introduced in #91 whereby it was set to the parent of + `PWD`, when it should've been set to `PWD` itself (#98). This caused file + names in stack traces to contain the basename of `PWD` as a prefix, when the + names should've been purely relative to `PWD`. +* Ensure the last line of test output prints when it doesn't end with a newline + (#99). This was a quasi-bug introduced by replacing `sed` with `while` in #88. + +## [1.0.0] - 2018-06-08 + +`1.0.0` generally preserves compatibility with `0.4.0`, but with some Bash +compatibility improvements and a massive performance boost. In other words: + +* all existing tests should remain compatible +* tests that might've failed or exhibited unexpected behavior on earlier + versions of Bash should now also pass or behave as expected + +Changes: + +* Added support for Docker. +* Added support for test scripts that have the [unofficial strict + mode](http://redsymbol.net/articles/unofficial-bash-strict-mode/) enabled. +* Improved stability on Windows and macOS platforms. +* Massive performance improvements, especially on Windows (#8) +* Workarounds for inconsistent behavior between Bash versions (#82) +* Workaround for preserving stack info after calling an exported function under + Bash < 4.4 (#87) +* Fixed TAP compliance for skipped tests +* Added support for tabs in test names. +* `bin/bats` and `install.sh` now work reliably on Windows (#91) + +## [0.4.0] - 2014-08-13 + +* Improved the display of failing test cases. Bats now shows the source code of + failing test lines, along with full stack traces including function names, + filenames, and line numbers. +* Improved the display of the pretty-printed test summary line to include the + number of skipped tests, if any. +* Improved the speed of the preprocessor, dramatically shortening test and suite + startup times. +* Added support for absolute pathnames to the `load` helper. +* Added support for single-line `@test` definitions. +* Added bats(1) and bats(7) manual pages. +* Modified the `bats` command to default to TAP output when the `$CI` variable + is set, to better support environments such as Travis CI. + +## [0.3.1] - 2013-10-28 + +* Fixed an incompatibility with the pretty formatter in certain environments + such as tmux. +* Fixed a bug where the pretty formatter would crash if the first line of a test + file's output was invalid TAP. + +## [0.3.0] - 2013-10-21 + +* Improved formatting for tests run from a terminal. 
Failing tests are now + colored in red, and the total number of failing tests is displayed at the end + of the test run. When Bats is not connected to a terminal (e.g. in CI runs), + or when invoked with the `--tap` flag, output is displayed in standard TAP + format. +* Added the ability to skip tests using the `skip` command. +* Added a message to failing test case output indicating the file and line + number of the statement that caused the test to fail. +* Added "ad-hoc" test suite support. You can now invoke `bats` with multiple + filename or directory arguments to run all the specified tests in aggregate. +* Added support for test files with Windows line endings. +* Fixed regular expression warnings from certain versions of Bash. +* Fixed a bug running tests containing lines that begin with `-e`. + +## [0.2.0] - 2012-11-16 + +* Added test suite support. The `bats` command accepts a directory name + containing multiple test files to be run in aggregate. +* Added the ability to count the number of test cases in a file or suite by + passing the `-c` flag to `bats`. +* Preprocessed sources are cached between test case runs in the same file for + better performance. + +## [0.1.0] - 2011-12-30 + +* Initial public release. + +[Unreleased]: https://github.com/bats-core/bats-core/compare/v1.7.0...HEAD +[1.7.0]: https://github.com/bats-core/bats-core/compare/v1.6.1...v1.7.0 +[1.6.1]: https://github.com/bats-core/bats-core/compare/v1.6.0...v1.6.1 +[1.6.0]: https://github.com/bats-core/bats-core/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/bats-core/bats-core/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/bats-core/bats-core/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/bats-core/bats-core/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/bats-core/bats-core/compare/v1.2.1...v1.3.0 +[1.2.1]: https://github.com/bats-core/bats-core/compare/v1.2.0...v1.2.1 +[1.2.0]: https://github.com/bats-core/bats-core/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/bats-core/bats-core/compare/v1.0.2...v1.1.0 +[1.0.2]: https://github.com/bats-core/bats-core/compare/v1.0.1...v1.0.2 +[1.0.1]: https://github.com/bats-core/bats-core/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/bats-core/bats-core/compare/v0.4.0...v1.0.0 +[0.4.0]: https://github.com/bats-core/bats-core/compare/v0.3.1...v0.4.0 +[0.3.1]: https://github.com/bats-core/bats-core/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/bats-core/bats-core/compare/v0.2.0...v0.3.0 +[0.2.0]: https://github.com/bats-core/bats-core/compare/v0.1.0...v0.2.0 +[0.1.0]: https://github.com/bats-core/bats-core/commits/v0.1.0 diff --git a/test/bats/docs/CODEOWNERS b/test/bats/docs/CODEOWNERS new file mode 100644 index 000000000..2eb233305 --- /dev/null +++ b/test/bats/docs/CODEOWNERS @@ -0,0 +1,4 @@ +# This enables automatic code review requests per: +# - https://help.github.com/articles/about-codeowners/ +# - https://help.github.com/articles/enabling-required-reviews-for-pull-requests/ +* @bats-core/bats-core diff --git a/test/bats/docs/CODE_OF_CONDUCT.md b/test/bats/docs/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..d8d6972d5 --- /dev/null +++ b/test/bats/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,92 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender 
identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting one of the current [project maintainers](#project-maintainers) listed below. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Project Maintainers + +### Current Maintainers + +* [Bianca Tamayo][bt-gh] +* [Mike Bland][mb-gh] +* [Jason Karns][jk-gh] +* [Andrew Martin][am-gh] + +### Past Maintainers + +* Sam Stephenson <> (Original author) + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[bt-gh]: https://github.com/btamayo +[mb-gh]: https://github.com/mbland +[jk-gh]: https://github.com/jasonkarns +[am-gh]: https://github.com/sublimino + +[homepage]: https://contributor-covenant.org +[version]: https://contributor-covenant.org/version/1/4/ diff --git a/test/bats/docs/CONTRIBUTING.md b/test/bats/docs/CONTRIBUTING.md new file mode 100644 index 000000000..263e41197 --- /dev/null +++ b/test/bats/docs/CONTRIBUTING.md @@ -0,0 +1,392 @@ +# Contributing Guidelines + +## Welcome! + +Thank you for considering contributing to the development of this project's +development and/or documentation. Just a reminder: if you're new to this project +or to OSS and want to find issues to work on, please check the following labels +on issues: + +- [help wanted][helpwantedlabel] +- [docs][docslabel] +- [good first issue][goodfirstissuelabel] + +[docslabel]: https://github.com/bats-core/bats-core/labels/docs +[helpwantedlabel]: https://github.com/bats-core/bats-core/labels/help%20wanted +[goodfirstissuelabel]: https://github.com/bats-core/bats-core/labels/good%20first%20issue + +To see all labels and their meanings, [check this wiki page][labelswiki]. + +This guide borrows **heavily** from [@mbland's go-script-bash][gsb] (with some +sections directly quoted), which in turn was +drafted with tips from [Wrangling Web Contributions: How to Build +a CONTRIBUTING.md][moz] and with some inspiration from [the Atom project's +CONTRIBUTING.md file][atom]. 
+ +[gsb]: https://github.com/mbland/go-script-bash/blob/master/CONTRIBUTING.md +[moz]: https://mozillascience.github.io/working-open-workshop/contributing/ +[atom]: https://github.com/atom/atom/blob/master/CONTRIBUTING.md + +[labelswiki]: https://github.com/bats-core/bats-core/wiki/GitHub-Issue-Labels + +## Table of contents + +* [Contributing Guidelines](#contributing-guidelines) + * [Welcome!](#welcome) + * [Table of contents](#table-of-contents) + * [Quick links](#quick-links) + * [Contributor License Agreement](#contributor-license-agreement) + * [Code of conduct](#code-of-conduct) + * [Asking questions and reporting issues](#asking-questions-and-reporting-issues) + * [Updating documentation](#updating-documentation) + * [Environment setup](#environment-setup) + * [Workflow](#workflow) + * [Testing](#testing) + * [Coding conventions](#coding-conventions) + * [Formatting](#formatting) + * [Naming](#naming) + * [Function declarations](#function-declarations) + * [Variable and parameter declarations](#variable-and-parameter-declarations) + * [Command substitution](#command-substitution) + * [Process substitution](#process-substitution) + * [Conditionals and loops](#conditionals-and-loops) + * [Generating output](#generating-output) + * [Gotchas](#gotchas) + * [Open Source License](#open-source-license) + * [Credits](#credits) + +## Quick links + +- [Gitter channel →][gitterurl]: Feel free to come chat with us on Gitter +- [README →][README] +- [Code of conduct →][CODE_OF_CONDUCT] +- [License information →][LICENSE] +- [Original repository →][repohome] +- [Issues →][repoissues] +- [Pull requests →][repoprs] +- [Milestones →][repomilestones] +- [Projects →][repoprojects] + +[README]: https://github.com/bats-core/bats-core/blob/master/README.md +[CODE_OF_CONDUCT]: https://github.com/bats-core/bats-core/blob/master/docs/CODE_OF_CONDUCT.md +[LICENSE]: https://github.com/bats-core/bats-core/blob/master/LICENSE.md + +## Contributor License Agreement + +Per the [GitHub Terms of Service][gh-tos], be aware that by making a +contribution to this project, you agree: + +* to license your contribution under the same terms as [this project's + license][osmit], and +* that you have the right to license your contribution under those terms. + +See also: ["Does my project need an additional contributor agreement? Probably + not."][cla-needed] + +[gh-tos]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license +[osmit]: #open-source-license +[cla-needed]: https://opensource.guide/legal/#does-my-project-need-an-additional-contributor-agreement + + +## Code of conduct + +Harassment or rudeness of any kind will not be tolerated, period. For +specifics, see the [CODE_OF_CONDUCT][] file. + +## Asking questions and reporting issues + +### Asking questions + +Please check the [README][] or existing [issues][repoissues] first. + +If you cannot find an answer to your question, please feel free to hop on our +[Gitter][gitterurl]. [![Gitter](https://badges.gitter.im/bats-core/bats-core.svg)](https://gitter.im/bats-core/bats-core) + +### Reporting issues + +Before reporting an issue, please use the search feature on the [issues +page][repoissues] to see if an issue matching the one you've observed has already +been filed. + +### Updating or filing a new issue + +#### Information to include + +Try to be as specific as possible about your environment and the problem you're +observing. At a minimum, include: + +#### Installation issues + +1. 
State the version of Bash you're using `bash --version` +1. State your operating system and its version +1. If you're installing through homebrew, run `brew doctor`, and attach the +output of `brew info bats-core` + +#### Bugs/usage issues + +1. State the version of Bash you're using `bash --version` +1. State your operating system and its version +1. Command line steps or code snippets that reproduce the issue +1. Any apparently relevant information from the [Bash changelog][bash-changes] + +[bash-changes]: https://tiswww.case.edu/php/chet/bash/CHANGES + +Also consider using: + +- Bash's `time` builtin to collect running times +- a regression test to add to the suite +- memory usage as reported by a tool such as + [memusg](https://gist.github.com/netj/526585) + +### On existing issues + +1. DO NOT add a +1 comment: Use the reactions provided instead +1. DO add information if you're facing a similar issue to someone else, but +within a different context (e.g. different steps needed to reproduce the issue +than previous stated, different version of Bash or BATS, different OS, etc.) +You can read on how to do that here: [Information to include](#information-to-include) +1. DO remember that you can use the *Subscribe* button on the right side of the +page to receive notifications of further conversations or a resolution. + +## Updating documentation + +We love documentation and people who love documentation! + +If you love writing clear, accessible docs, please don't be shy about pull +requests. Remember: docs are just as important as code. + +Also: _no typo is too small to fix!_ Really. Of course, batches of fixes are +preferred, but even one nit is one nit too many. + +## Environment setup + +Make sure you have Bash installed per the [Environment setup in the +README][env-setup]. + +[env-setup]: https://github.com/bats-core/bats-core/blob/master/README.md#environment-setup + +## Workflow + +The basic workflow for submitting changes resembles that of the [GitHub Git +Flow][github-flow] (a.k.a. GitHub Flow), except that you will be working with +your own fork of the repository and issuing pull requests to the original. + +[github-flow]: https://guides.github.com/introduction/flow/ + +1. Fork the repo on GitHub (look for the "Fork" button) +1. Clone your forked repo to your local machine +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Develop _and [test](#testing)_ your changes as necessary. +1. Commit your changes (`git commit -am 'Add some feature'`) +1. Push to the branch (`git push origin my-new-feature`) +1. Create a new [GitHub pull request][gh-pr] for your feature branch based + against the original repository's `master` branch +1. If your request is accepted, you can [delete your feature branch][rm-branch] + and pull the updated `master` branch from the original repository into your + fork. You may even [delete your fork][rm-fork] if you don't anticipate making + further changes. 
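+
+For illustration only, the local side of those steps might look roughly like the
+following sketch (the fork URL and branch name are placeholders, not part of the
+official workflow text):
+
+```bash
+# clone your fork (replace <your-username> with your GitHub account)
+git clone https://github.com/<your-username>/bats-core.git
+cd bats-core
+
+# create a feature branch, develop and test, then commit and push it to your fork
+git checkout -b my-new-feature
+git commit -am 'Add some feature'
+git push origin my-new-feature
+```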
+ +[gh-pr]: https://help.github.com/articles/using-pull-requests/ +[rm-branch]: https://help.github.com/articles/deleting-unused-branches/ +[rm-fork]: https://help.github.com/articles/deleting-a-repository/ + +## Testing + +- Continuous integration status: [![Tests](https://github.com/bats-core/bats-core/workflows/Tests/badge.svg)](https://github.com/bats-core/bats-core/actions?query=workflow%3ATests) + +## Coding conventions + +- [Formatting](#formatting) +- [Naming](#naming) +- [Variable and parameter declarations](#variable-and-parameter-declarations) +- [Command substitution](#command-substitution) +- [Conditions and loops](#conditionals-and-loops) +- [Gotchas](#gotchas) + +### Formatting + +- Keep all files 80 characters wide. +- Indent using two spaces. +- Enclose all variables in double quotes when used to avoid having them + interpreted as glob patterns (unless the variable contains a glob pattern) + and to avoid word splitting when the value contains spaces. Both scenarios + can introduce errors that often prove difficult to diagnose. + - **This is especially important when the variable is used to generate a + glob pattern**, since spaces may appear in a path value. + - If the variable itself contains a glob pattern, make sure to set + `IFS=$'\n'` before using it so that the pattern itself and any matching + file names containing spaces are not split apart. + - Exceptions: Quotes are not required within math contexts, i.e. `(( ))` or + `$(( ))`, and must not be used for variables on the right side of the `=~` + operator. +- Enclose all string literals in single quotes. + - Exception: If the string contains an apostrophe, use double quotes. +- Use quotes around variables and literals even inside of `[[ ]]` conditions. + - This is because strings that contain '[' or ']' characters may fail to + compare equally when they should. + - Exception: Do not quote variables that contain regular expression patterns + appearing on the right side of the `=~` operator. +- _Only_ quote arguments to the right of `=~` if the expression is a literal + match without any metacharacters. + +The following are intended to prevent too-compact code: + +- Declare only one item per `declare`, `local`, `export`, or `readonly` call. + - _Note:_ This also helps avoid subtle bugs, as trying to initialize one + variable using the value of another declared in the same statement will + not do what you may expect. The initialization of the first variable will + not yet be complete when the second variable is declared, so the first + variable will have an empty value. +- Do not use one-line `if`, `for`, `while`, `until`, `case`, or `select` + statements. +- Do not use `&&` or `||` to avoid writing `if` statements. +- Do not write functions entirely on one line. +- For `case` statements: put each pattern on a line by itself; put each command + on a line by itself; put the `;;` terminator on a line by itself. + +### Naming + +- Use `snake_case` for all identifiers. + +### Function declarations + +- Declare functions without the `function` keyword. +- Strive to always use `return`, never `exit`, unless an error condition is + severe enough to warrant it. + - Calling `exit` makes it difficult for the caller to recover from an error, + or to compose new commands from existing ones. + +### Variable and parameter declarations + +- _Gotcha:_ Never initialize an array on the same line as an `export` or + `declare -g` statement. See [the Gotchas section](#gotchas) below for more + details. 
+- Declare all variables inside functions using `local`. +- Declare temporary file-level variables using `declare`. Use `unset` to remove + them when finished. +- Don't use `local -r`, as a readonly local variable in one scope can cause a + conflict when it calls a function that declares a `local` variable of the same + name. +- Don't use type flags with `declare` or `local`. Assignments to integer + variables in particular may behave differently, and it has no effect on array + variables. +- For most functions, the first lines should use `local` declarations to + assign the original positional parameters to more meaningful names, e.g.: + ```bash + format_summary() { + local cmd_name="$1" + local summary="$2" + local longest_name_len="$3" + ``` + For very short functions, this _may not_ be necessary, e.g.: + ```bash + has_spaces() { + [[ "$1" != "${1//[[:space:]]/}" ]] + } + ``` + +### Command substitution + +- If possible, don't. While this capability is one of Bash's core strengths, + every new process created by Bats makes the framework slower, and speed is + critical to encouraging the practice of automated testing. (This is especially + true on Windows, [where process creation is one or two orders of magnitude + slower][win-slow]. See [bats-core/bats-core#8][pr-8] for an illustration of + the difference avoiding subshells makes.) Bash is quite powerful; see if you + can do what you need in pure Bash first. +- If you need to capture the output from a function, store the output using + `printf -v` instead if possible. `-v` specifies the name of the variable into + which to write the result; the caller can supply this name as a parameter. +- If you must use command substitution, use `$()` instead of backticks, as it's + more robust, more searchable, and can be nested. + +[win-slow]: https://rufflewind.com/2014-08-23/windows-bash-slow +[pr-8]: https://github.com/bats-core/bats-core/pull/8 + +### Process substitution + +- If possible, don't use it. See the advice on avoiding subprocesses and using + `printf -v` in the **Command substitution** section above. +- Use wherever necessary and possible, such as when piping input into a `while` + loop (which avoids having the loop body execute in a subshell) or running a + command taking multiple filename arguments based on output from a function or + pipeline (e.g. `diff`). +- *Warning*: It is impossible to directly determine the exit status of a process + substitution; emitting an exit status as the last line of output is a possible + workaround. + +### Conditionals and loops + +- Always use `[[` and `]]` for evaluating variables. Per the guideline under + **Formatting**, quote variables and strings within the brackets, but not + regular expressions (or variables containing regular expressions) appearing + on the right side of the `=~` operator. + +### Generating output + +- Use `printf` instead of `echo`. Both are Bash builtins, and there's no + perceptible performance difference when running Bats under the `time` builtin. + However, `printf` provides a more consistent experience in general, as `echo` + has limitations to the arguments it accepts, and even the same version of Bash + may produce different results for `echo` based on how the binary was compiled. + See [Stack Overflow: Why is printf better than echo?][printf-vs-echo] for + excruciating details. + +[printf-vs-echo]: https://unix.stackexchange.com/a/65819 + +### Signal names + +Always use upper case signal names (e.g. `trap - INT EXIT`) to avoid locale +dependent errors. 
In some locales (for example Turkish, see +[Turkish dotless i](https://en.wikipedia.org/wiki/Dotted_and_dotless_I)) lower +case signal names cause Bash to error. An example of the problem: + +```bash +$ echo "tr_TR.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen tr_TR.UTF-8 # Ubuntu derivatives +$ LC_CTYPE=tr_TR.UTF-8 LC_MESSAGES=C bash -c 'trap - int && echo success' +bash: line 0: trap: int: invalid signal specification +$ LC_CTYPE=tr_TR.UTF-8 LC_MESSAGES=C bash -c 'trap - INT && echo success' +success +``` + +### Gotchas + +- If you wish to use command substitution to initialize a `local` variable, and + then check the exit status of the command substitution, you _must_ declare the + variable on one line and perform the substitution on another. If you don't, + the exit status will always indicate success, as it is the status of the + `local` declaration, not the command substitution. +- To work around a bug in some versions of Bash whereby arrays declared with + `declare -g` or `export` and initialized in the same statement eventually go + out of scope, always `export` the array name on one line and initialize it the + next line. See: + - https://lists.gnu.org/archive/html/bug-bash/2012-06/msg00068.html + - ftp://ftp.gnu.org/gnu/bash/bash-4.2-patches/bash42-025 + - http://lists.gnu.org/archive/html/help-bash/2012-03/msg00078.html +- [ShellCheck](https://www.shellcheck.net/) can help to identify many of these issues + + +## Open Source License + +This software is made available under the [MIT License][osmit]. +For the text of the license, see the [LICENSE][] file. + +## Credits + +- This guide was heavily written by BATS-core member [@mbland](https://github.com/mbland) +for [go-script-bash](https://github.com/mbland/go-script-bash), tweaked for [BATS-core][repohome] +- Table of Contents created by [gh-md-toc](https://github.com/ekalinin/github-markdown-toc) +- The [official bash logo](https://github.com/odb/official-bash-logo) is copyrighted +by the [Free Software Foundation](https://www.fsf.org/), 2016 under the [Free Art License](http://artlibre.org/licence/lal/en/) + + + +[repoprojects]: https://github.com/bats-core/bats-core/projects +[repomilestones]: https://github.com/bats-core/bats-core/milestones +[repoprs]: https://github.com/bats-core/bats-core/pulls +[repoissues]: https://github.com/bats-core/bats-core/issues +[repohome]: https://github.com/bats-core/bats-core + +[osmit]: https://opensource.org/licenses/MIT + +[gitterurl]: https://gitter.im/bats-core/bats-core diff --git a/test/bats/docs/Makefile b/test/bats/docs/Makefile new file mode 100644 index 000000000..d0c3cbf10 --- /dev/null +++ b/test/bats/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/test/bats/docs/PULL_REQUEST_TEMPLATE.md b/test/bats/docs/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..c7c58d58b --- /dev/null +++ b/test/bats/docs/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +- [ ] I have reviewed the [Contributor Guidelines][contributor]. +- [ ] I have reviewed the [Code of Conduct][coc] and agree to abide by it + +[contributor]: https://github.com/bats-core/bats-core/blob/master/docs/CONTRIBUTING.md +[coc]: https://github.com/bats-core/bats-core/blob/master/docs/CODE_OF_CONDUCT.md diff --git a/test/bats/docs/examples/README.md b/test/bats/docs/examples/README.md new file mode 100644 index 000000000..5ef51342f --- /dev/null +++ b/test/bats/docs/examples/README.md @@ -0,0 +1,6 @@ +# Examples + +This directory contains example .bats files. +See the [bats-core wiki][examples] for more details. + +[examples]: https://github.com/bats-core/bats-core/wiki/Examples \ No newline at end of file diff --git a/test/bats/docs/examples/package-tarball b/test/bats/docs/examples/package-tarball new file mode 100644 index 000000000..b51cffd17 --- /dev/null +++ b/test/bats/docs/examples/package-tarball @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# "unofficial" bash strict mode +# See: http://redsymbol.net/articles/unofficial-bash-strict-mode +set -o errexit # Exit when simple command fails 'set -e' +set -o errtrace # Exit on error inside any functions or subshells. +set -o nounset # Trigger error when expanding unset variables 'set -u' +set -o pipefail # Do not hide errors within pipes 'set -o pipefail' +set -o xtrace # Display expanded command and arguments 'set -x' +IFS=$'\n\t' # Split words on \n\t rather than spaces + +main() { + tar -czf "$dst_tarball" -C "$src_dir" . +} + +main "$@" diff --git a/test/bats/docs/examples/package-tarball.bats b/test/bats/docs/examples/package-tarball.bats new file mode 100755 index 000000000..144318ae1 --- /dev/null +++ b/test/bats/docs/examples/package-tarball.bats @@ -0,0 +1,51 @@ +#!/usr/bin/env bats + +setup() { + export dst_tarball="${BATS_TMPDIR}/dst.tar.gz" + export src_dir="${BATS_TMPDIR}/src_dir" + + rm -rf "${dst_tarball}" "${src_dir}" + mkdir "${src_dir}" + touch "${src_dir}"/{a,b,c} +} + +main() { + bash "${BATS_TEST_DIRNAME}"/package-tarball +} + +@test "fail when \$src_dir and \$dst_tarball are unbound" { + unset src_dir dst_tarball + + run main + [ "${status}" -ne 0 ] +} + +@test "fail when \$src_dir is a non-existent directory" { + # shellcheck disable=SC2030 + src_dir='not-a-dir' + + run main + [ "${status}" -ne 0 ] +} + +# shellcheck disable=SC2016 +@test "pass when \$src_dir directory is empty" { + # shellcheck disable=SC2031,SC2030 + rm -rf "${src_dir:?}/*" + + run main + echo "$output" + [ "${status}" -eq 0 ] +} + +# shellcheck disable=SC2016 +@test "files in \$src_dir are added to tar archive" { + run main + [ "${status}" -eq 0 ] + + run tar tf "$dst_tarball" + [ "${status}" -eq 0 ] + [[ "${output}" =~ a ]] + [[ "${output}" =~ b ]] + [[ "${output}" =~ c ]] +} diff --git a/test/bats/docs/make.bat b/test/bats/docs/make.bat new file mode 100644 index 000000000..9534b0181 --- /dev/null +++ b/test/bats/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. 
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/test/bats/docs/releasing.md b/test/bats/docs/releasing.md new file mode 100644 index 000000000..cd0717b7e --- /dev/null +++ b/test/bats/docs/releasing.md @@ -0,0 +1,127 @@ +# Releasing a new Bats version + +These notes reflect the current process. There's a lot more we could do, in +terms of automation and expanding the number of platforms to which we formally +release (see #103). + +## Update docs/CHANGELOG.md + +Create a new entry at the top of `docs/CHANGELOG.md` that enumerates the +significant updates to the new version. + +## Bumping the version number + +Bump the version numbers in the following files: + +- contrib/rpm/bats.spec +- libexec/bats-core/bats +- package.json + +Commit these changes (including the `docs/CHANGELOG.md` changes) in a commit +with the message `Bats `, where `` is the new version number. + +Create a new signed, annotated tag with: + +```bash +$ git tag -a -s +``` + +Include the `docs/CHANGELOG.md` notes corresponding to the new version as the +tag annotation, except the first line should be: `Bats - YYYY-MM-DD` +and any Markdown headings should become plain text, e.g.: + +```md +### Added +``` + +should become: + +```md +Added: +``` + +## Create a GitHub release + +Push the new version commit and tag to GitHub via the following: + +```bash +$ git push --follow-tags +``` + +Then visit https://github.com/bats-core/bats-core/releases, and: + +* Click **Draft a new release**. +* Select the new version tag. +* Name the release: `Bats `. +* Paste the same notes from the version tag annotation as the description, + except change the first line to read: `Released: YYYY-MM-DD`. +* Click **Publish release**. + +For more on `git push --follow-tags`, see: + +* [git push --follow-tags in the online manual][ft-man] +* [Stack Overflow: How to push a tag to a remote repository using Git?][ft-so] + +[ft-man]: https://git-scm.com/docs/git-push#git-push---follow-tags +[ft-so]: https://stackoverflow.com/a/26438076 + +## NPM + +`npm publish`. Pretty easy! + +For the paranoid, use `npm pack` and install the resulting tarball locally with +`npm install` before publishing. + +## Homebrew + +The basic instructions are in the [Submit a new version of an existing +formula][brew] section of the Homebrew docs. + +[brew]: https://github.com/Homebrew/brew/blob/master/docs/How-To-Open-a-Homebrew-Pull-Request.md#submit-a-new-version-of-an-existing-formula + +An example using v1.1.0 (notice that this uses the sha256 sum of the tarball): + +```bash +$ curl -LOv https://github.com/bats-core/bats-core/archive/v1.1.0.tar.gz +$ openssl sha256 v1.1.0.tar.gz +SHA256(v1.1.0.tar.gz)=855d8b8bed466bc505e61123d12885500ef6fcdb317ace1b668087364717ea82 + +# Add the --dry-run flag to see the individual steps without executing. 
+$ brew bump-formula-pr \ + --url=https://github.com/bats-core/bats-core/archive/v1.1.0.tar.gz \ + --sha256=855d8b8bed466bc505e61123d12885500ef6fcdb317ace1b668087364717ea82 +``` +This resulted in https://github.com/Homebrew/homebrew-core/pull/29864, which was +automatically merged once the build passed. + +## Alpine Linux + +An example using v1.1.0 (notice that this uses the sha512 sum of the Zip file): + +```bash +$ curl -LOv https://github.com/bats-core/bats-core/archive/v1.1.0.zip +$ openssl sha512 v1.1.0.zip +SHA512(v1.1.0.zip)=accd83cfec0025a2be40982b3f9a314c2bbf72f5c85daffa9e9419611904a8d34e376919a5d53e378382e0f3794d2bd781046d810225e2a77812474e427bed9e +``` + +After cloning alpinelinux/aports, I used the above information to create: +https://github.com/alpinelinux/aports/pull/4696 + +**Note:** Currently users must enable the `edge` branch of the `community` repo +by adding/uncommenting the corresponding entry in `/etc/apk/repositories`. + +## Announce + +It's worth making a brief announcement like [the v1.1.0 announcement via +Gitter][gitter]: + +[gitter]: https://gitter.im/bats-core/bats-core?at=5b42c9a57b811a6d63daacb5 + +``` +v1.1.0 is now available via Homebrew and npm: +https://github.com/bats-core/bats-core/releases/tag/v1.1.0 + +It'll eventually be available in Alpine via the edge branch of the community +repo once alpinelinux/aports#4696 gets merged. (Check /etc/apk/repositories to +ensure this repo is enabled.) +``` diff --git a/test/bats/docs/source/_static/.gitkeep b/test/bats/docs/source/_static/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/test/bats/docs/source/_templates/.gitkeep b/test/bats/docs/source/_templates/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/test/bats/docs/source/conf.py b/test/bats/docs/source/conf.py new file mode 100644 index 000000000..6deda83d4 --- /dev/null +++ b/test/bats/docs/source/conf.py @@ -0,0 +1,72 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'bats-core' +copyright = '2022, bats-core organization' +author = 'bats-core organization' + +# The full version, including alpha/beta/rc tags +release = '1' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'recommonmark', + 'sphinxcontrib.programoutput' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +html_sidebars = { '**': [ + 'about.html', + 'navigation.html', + 'relations.html', + 'searchbox.html', + 'donate.html'] } + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = []
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+#html_theme = 'alabaster'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+#man_pages = [ ('man.1', 'bats', 'bats documentation', ['bats-core Contributors'], 1)]
+
+def setup(app):
+    app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)
+    import recommonmark
+    from recommonmark.transform import AutoStructify
+    app.add_transform(AutoStructify)
diff --git a/test/bats/docs/source/docker-usage.md b/test/bats/docs/source/docker-usage.md
new file mode 100644
index 000000000..7318e566c
--- /dev/null
+++ b/test/bats/docs/source/docker-usage.md
@@ -0,0 +1,58 @@
+# Docker Usage Guide
+
+- [Docker Usage Guide](#docker-usage-guide)
+  * [Basic Usage](#basic-usage)
+  * [Docker Gotchas](#docker-gotchas)
+  * [Extending from the base image](#extending-from-the-base-image)
+
+## Basic Usage
+
+To build and run `bats`' own tests:
+```bash
+$ git clone https://github.com/bats-core/bats-core.git
+Cloning into 'bats-core'...
+remote: Counting objects: 1222, done.
+remote: Compressing objects: 100% (53/53), done.
+remote: Total 1222 (delta 34), reused 55 (delta 21), pack-reused 1146
+Receiving objects: 100% (1222/1222), 327.28 KiB | 1.70 MiB/s, done.
+Resolving deltas: 100% (661/661), done.
+
+$ cd bats-core/
+$ docker build --tag bats/bats:latest .
+...
+$ docker run -it bats/bats:latest --formatter tap /opt/bats/test
+```
+
+To mount your tests into the container, first build the image as above. Then, for example with `bats`:
+```bash
+$ docker run -it -v "$PWD:/opt/bats" bats/bats:latest /opt/bats/test
+```
+This runs the `test/` directory from the bats-core repository inside the bats Docker container.
+
+For test suites that are intended to run in isolation from the project (i.e. the tests do not depend on project files outside of the test directory), you can mount the test directory by itself and execute the tests like so:
+
+```bash
+$ docker run -it -v "$PWD:/code" bats/bats:latest /code/test
+```
+
+## Docker Gotchas
+
+Relying on functionality provided by your environment (SSH keys or agent, installed binaries, fixtures outside the mounted test directory) will fail when running inside Docker.
+
+`--interactive`/`-i` attaches an interactive terminal and is useful for killing hanging processes (otherwise this has to be done via the `docker stop` command). `--tty`/`-t` simulates a tty (often not used, but most similar to test runs from a Bash prompt). Interactivity is important to a user, but not to a build, and TTYs are probably more important to a headless build. Using both flags is the least surprising option for a new Docker user.
+
+## Extending from the base image
+
+Docker operates on a principle of isolation, and bundles all required dependencies into the Docker image. These can be mounted in at runtime (for test files, configuration, etc.). For binary dependencies it may be better to extend the base Docker image with further tools and files.
+ +```dockerfile +FROM bats/bats + +RUN \ + apk \ + --no-cache \ + --update \ + add \ + openssh + +``` diff --git a/test/bats/docs/source/faq.rst b/test/bats/docs/source/faq.rst new file mode 100644 index 000000000..0cecfa7aa --- /dev/null +++ b/test/bats/docs/source/faq.rst @@ -0,0 +1,170 @@ +FAQ +=== + +How do I set the working directory? +----------------------------------- + +The working directory is simply the directory where you started when executing bats. +If you want to enforce a specific directory, you can use `cd` in the `setup_file`/`setup` functions. +However, be aware that code outside any function will run before any of these setup functions and might interfere with bats' internals. + + +How do I see the output of the command under `run` when a test fails? +--------------------------------------------------------------------- + +`run` captures stdout and stderr of its command and stores it in the `$output` and `${lines[@]}` variables. +If you want to see this output, you need to print it yourself, or use functions like `assert_output` that will reproduce it on failure. + +Can I use `--filter` to exclude files/tests? +-------------------------------------------- + +No, not directly. `--filter` uses a regex to match against test names. So you could try to invert the regex. +The filename won't be part of the strings that are tested, so you cannot filter against files. + +How can I exclude a single test from a test run? +------------------------------------------------ + +If you want to exclude only few tests from a run, you can either `skip` them: + +.. code-block:: bash + + @test "Testname" { + # yadayada + } + +becomes + +.. code-block:: bash + + @test "Testname" { + skip 'Optional skip message' + # yadayada + } + +or comment them out, e.g.: + +.. code-block:: bash + + @test "Testname" { + +becomes + +.. code-block:: bash + + disabled() { # @test "Testname" { + +For multiple tests or all tests of a file, this becomes tedious, so read on. + +How can I exclude all tests of a file from a test run? +-------------------------------------------------------- + +If you run your test suite by naming individual files like: + +.. code-block:: bash + + $ bats test/a.bats test/b.bats ... + +you can simply omit your file. When running a folder like + + +.. code-block:: bash + + $ bats test/ + +you can prevent test files from being picked up by changing their extension to something other than `.bats`. + +It is also possible to `skip` in `setup_file`/`setup` which will skip all tests in the file. + +How can I include my own `.sh` files for testing? +------------------------------------------------- + +You can simply `source .sh` files. However, be aware that `source`ing files with errors outside of any function (or inside `setup_file`) will trip up bats +and lead to hard to diagnose errors. +Therefore, it is safest to only `source` inside `setup` or the test functions themselves. + +How can I debug a failing test? +------------------------------- + +Short of using a bash debugger you should make sure to use appropriate asserts for your task instead of raw bash comparisons, e.g.: + +.. code-block:: bash + + @test test { + run echo test failed + assert_output "test" + # instead of + [ "$output" = "test" ] + } + +Because the former will print the output when the test fails while the latter won't. +Similarly, you should use `assert_success`/`assert_failure` instead of `[ "$status" -eq 0 ]` for return code checks. 
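+
+As a minimal sketch (assuming the bats-support and bats-assert helper libraries
+are installed and loaded as described in the helper-library question below), the
+same idea with explicit status checks looks like:
+
+.. code-block:: bash
+
+    @test "succeeding command" {
+        run echo test
+        assert_success
+        assert_output "test"
+    }
+
+    @test "failing command" {
+        run false
+        assert_failure
+    }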
+ +Is there a mechanism to add file/test specific functionality to a common setup function? +---------------------------------------------------------------------------------------- + +Often the setup consists of parts that are common between different files of a test suite and parts that are specific to each file. +There is no suite wide setup functionality yet, so you should extract these common setup steps into their own file (e.g. `common-test-setup.sh`) and function (e.g. `commonSetup() {}`), +which can be `source`d or `load`ed and call it in `setup_file` or `setup`. + +How can I use helper libraries like bats-assert? +------------------------------------------------ + +This is a short reproduction of https://github.com/ztombol/bats-docs. + +At first, you should make sure the library is installed. This is usually done in the `test_helper/` folders alongside the `.bats` files, giving you a filesystem layout like this: + +.. code-block:: + + test/ + test.bats + test_helper/ + bats-support/ + bats-assert/ + +Next, you should load those helper libraries: + +.. code-block:: bash + + setup() { + load 'test_helper/bats-support/load' # this is required by bats-assert! + load 'test_helper/bats-assert/load' + } + +Now, you should be able to use the functions from these helpers inside your tests, e.g.: + +.. code-block:: bash + + @test "test" { + run echo test + assert_output "test" + } + +Note that you obviously need to load the library before using it. +If you need the library inside `setup_file` or `teardown_file` you need to load it in `setup_file`. + +How to set a test timeout in bats? +---------------------------------- + +Set the variable `$BATS_TEST_TIMEOUT` before `setup()` starts. This means you can set it either on the command line, +in free code in the test file or in `setup_file()`. + +How can I lint/shell-format my bats tests? +------------------------------------------ + +Due to their custom syntax (`@test`), `.bats` files are not standard bash. This prevents most tools from working with bats. +However, there is an alternative syntax `function_name { # @test` to declare tests in a bash compliant manner. + +- shellcheck support since version 0.7 +- shfmt support since version 3.2.0 (using `-ln bats`) + + +How can I check if a test failed/succeeded during teardown? +----------------------------------------------------------- + +You can check `BATS_TEST_COMPLETED` which will be set to 1 if the test was successful or empty if it was not. +There is also `BATS_TEST_SKIPPED` which will be non-empty (contains the skip message or -1) when `skip` was called. + +How can I setup/cleanup before/after all tests? +----------------------------------------------- + +Currently, this is not supported. Please contribute your usecase to issue `#39 `_. diff --git a/test/bats/docs/source/gotchas.rst b/test/bats/docs/source/gotchas.rst new file mode 100644 index 000000000..485b67503 --- /dev/null +++ b/test/bats/docs/source/gotchas.rst @@ -0,0 +1,132 @@ +Gotchas +======= + +My test fails although I return true? +------------------------------------- + +Using `return 1` to signify `true` for a success as is done often in other languages does not mesh well with Bash's +convention of using return code 0 to signify success and everything non-zero to indicate a failure. + +Please adhere to this idiom while using bats, or you will constantly work against your environment. + +My negated statement (e.g. ! true) does not fail the test, even when it should. 
+------------------------------------------------------------------------------- + +Bash deliberately excludes negated return values from causing a pipeline to exit (see bash's `-e` option). +Use `run !` on Bats 1.5.0 and above. For older bats versions, use one of `! x || false` or `run` with `[ $status != 0 ]`. + +If the negated command is the final statement in a test, that final statement's (negated) exit status will propagate through to the test's return code as usual. +Negated statements of one of the correct forms mentioned above will explicitly fail the test when the pipeline returns true, regardless of where they occur in the test. + +I cannot register a test multiple times via for loop. +----------------------------------------------------- + +The usual bats tests (`@test`) are preprocessed into functions. +Wrapping them into a for loop only redeclares this function. + +If you are interested in registering multiple calls to the same function, contribute your wishes to issue `#306 `_. + +I cannot pass parameters to test or .bats files. +------------------------------------------------ + +Especially while using bats via shebang: + +.. code-block:: bash + + #!/usr/bin/env bats + + @test "test" { + # ... + } + +You could be tempted to pass parameters to the test invocation like `./test.bats param1 param2`. +However, bats does not support passing parameters to files or tests. +If you need such a feature, please let us know about your usecase. + +As a workaround you can use environment variables to pass parameters. + +Why can't my function return results via a variable when using `run`? +--------------------------------------------------------------------- + +The `run` function executes its command in a subshell which means the changes to variables won't be available in the calling shell. + +If you want to test these functions, you should call them without `run`. + +`run` doesn't fail, although the same command without `run` does. +----------------------------------------------------------------- + +`run` is a wrapper that always succeeds. The wrapped command's exit code is stored in `$status` and the stdout/stderr in `$output`. +If you want to fail the test, you should explicitly check `$status` or omit `run`. See also `when not to use run `_. + +`load` won't load my `.sh` files. +--------------------------------- + +`load` is intended as an internal helper function that always loads `.bash` files (by appending this suffix). +If you want to load an `.sh` file, you can simple `source` it. + +I can't lint/shell-format my bats tests. +---------------------------------------- + +Bats uses a custom syntax for annotating tests (`@test`) that is not bash compliant. +Therefore, standard bash tooling won't be able to interact directly with `.bats` files. +Shellcheck supports bats' native syntax as of version 0.7. + +Additionally, there is bash compatible syntax for tests: + +.. code-block:: bash + + function bash_compliant_function_name_as_test_name { # @test + # your code + } + + +The output (stdout/err) from commands under `run` is not visible in failed tests. +--------------------------------------------------------------------------------- + +By default, `run` only stores stdout/stderr in `$output` (and `${lines[@]}`). +If you want to see this output, you either should use bat-assert's assertions or have to print `$output` before the check that fails. + +My piped command does not work under run. +----------------------------------------- + +Be careful with using pipes and with `run`. 
While your mental model of `run` might wrap the whole command behind it, bash's parser won't:
+
+.. code-block:: bash
+
+   run echo foo | grep bar
+
+This won't execute `run (echo foo | grep bar)` but rather `(run echo foo) | grep bar`. If you need to incorporate pipes, you should either do
+
+.. code-block:: bash
+
+   run bash -c 'echo foo | grep bar'
+
+or use a function to wrap the pipe in:
+
+.. code-block:: bash
+
+   fun_with_pipes() {
+     echo foo | grep bar
+   }
+
+   run fun_with_pipes
+
+`[[ ]]` (or `(( ))`) did not fail my test
+------------------------------------------
+
+The `set -e` handling of `[[ ]]` and `(( ))` changed in Bash 4.1. Older versions, like 3.2 on MacOS,
+don't abort the test when they fail, unless they are the last command before the (test) function returns,
+making their exit code the return code.
+
+`[ ]` does not suffer from this, but is no replacement for all `[[ ]]` usecases. Appending ` || false` will work in all cases.
+
+Background tasks prevent the test run from terminating when finished
+---------------------------------------------------------------------
+
+When running a task in the background, it will inherit the opened FDs of the process it was forked from.
+This means that the background task forked from a Bats test will hold the FD for the pipe to the formatter that prints to the terminal,
+thus keeping it open until the background task finishes.
+Due to implementation internals of Bats and bash, this pipe might be held in multiple FDs which all have to be closed by the background task.
+
+You can use `close_non_std_fds` from `test/fixtures/bats/issue-205.bats` in the background job to close all FDs except stdin, stdout and stderr, thus solving the problem.
+More details about the issue can be found in `issue #205 <https://github.com/bats-core/bats-core/issues/205#issuecomment-973572596>`__.
diff --git a/test/bats/docs/source/index.rst b/test/bats/docs/source/index.rst
new file mode 100644
index 000000000..b1867d64c
--- /dev/null
+++ b/test/bats/docs/source/index.rst
@@ -0,0 +1,18 @@
+Welcome to bats-core's documentation!
+=====================================
+
+Versions before v1.2.1 are documented over `there `_.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   tutorial
+   installation
+   usage
+   docker-usage
+   writing-tests
+   gotchas
+   faq
+   warnings/index
+   support-matrix
diff --git a/test/bats/docs/source/installation.rst b/test/bats/docs/source/installation.rst
new file mode 100644
index 000000000..43bdc0571
--- /dev/null
+++ b/test/bats/docs/source/installation.rst
@@ -0,0 +1,138 @@
+
+Installation
+============
+
+Linux: Distribution Package Manager
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following Linux distributions provide Bats via their package manager:
+
+* Arch Linux: `community/bash-bats `__
+* Alpine Linux: `bats `__
+* Debian Linux: `shells/bats `__
+* Fedora Linux: `rpms/bats `__
+* Gentoo Linux: `dev-util/bats `__
+* OpenSUSE Linux: `bats `__
+* Ubuntu Linux: `shells/bats `__
+
+**Note**: Bats versions pre 1.0 are from sstephenson's original project.
+Consider using one of the other installation methods below to get the latest Bats release.
+The test matrix above only applies to the latest Bats version.
+
+If your favorite distribution is not listed above,
+you can try one of the following package managers or install from source.
+
+MacOS: Homebrew
+^^^^^^^^^^^^^^^
+
+On macOS, you can install `Homebrew `__ if you haven't already,
+then run:
+
+..
code-block:: bash + + $ brew install bats-core + +Any OS: npm +^^^^^^^^^^^ + +You can install the `Bats npm package `__ via: + +.. code-block:: + + # To install globally: + $ npm install -g bats + + # To install into your project and save it as one of the "devDependencies" in + # your package.json: + $ npm install --save-dev bats + +Any OS: Installing Bats from source +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Check out a copy of the Bats repository. Then, either add the Bats ``bin`` +directory to your ``$PATH``\ , or run the provided ``install.sh`` command with the +location to the prefix in which you want to install Bats. For example, to +install Bats into ``/usr/local``\ , + +.. code-block:: + + $ git clone https://github.com/bats-core/bats-core.git + $ cd bats-core + $ ./install.sh /usr/local + + +**Note:** You may need to run ``install.sh`` with ``sudo`` if you do not have +permission to write to the installation prefix. + +Windows: Installing Bats from source via Git Bash +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Check out a copy of the Bats repository and install it to ``$HOME``. This +will place the ``bats`` executable in ``$HOME/bin``\ , which should already be +in ``$PATH``. + +.. code-block:: + + $ git clone https://github.com/bats-core/bats-core.git + $ cd bats-core + $ ./install.sh $HOME + + +Running Bats in Docker +^^^^^^^^^^^^^^^^^^^^^^ + +There is an official image on the Docker Hub: + +.. code-block:: + + $ docker run -it bats/bats:latest --version + + +Building a Docker image +~~~~~~~~~~~~~~~~~~~~~~~ + +Check out a copy of the Bats repository, then build a container image: + +.. code-block:: + + $ git clone https://github.com/bats-core/bats-core.git + $ cd bats-core + $ docker build --tag bats/bats:latest . + + +This creates a local Docker image called ``bats/bats:latest`` based on `Alpine +Linux `__ +(to push to private registries, tag it with another organisation, e.g. +``my-org/bats:latest``\ ). + +To run Bats' internal test suite (which is in the container image at +``/opt/bats/test``\ ): + +.. code-block:: + + $ docker run -it bats/bats:latest /opt/bats/test + + +To run a test suite from a directory called ``test`` in the current directory of +your local machine, mount in a volume and direct Bats to its path inside the +container: + +.. code-block:: + + $ docker run -it -v "${PWD}:/code" bats/bats:latest test + + +.. + + ``/code`` is the working directory of the Docker image. "${PWD}/test" is the + location of the test directory on the local machine. + + +This is a minimal Docker image. If more tools are required this can be used as a +base image in a Dockerfile using ``FROM ``. In the future there may +be images based on Debian, and/or with more tools installed (\ ``curl`` and ``openssl``\ , +for example). If you require a specific configuration please search and +1 an +issue or `raise a new issue `__. + +Further usage examples are in +`the wiki `__. 
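+
+However you choose to install Bats, a quick smoke test is an easy way to confirm that the ``bats`` executable ended up on your ``$PATH``. The following is only a sketch, mirroring the ``addition`` example from the usage documentation; the temporary file name is arbitrary:
+
+.. code-block::
+
+   $ bats --version
+   $ cat > /tmp/smoke.bats <<'EOF'
+   @test "addition using bc" {
+     result="$(echo 2+2 | bc)"
+     [ "$result" -eq 4 ]
+   }
+   EOF
+   $ bats /tmp/smoke.bats
+    ✓ addition using bc
+
+   1 test, 0 failures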
diff --git a/test/bats/docs/source/requirements.txt b/test/bats/docs/source/requirements.txt new file mode 100644 index 000000000..b61574d1e --- /dev/null +++ b/test/bats/docs/source/requirements.txt @@ -0,0 +1,2 @@ +sphinxcontrib-programoutput +recommonmark \ No newline at end of file diff --git a/test/bats/docs/source/support-matrix.rst b/test/bats/docs/source/support-matrix.rst new file mode 100644 index 000000000..b67be4238 --- /dev/null +++ b/test/bats/docs/source/support-matrix.rst @@ -0,0 +1,26 @@ +Support Matrix +============== + +Supported Bash versions +^^^^^^^^^^^^^^^^^^^^^^^ + +The following is a list of Bash versions that are currently supported by Bats and verified through automated tests: + + * 3.2.57(1) (macOS's highest bundled version) + * 4.0, 4.1, 4.2, 4.3, 4.4 + * 5.0, 5.1, 5.2 + +Supported Operating systems +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following Operating Systems are supported and tested automatically (CI) or manually during development: + + * Linux: Alpine (CI), Alma 8 (CI), Arch Linux (manual), Ubuntu 20.04/22.04 (CI) + * FreeBSD: 11 (CI) + * macOS: 11 (CI), 12 (CI) + * Windows: Server 2019 (CI), 10 (manual) + + * Git for Windows Bash (MSYS2 based) + * Windows Subsystem for Linux + * MSYS2 + * Cygwin diff --git a/test/bats/docs/source/tutorial.rst b/test/bats/docs/source/tutorial.rst new file mode 100644 index 000000000..72a5409d8 --- /dev/null +++ b/test/bats/docs/source/tutorial.rst @@ -0,0 +1,661 @@ +Tutorial +======== + +This tutorial is intended for beginners with bats and possibly bash. +Make sure to also read the list of gotchas and the faq. + +For this tutorial we are assuming you already have a project in a git repository and want to add tests. +Ultimately they should run in the CI environment but will also be started locally during development. + +.. + TODO: link to example repository? + +Quick installation +------------------ + +Since we already have an existing git repository, it is very easy to include bats and its libraries as submodules. +We are aiming for following filesystem structure: + +.. code-block:: + + src/ + project.sh + ... + test/ + bats/ <- submodule + test_helper/ + bats-support/ <- submodule + bats-assert/ <- submodule + test.bats + ... + +So we start from the project root: + +.. code-block:: console + + git submodule add https://github.com/bats-core/bats-core.git test/bats + git submodule add https://github.com/bats-core/bats-support.git test/test_helper/bats-support + git submodule add https://github.com/bats-core/bats-assert.git test/test_helper/bats-assert + +Your first test +--------------- + +Now we want to add our first test. + +In the tutorial repository, we want to build up our project in a TDD fashion. +Thus, we start with an empty project and our first test is to just run our (nonexistent) shell script. + +We start by creating a new test file `test/test.bats` + +.. code-block:: bash + + @test "can run our script" { + ./project.sh + } + +and run it by + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✗ can run our script + (in test file test/test.bats, line 2) + `./project.sh' failed with status 127 + /tmp/bats-run-19605/bats.19627.src: line 2: ./project.sh: No such file or directory + + 1 test, 1 failure + +Okay, our test is red. Obviously, the project.sh doesn't exist, so we create the file `src/project.sh`: + +.. code-block:: console + + mkdir src/ + echo '#!/usr/bin/env bash' > src/project.sh + chmod a+x src/project.sh + +A new test run gives us + +.. 
code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✗ can run our script + (in test file test/test.bats, line 2) + `./project.sh' failed with status 127 + /tmp/bats-run-19605/bats.19627.src: line 2: ./project.sh: No such file or directory + + 1 test, 1 failure + +Oh, we still used the wrong path. No problem, we just need to use the correct path to `project.sh`. +Since we're still in the same directory as when we started `bats`, we can simply do: + +.. code-block:: bash + + @test "can run our script" { + ./src/project.sh + } + +and get: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✓ can run our script + + 1 test, 0 failures + +Yesss! But that victory feels shallow: What if somebody less competent than us starts bats from another directory? + +Let's do some setup +------------------- + +The obvious solution to becoming independent of `$PWD` is using some fixed anchor point in the filesystem. +We can use the path to the test file itself as an anchor and rely on the internal project structure. +Since we are lazy people and want to treat our project's files as first class citizens in the executable world, we will also put them on the `$PATH`. +Our new `test/test.bats` now looks like this: + +.. code-block:: bash + + setup() { + # get the containing directory of this file + # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, + # as those will point to the bats executable's location or the preprocessed file respectively + DIR="$( cd "$( dirname "$BATS_TEST_FILENAME" )" >/dev/null 2>&1 && pwd )" + # make executables in src/ visible to PATH + PATH="$DIR/../src:$PATH" + } + + @test "can run our script" { + # notice the missing ./ + # As we added src/ to $PATH, we can omit the relative path to `src/project.sh`. + project.sh + } + +still giving us: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✓ can run our script + + 1 test, 0 failures + +It still works as expected. This is because the newly added `setup` function put the absolute path to `src/` onto `$PATH`. +This setup function is automatically called before each test. +Therefore, our test could execute `project.sh` directly, without using a (relative) path. + +.. important:: + + The `setup` function will be called before each individual test in the file. + Each file can only define one setup function for all tests in the file. + However, the setup functions can differ between different files. + +Dealing with output +------------------- + +Okay, we have a green test but our executable does not do anything useful. +To keep things simple, let us start with an error message. Our new `src/project.sh` now reads: + +.. code-block:: bash + + #!/usr/bin/env bash + + echo "Welcome to our project!" + + echo "NOT IMPLEMENTED!" >&2 + exit 1 + +And gives is this test output: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✗ can run our script + (in test file test/test.bats, line 11) + `project.sh' failed + Welcome to our project! + NOT IMPLEMENTED! + + 1 test, 1 failure + +Okay, our test failed, because we now exit with 1 instead of 0. +Additionally, we see the stdout and stderr of the failing program. + +Our goal now is to retarget our test and check that we get the welcome message. +bats-assert gives us some help with this, so we should now load it (and its dependency bats-support), +so we change `test/test.bats` to + +.. code-block:: bash + + setup() { + load 'test_helper/bats-support/load' + load 'test_helper/bats-assert/load' + # ... 
the remaining setup is unchanged + + # get the containing directory of this file + # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, + # as those will point to the bats executable's location or the preprocessed file respectively + DIR="$( cd "$( dirname "$BATS_TEST_FILENAME" )" >/dev/null 2>&1 && pwd )" + # make executables in src/ visible to PATH + PATH="$DIR/../src:$PATH" + } + + @test "can run our script" { + run project.sh # notice `run`! + assert_output 'Welcome to our project!' + } + +which gives us the following test output: + +.. code-block:: console + + $ LANG=C ./test/bats/bin/bats test/test.bats + ✗ can run our script + (from function `assert_output' in file test/test_helper/bats-assert/src/assert_output.bash, line 194, + in test file test/test.bats, line 14) + `assert_output 'Welcome to our project!'' failed + + -- output differs -- + expected (1 lines): + Welcome to our project! + actual (2 lines): + Welcome to our project! + NOT IMPLEMENTED! + -- + + + 1 test, 1 failure + +The first change in this output is the failure description. We now fail on assert_output instead of the call itself. +We prefixed our call to `project.sh` with `run`, which is a function provided by bats that executes the command it gets passed as parameters. +Then, `run` sucks up the stdout and stderr of the command it ran and stores it in `$output`, stores the exit code in `$status` and returns 0. +This means `run` never fails the test and won't generate any context/output in the log of a failed test on its own. + +Marking the test as failed and printing context information is up to the consumers of `$status` and `$output`. +`assert_output` is such a consumer, it compares `$output` to the parameter it got and tells us quite succinctly that it did not match in this case. + +For our current test we don't care about any other output or the error message, so we want it gone. +`grep` is always at our fingertips, so we tape together this ramshackle construct + +.. code-block:: bash + + run project.sh 2>&1 | grep Welcome + +which gives us the following test result: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✗ can run our script + (in test file test/test.bats, line 13) + `run project.sh | grep Welcome' failed + + 1 test, 1 failure + +Huh, what is going on? Why does it fail the `run` line again? + +This is a common mistake that can happen when our mind parses the file differently than the bash parser. +`run` is just a function, so the pipe won't actually be forwarded into the function. Bash reads this as `(run project.sh) | grep Welcome`, +instead of our intended `run (project.sh | grep Welcome)`. + +Unfortunately, the latter is not valid bash syntax, so we have to work around it, e.g. by using a function: + +.. code-block:: bash + + get_projectsh_welcome_message() { + project.sh 2>&1 | grep Welcome + } + + @test "Check welcome message" { + run get_projectsh_welcome_message + assert_output 'Welcome to our project!' + } + +Now our test passes again but having to write a function each time we want only a partial match does not accommodate our laziness. +Isn't there an app for that? Maybe we should look at the documentation? + + Partial matching can be enabled with the --partial option (-p for short). When used, the assertion fails if the expected substring is not found in $output. + + -- the documentation for `assert_output `_ + +Okay, so maybe we should try that: + +.. 
code-block:: bash + + @test "Check welcome message" { + run project.sh + assert_output --partial 'Welcome to our project!' + } + +Aaannnd ... the test stays green. Yay! + +There are many other asserts and options but this is not the place for all of them. +Skimming the documentation of `bats-assert `_ will give you a good idea what you can do. +You should also have a look at the other helper libraries `here `_ like `bats-file `_, +to avoid reinventing the wheel. + + +Cleaning up your mess +--------------------- + +Often our setup or tests leave behind some artifacts that clutter our test environment. +You can define a `teardown` function which will be called after each test, regardless whether it failed or not. + +For example, we now want our project.sh to only show the welcome message on the first invocation. +So we change our test to this: + +.. code-block:: bash + + @test "Show welcome message on first invocation" { + run project.sh + assert_output --partial 'Welcome to our project!' + + run project.sh + refute_output --partial 'Welcome to our project!' + } + +This test fails as expected: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✗ Show welcome message on first invocation + (from function `refute_output' in file test/test_helper/bats-assert/src/refute_output.bash, line 189, + in test file test/test.bats, line 17) + `refute_output --partial 'Welcome to our project!'' failed + + -- output should not contain substring -- + substring (1 lines): + Welcome to our project! + output (2 lines): + Welcome to our project! + NOT IMPLEMENTED! + -- + + + 1 test, 1 failure + +Now, to get the test green again, we want to store the information that we already ran in the file `/tmp/bats-tutorial-project-ran`, +so our `src/project.sh` becomes: + +.. code-block:: bash + + #!/usr/bin/env bash + + FIRST_RUN_FILE=/tmp/bats-tutorial-project-ran + + if [[ ! -e "$FIRST_RUN_FILE" ]]; then + echo "Welcome to our project!" + touch "$FIRST_RUN_FILE" + fi + + echo "NOT IMPLEMENTED!" >&2 + exit 1 + +And our test says: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✓ Show welcome message on first invocation + + 1 test, 0 failures + +Nice, we're done, or are we? Running the test again now gives: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + ✗ Show welcome message on first invocation + (from function `assert_output' in file test/test_helper/bats-assert/src/assert_output.bash, line 186, + in test file test/test.bats, line 14) + `assert_output --partial 'Welcome to our project!'' failed + + -- output does not contain substring -- + substring : Welcome to our project! + output : NOT IMPLEMENTED! + -- + + + 1 test, 1 failure + +Now the first assert failed, because of the leftover `$FIRST_RUN_FILE` from the last test run. + +Luckily, bats offers the `teardown` function, which can take care of that, we add the following code to `test/test.bats`: + +.. code-block:: bash + + teardown() { + rm -f /tmp/bats-tutorial-project-ran + } + +Now running the test again first give us the same error, as the teardown has not run yet. +On the second try we get a clean `/tmp` folder again and our test passes consistently now. + +It is worth noting that we could do this `rm` in the test code itself but it would get skipped on failures. + +.. important:: + + A test ends at its first failure. None of the subsequent commands in this test will be executed. + The `teardown` function runs after each individual test in a file, regardless of test success or failure. 
+ Similarly to `setup`, each `.bats` file can have its own `teardown` function which will be the same for all tests in the file. + +Test what you can +----------------- + +Sometimes tests rely on the environment to provide infrastructure that is needed for the test. +If not all test environments provide this infrastructure but we still want to test on them, +it would be unhelpful to get errors on parts that are not testable. + +Bats provides you with the `skip` command which can be used in `setup` and `test`. + +.. tip:: + + You should `skip` as early as you know it does not make sense to continue. + +In our example project we rewrite the welcome message test to `skip` instead of doing cleanup: + +.. code-block:: bash + + teardown() { + : # Look Ma! No cleanup! + } + + @test "Show welcome message on first invocation" { + if [[ -e /tmp/bats-tutorial-project-ran ]]; then + skip 'The FIRST_RUN_FILE already exists' + fi + + run project.sh + assert_output --partial 'Welcome to our project!' + + run project.sh + refute_output --partial 'Welcome to our project!' + } + +The first test run still works due to the cleanup from the last round. However, our second run gives us: + +.. code-block:: console + + $ ./test/bats/bin/bats test/test.bats + - Show welcome message on first invocation (skipped: The FIRST_RUN_FILE already exists) + + 1 test, 0 failures, 1 skipped + +.. important:: + + Skipped tests won't fail a test suite and are counted separately. + No test command after `skip` will be executed. If an error occurs before `skip`, the test will fail. + An optional reason can be passed to `skip` and will be printed in the test output. + +Setting up a multifile test suite +--------------------------------- + +With a growing project, putting all tests into one file becomes unwieldy. +For our example project, we will extract functionality into the additional file `src/helper.sh`: + +.. code-block:: bash + + #!/usr/bin/env bash + + _is_first_run() { + local FIRST_RUN_FILE=/tmp/bats-tutorial-project-ran + if [[ ! -e "$FIRST_RUN_FILE" ]]; then + touch "$FIRST_RUN_FILE" + return 0 + fi + return 1 + } + +This allows for testing it separately in a new file `test/helper.bats`: + +.. code-block:: bash + + setup() { + load 'test_helper/common-setup' + _common_setup + + source "$PROJECT_ROOT/src/helper.sh" + } + + teardown() { + rm -f "$NON_EXISTANT_FIRST_RUN_FILE" + rm -f "$EXISTING_FIRST_RUN_FILE" + } + + @test "Check first run" { + NON_EXISTANT_FIRST_RUN_FILE=$(mktemp -u) # only create the name, not the file itself + + assert _is_first_run + refute _is_first_run + refute _is_first_run + + EXISTING_FIRST_RUN_FILE=$(mktemp) + refute _is_first_run + refute _is_first_run + } + +Since the setup function would have duplicated much of the other files', we split that out into the file `test/test_helper/common-setup.bash`: + +.. code-block:: bash + + #!/usr/bin/env bash + + _common_setup() { + load 'test_helper/bats-support/load' + load 'test_helper/bats-assert/load' + # get the containing directory of this file + # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, + # as those will point to the bats executable's location or the preprocessed file respectively + PROJECT_ROOT="$( cd "$( dirname "$BATS_TEST_FILENAME" )/.." >/dev/null 2>&1 && pwd )" + # make executables in src/ visible to PATH + PATH="$PROJECT_ROOT/src:$PATH" + } + +with the following `setup` in `test/test.bats`: + +.. 
code-block:: bash + + setup() { + load 'test_helper/common-setup' + _common_setup + } + +Please note, that we gave our helper the extension `.bash`, which is automatically appended by `load`. + +.. important:: + + `load` automatically tries to append `.bash` to its argument. + +In our new `test/helper.bats` we can see, that loading `.sh` is simply done via `source`. + +.. tip:: + + Avoid using `load` and `source` outside of any functions. + If there is an error in the test file's "free code", the diagnostics are much worse than for code in `setup` or `@test`. + +With the new changes in place, we can run our tests again. However, our previous run command does not include the new file. +You could add the new file to the parameter list, e.g. by running `./test/bats/bin/bats test/*.bats`. +However, bats also can handle directories: + +.. code-block:: console + + $ ./test/bats/bin/bats test/ + ✓ Check first run + - Show welcome message on first invocation (skipped: The FIRST_RUN_FILE already exists) + + 2 tests, 0 failures, 1 skipped + +In this mode, bats will pick up all `.bats` files in the directory it was given. There is an additional `-r` switch that will recursively search for more `.bats` files. +However, in our project layout this would pick up the test files of bats itself from `test/bats/test`. We don't have test subfolders anyways, so we can do without `-r`. + + +Avoiding costly repeated setups +------------------------------- + +We already have seen the `setup` function in use, which is called before each test. +Sometimes our setup is very costly, such as booting up a service just for testing. +If we can reuse the same setup across multiple tests, we might want to do only one setup before all these tests. + +This usecase is exactly what the `setup_file` function was created for. +It can be defined per file and will run before all tests of the respective file. +Similarly, we have `teardown_file`, which will run after all tests of the file, even when you abort a test run or a test failed. + +As an example, we want to add an echo server capability to our project. First, we add the following `server.bats` to our suite: + +.. code-block:: bash + + setup_file() { + load 'test_helper/common-setup' + _common_setup + PORT=$(project.sh start-echo-server 2>&1 >/dev/null) + export PORT + } + + @test "server is reachable" { + nc -z localhost "$PORT" + } + +Which will obviously fail: + +Note that `export PORT` to make it visible to the test! +Running this gives us: + +.. + TODO: Update this example with fixed test name reporting from setup_file? (instead of "✗ ") + +.. code-block:: console + + $ ./test/bats/bin/bats test/server.bats + ✗ + (from function `setup_file' in test file test/server.bats, line 4) + `PORT=$(project.sh start-echo-server >/dev/null 2>&1)' failed + + 1 test, 1 failure + +Now that we got our red test, we need to get it green again. +Our new `project.sh` now ends with: + +.. code-block:: bash + + case $1 in + start-echo-server) + echo "Starting echo server" + PORT=2000 + ncat -l $PORT -k -c 'xargs -n1 echo' 2>/dev/null & # don't keep open this script's stderr + echo $! > /tmp/project-echo-server.pid + echo "$PORT" >&2 + ;; + *) + echo "NOT IMPLEMENTED!" >&2 + exit 1 + ;; + esac + +and the tests now say + +.. code-block:: console + + $ LANG=C ./test/bats/bin/bats test/server.bats + ✓ server is reachable + + 1 test, 0 failures + +However, running this a second time gives: + +.. 
code-block:: console + + $ ./test/bats/bin/bats test/server.bats + ✗ server is reachable + (in test file test/server.bats, line 14) + `nc -z -w 2 localhost "$PORT"' failed + 2000 + Ncat: bind to :::2000: Address already in use. QUITTING. + nc: port number invalid: 2000 + Ncat: bind to :::2000: Address already in use. QUITTING. + + 1 test, 1 failure + +Obviously, we did not turn off our server after testing. +This is a task for `teardown_file` in `server.bats`: + +.. code-block:: bash + + teardown_file() { + project.sh stop-echo-server + } + +Our `project.sh` should also get the new command: + +.. code-block:: bash + + stop-echo-server) + kill "$(< "/tmp/project-echo-server.pid")" + rm /tmp/project-echo-server.pid + ;; + +Now starting our tests again will overwrite the .pid file with the new instance's, so we have to do manual cleanup once. +From now on, our test should clean up after itself. + +.. note:: + + `teardown_file` will run regardless of tests failing or succeeding. diff --git a/test/bats/docs/source/usage.md b/test/bats/docs/source/usage.md new file mode 100644 index 000000000..bdbe407d5 --- /dev/null +++ b/test/bats/docs/source/usage.md @@ -0,0 +1,114 @@ +# Usage + +Bats comes with two manual pages. After installation you can view them with `man +1 bats` (usage manual) and `man 7 bats` (writing test files manual). Also, you +can view the available command line options that Bats supports by calling Bats +with the `-h` or `--help` options. These are the options that Bats currently +supports: + +``` eval_rst +.. program-output:: ../../bin/bats --help +``` + +To run your tests, invoke the `bats` interpreter with one or more paths to test +files ending with the `.bats` extension, or paths to directories containing test +files. (`bats` will only execute `.bats` files at the top level of each +directory; it will not recurse unless you specify the `-r` flag.) + +Test cases from each file are run sequentially and in isolation. If all the test +cases pass, `bats` exits with a `0` status code. If there are any failures, +`bats` exits with a `1` status code. + +When you run Bats from a terminal, you'll see output as each test is performed, +with a check-mark next to the test's name if it passes or an "X" if it fails. + +```text +$ bats addition.bats + ✓ addition using bc + ✓ addition using dc + +2 tests, 0 failures +``` + +If Bats is not connected to a terminal—in other words, if you run it from a +continuous integration system, or redirect its output to a file—the results are +displayed in human-readable, machine-parsable [TAP format][tap-format]. + +You can force TAP output from a terminal by invoking Bats with the `--formatter tap` +option. + +```text +$ bats --formatter tap addition.bats +1..2 +ok 1 addition using bc +ok 2 addition using dc +``` + +With `--formatter junit`, it is possible +to output junit-compatible report files. + +```text +$ bats --formatter junit addition.bats +1..2 +ok 1 addition using bc +ok 2 addition using dc +``` + +If you have your own formatter, you can use an absolute path to the executable +to use it: + +```bash +$ bats --formatter /absolute/path/to/my-formatter addition.bats +addition using bc WORKED +addition using dc FAILED +``` + +You can also generate test report files via `--report-formatter` which accepts +the same options as `--formatter`. By default, the file is stored in the current +workdir. However, it may be placed elsewhere by specifying the `--output` flag. 
+ +```text +$ bats --report-formatter junit addition.bats --output /tmp +1..2 +ok 1 addition using bc +ok 2 addition using dc + +$ cat /tmp/report.xml + + + + + + + +``` + +## Parallel Execution + +``` eval_rst +.. versionadded:: 1.0.0 +``` + +By default, Bats will execute your tests serially. However, Bats supports +parallel execution of tests (provided you have [GNU parallel][gnu-parallel] or +a compatible replacement installed) using the `--jobs` parameter. This can +result in your tests completing faster (depending on your tests and the testing +hardware). + +Ordering of parallelised tests is not guaranteed, so this mode may break suites +with dependencies between tests (or tests that write to shared locations). When +enabling `--jobs` for the first time be sure to re-run bats multiple times to +identify any inter-test dependencies or non-deterministic test behaviour. + +When parallelizing, the results of a file only become visible after it has been finished. +You can use `--no-parallelize-across-files` to get immediate output at the cost of reduced +overall parallelity, as parallelization will only happen within files and files will be run +sequentially. + +If you have files where tests within the file would interfere with each other, you can use +`--no-parallelize-within-files` to disable parallelization within all files. +If you want more fine-grained control, you can `export BATS_NO_PARALLELIZE_WITHIN_FILE=true` in `setup_file()` +or outside any function to disable parallelization only within the containing file. + +[tap-format]: https://testanything.org +[gnu-parallel]: https://www.gnu.org/software/parallel/ diff --git a/test/bats/docs/source/warnings/BW01.rst b/test/bats/docs/source/warnings/BW01.rst new file mode 100644 index 000000000..8ae8bb67d --- /dev/null +++ b/test/bats/docs/source/warnings/BW01.rst @@ -0,0 +1,17 @@ +BW01: `run`'s command `` exited with code 127, indicating 'Command not found'. Use run's return code checks, e.g. `run -127`, to fix this message. +=========================================================================================================================================================== + +Due to `run`'s default behavior of always succeeding, errors in the command string can remain hidden from the user, e.g.[here](https://github.com/bats-core/bats-core/issues/578). +As a proxy for this problem, the return code is checked for value 127 ("Command not found"). + +How to fix +---------- + +If your command should actually return code 127, then you can simply use `run -127 ` to state your intent and the message will go away. + +If your command should not return 127, you should fix the problem with the command. +Take a careful look at the command string in the warning message, to see if it contains code that you did not intend to run. + +If your command should sometimes return 127, but never 0, you can use `run ! `. + +If your command can sometimes return 127 and sometimes 0, the please submit an issue. \ No newline at end of file diff --git a/test/bats/docs/source/warnings/BW02.rst b/test/bats/docs/source/warnings/BW02.rst new file mode 100644 index 000000000..33936128e --- /dev/null +++ b/test/bats/docs/source/warnings/BW02.rst @@ -0,0 +1,46 @@ +BW02: requires at least BATS_VERSION=. Use `bats_require_minimum_version ` to fix this message. 
+=========================================================================================================================== + +Using a feature that is only available starting with a certain version can be a problem when your tests also run on older versions of Bats. +In most cases, running this code in older versions will generate an error due to a missing command. +However, in cases like `run`'s where old version simply take all parameters as command to execute, the failure can be silent. + +How to fix +---------- + +When you encounter this warning, you can simply guard your code with `bats_require_minimum_version ` as the message says. +For example, consider the following code: + +.. code-block:: bash + + @test test { + bats_require_minimum_version 1.5.0 + # pre 1.5.0 the flag --separate-stderr would be interpreted as command to run + run --separate-stderr some-command + [ $output = "blablabla" ] + } + + +The call to `bats_require_minimum_version` can be put anywhere before the warning generating command, even in `setup`, `setup_file`, or even outside any function. +This can be used to give fine control over the version dependencies: + +.. code-block:: bash + + @test test { + bats_require_minimum_version 1.5.0 + # pre 1.5.0 the flag --separate-stderr would be interpreted as command to run + run --separate-stderr some-command + [ $output = "blablabla" ] + } + + @test test2 { + run some-other-command # no problem executing on earlier version + } + + +If the above code is executed on a system with a `BATS_VERSION` pre 1.5.0, the first test will fail on `bats_require_minimum_version 1.5.0`. + +Instances: +---------- + +- run's non command parameters like `--keep-empty-lines` are only available since 1.5.0 \ No newline at end of file diff --git a/test/bats/docs/source/warnings/BW03.rst b/test/bats/docs/source/warnings/BW03.rst new file mode 100644 index 000000000..2a0adc0dc --- /dev/null +++ b/test/bats/docs/source/warnings/BW03.rst @@ -0,0 +1,15 @@ +BW03: `setup_suite` is visible to test file '', but was not executed. It belongs into 'setup_suite.bash' to be picked up automatically. +============================================================================================================================================= + +In contrast to the other setup functions, `setup_suite` must not be defined in `*.bats` files but in `setup_suite.bash`. +When a file is executed and sees `setup_suite` defined but not run before the tests, this warning will be printed. + +How to fix +---------- + +The fix depends on your actual intention. There are basically two cases: + +1. You want a setup before all tests and accidentally put `setup_suite` into a test file instead of `setup_suite.bash`. + Simply move `setup_suite` (and `teardown_suite`!) into `setup_suite.bash`. +2. You did not mean to run a setup before any test but need to defined a function named `setup_suite` in your test file. + In this case, you can silence this warning by assigning `BATS_SETUP_SUITE_COMPLETED='suppress BW03'`. \ No newline at end of file diff --git a/test/bats/docs/source/warnings/index.rst b/test/bats/docs/source/warnings/index.rst new file mode 100644 index 000000000..957fc4305 --- /dev/null +++ b/test/bats/docs/source/warnings/index.rst @@ -0,0 +1,28 @@ +Warnings +======== + +Starting with version 1.7.0 Bats shows warnings about issues it found during the test run. +They are printed on stderr after all other output: + +.. 
code-block:: bash + + BW01.bats + ✓ Trigger BW01 + + 1 test, 0 failures + + + The following warnings were encountered during tests: + BW01: `run`'s command `=0 actually-intended-command with some args` exited with code 127, indicating 'Command not found'. Use run's return code checks, e.g. `run -127`, to fix this message. + (from function `run' in file lib/bats-core/test_functions.bash, line 299, + in test file test/fixtures/warnings/BW01.bats, line 3) + +A warning will not make a successful run fail but should be investigated and taken seriously, since it hints at a possible error. + +Currently, Bats emits the following warnings: + +.. toctree:: + + BW01 + BW02 + BW03 \ No newline at end of file diff --git a/test/bats/docs/source/writing-tests.md b/test/bats/docs/source/writing-tests.md new file mode 100644 index 000000000..25541aabc --- /dev/null +++ b/test/bats/docs/source/writing-tests.md @@ -0,0 +1,614 @@ +# Writing tests + +Each Bats test file is evaluated _n+1_ times, where _n_ is the number of +test cases in the file. The first run counts the number of test cases, +then iterates over the test cases and executes each one in its own +process. + +For more details about how Bats evaluates test files, see [Bats Evaluation +Process][bats-eval] on the wiki. + +For sample test files, see [examples](https://github.com/bats-core/bats-core/tree/master/docs/examples). + +[bats-eval]: https://github.com/bats-core/bats-core/wiki/Bats-Evaluation-Process + +## Tagging tests + +Starting with version 1.8.0, Bats comes with a tagging system that allows users +to categorize their tests and filter according to those categories. + +Each test has a list of tags attached to it. Without specification, this list is empty. +Tags can be defined in two ways. The first being `# bats test_tags=`: + +```bash +# bats test_tags=tag:1, tag:2, tag:3 +@test "first test" { + # ... +} + +@test "second test" { + # ... +} +``` + +These tags (`tag:1`, `tag:2`, `tag:3`) will be attached to the test `first test`. +The second test will have no tags attached. Values defined in the `# bats test_tags=` +directive will be assigned to the next `@test` that is being encountered in the +file and forgotten after that. Only the value of the last `# bats test_tags=` directive +before a given test will be used. + +Sometimes, we want to give all tests in a file a set of the same tags. This can +be achieved via `# bats file_tags=`. They will be added to all tests in the file +after that directive. An additional `# bats file_tags=` directive will override +the previously defined values: + +```bash +@test "Zeroth test" { + # will have no tags +} + +# bats file_tags=a:b +# bats test_tags=c:d + +@test "First test" { + # will be tagged a:b, c:d +} + +# bats file_tags= + +@test "Second test" { + # will have no tags +} +``` + +Tags are case sensitive and must only consist of alphanumeric characters and `_`, + `-`, or `:`. They must not contain whitespaces! +The colon is intended as a separator for (recursive) namespacing. + +Tag lists must be separated by commas and are allowed to contain whitespace. +They must not contain empty tags like `test_tags=,b` (first tag is empty), +`test_tags=a,,c`, `test_tags=a, ,c` (second tag is only whitespace/empty), +`test_tags=a,b,` (third tag is empty). + +Every tag starting with `bats:` (case insensitive!) is reserved for Bats' +internal use. 
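+
+For instance, a directive that follows these rules might look like this (the tag names are made up purely to illustrate comma separation and colon namespacing):
+
+```bash
+# bats test_tags=os:linux, speed:slow, needs-network
+@test "downloads the fixture archive" {
+  # ...
+}
+```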
+
+### Special tags
+
+#### Focusing on tests with `bats:focus` tag
+
+If a test with the tag `bats:focus` is encountered in a test suite,
+all other tests will be filtered out and only those tagged with this tag will be executed.
+
+In focus mode, the exit code of successful runs will be overridden to 1 to prevent CI from silently running on a subset of tests due to an accidentally committed `bats:focus` tag.
+Should you require the true exit code, e.g. for a `git bisect` operation, you can disable this behavior by setting
+`BATS_NO_FAIL_FOCUS_RUN=1` when running `bats`, but make sure not to commit this to CI!
+
+### Filtering execution
+
+Tags can be used for more fine-grained filtering of which tests to run via `--filter-tags`.
+This accepts a comma-separated list of tags. Only tests that match all of these
+tags will be executed. For example, `bats --filter-tags a,b,c` will pick up tests
+with tags `a,b,c`, but not tests that miss one or more of those tags.
+
+Additionally, you can specify negative tags via `bats --filter-tags a,!b,c`,
+which now won't match tests with tags `a,b,c`, due to the `b`, but will select `a,c`.
+To put it more formally, `--filter-tags` is a boolean conjunction.
+
+To allow for more complex queries, you can specify multiple `--filter-tags`.
+A test will be executed if it matches at least one of them.
+This means multiple `--filter-tags` form a boolean disjunction.
+
+A query of `--filter-tags a,!b --filter-tags b,c` can be translated to:
+Execute only tests that (have tag a, but not tag b) or (have tag b and c).
+
+An empty tag list matches tests without tags.
+
+## Comment syntax
+
+External tools (like `shellcheck`, `shfmt`, and various IDEs) may not support
+the standard `.bats` syntax. Because of this, we provide a valid `bash`
+alternative:
+
+```bash
+function invoking_foo_without_arguments_prints_usage { #@test
+  run foo
+  [ "$status" -eq 1 ]
+  [ "${lines[0]}" = "usage: foo " ]
+}
+```
+
+When using this syntax, the function name will be the title in the result output
+and the value checked when using `--filter`.
+
+## `run`: Test other commands
+
+Many Bats tests need to run a command and then make assertions about its exit
+status and output. Bats includes a `run` helper that invokes its arguments as a
+command, saves the exit status and output into special global variables, and
+then returns with a `0` status code so you can continue to make assertions in
+your test case.
+
+For example, let's say you're testing that the `foo` command, when passed a
+nonexistent filename, exits with a `1` status code and prints an error message.
+
+```bash
+@test "invoking foo with a nonexistent file prints an error" {
+  run foo nonexistent_filename
+  [ "$status" -eq 1 ]
+  [ "$output" = "foo: no such file 'nonexistent_filename'" ]
+  [ "$BATS_RUN_COMMAND" = "foo nonexistent_filename" ]
+}
+```
+
+The `$status` variable contains the status code of the command, the
+`$output` variable contains the combined contents of the command's standard
+output and standard error streams, and the `$BATS_RUN_COMMAND` string contains the
+command and command arguments passed to `run` for execution.
+
+If invoked with one of the following as the first argument, `run`
+will perform an implicit check on the exit status of the invoked command:
+
+```pre
+  -N        expect exit status N (0-255), fail if otherwise
+  !
expect nonzero exit status (1-255), fail if command succeeds +``` + +We can then write the above more elegantly as: + +```bash +@test "invoking foo with a nonexistent file prints an error" { + run -1 foo nonexistent_filename + [ "$output" = "foo: no such file 'nonexistent_filename'" ] +} +``` + +A third special variable, the `$lines` array, is available for easily accessing +individual lines of output. For example, if you want to test that invoking `foo` +without any arguments prints usage information on the first line: + +```bash +@test "invoking foo without arguments prints usage" { + run -1 foo + [ "${lines[0]}" = "usage: foo " ] +} +``` + +__Note:__ The `run` helper executes its argument(s) in a subshell, so if +writing tests against environmental side-effects like a variable's value +being changed, these changes will not persist after `run` completes. + +By default `run` leaves out empty lines in `${lines[@]}`. Use +`run --keep-empty-lines` to retain them. + +Additionally, you can use `--separate-stderr` to split stdout and stderr +into `$output`/`$stderr` and `${lines[@]}`/`${stderr_lines[@]}`. + +All additional parameters to run should come before the command. +If you want to run a command that starts with `-`, prefix it with `--` to +prevent `run` from parsing it as an option. + +### When not to use `run` + +In case you only need to check the command succeeded, it is better to not use `run`, since the following code + +```bash +run -0 command args ... +``` + +is equivalent to + +```bash +command args ... +``` + +(because bats sets `set -e` for all tests). + +__Note__: In contrast to the above, testing that a command failed is best done via + +```bash +run ! command args ... +``` + +because + +```bash +! command args ... +``` + +will only fail the test if it is the last command and thereby determines the test function's exit code. +This is due to Bash's decision to (counterintuitively?) not trigger `set -e` on `!` commands. +(See also [the associated gotcha](https://bats-core.readthedocs.io/en/stable/gotchas.html#my-negated-statement-e-g-true-does-not-fail-the-test-even-when-it-should)) + + +### `run` and pipes + +Don't fool yourself with pipes when using `run`. Bash parses the pipe outside of `run`, not internal to its command. Take this example: + +```bash +run command args ... | jq -e '.limit == 42' +``` + +Here, `jq` receives no input (which is captured by `run`), +executes no filters, and always succeeds, so the test does not work as +expected. + +Instead use a Bash subshell: + +```bash +run bash -c "command args ... | jq -e '.limit == 42'" +``` + +This subshell is a fresh Bash environment, and will only inherit variables +and functions that are exported into it. + +```bash +limit() { jq -e '.limit == 42'; } +export -f limit +run bash -c "command args ... | limit" +``` + + +## `load`: Share common code + +You may want to share common code across multiple test files. Bats +includes a convenient `load` command for sourcing a Bash source files +relative to the current test file and from absolute paths. + +For example, if you have a Bats test in `test/foo.bats`, the command + +```bash +load test_helper.bash +``` + +will source the script `test/test_helper.bash` in your test file (limitations +apply, see below). This can be useful for sharing functions to set up your +environment or load fixtures. `load` delegates to Bash's `source` command after +resolving paths. + +If `load` encounters errors - e.g. 
because the targeted source file +errored - it will print a message with the failing library and Bats +exits. + +To allow to use `load` in conditions `bats_load_safe` has been added. +`bats_load_safe` prints a message and returns `1` if a source file cannot be +loaded instead of exiting Bats. +Aside from that `bats_load_safe` acts exactly like `load`. + +As pointed out by @iatrou in https://www.tldp.org/LDP/abs/html/declareref.html, +using the `declare` builtin restricts scope of a variable. Thus, since actual +`source`-ing is performed in context of the `load` function, `declare`d symbols +will _not_ be made available to callers of `load`. + +### `load` argument resolution + +`load` supports the following arguments: + +- absolute paths +- relative paths (to the current test file) + +> For backwards compatibility `load` first searches for a file ending in +> `.bash` (e.g. `load test_helper` searches for `test_helper.bash` before +> it looks for `test_helper`). This behaviour is deprecated and subject to +> change, please use exact filenames instead. + +If `argument` is an absolute path `load` tries to determine the load +path directly. + +If `argument` is a relative path or a name `load` looks for a matching +path in the directory of the current test. + +## `bats_load_library`: Load system wide libraries + +Some libraries are installed on the system, e.g. by `npm` or `brew`. +These should not be `load`ed, as their path depends on the installation method. +Instead, one should use `bats_load_library` together with setting +`BATS_LIB_PATH`, a `PATH`-like colon-delimited variable. + +`bats_load_library` has two modes of resolving requests: + +1. by relative path from the `BATS_LIB_PATH` to a file in the library +2. by library name, expecting libraries to have a `load.bash` entrypoint + +For example if your `BATS_LIB_PATH` is set to +`~/.bats/libs:/usr/lib/bats`, then `bats_load_library test_helper` +would look for existing files with the following paths: + +- `~/.bats/libs/test_helper` +- `~/.bats/libs/test_helper/load.bash` +- `/usr/lib/bats/test_helper` +- `/usr/lib/bats/test_helper/load.bash` + +The first existing file in this list will be sourced. + +If you want to load only part of a library or the entry point is not named `load.bash`, +you have to include it in the argument: +`bats_load_library library_name/file_to_load` will try + +- `~/.bats/libs/library_name/file_to_load` +- `~/.bats/libs/library_name/file_to_load/load.bash` +- `/usr/lib/bats/library_name/file_to_load` +- `/usr/lib/bats/library_name/file_to_load/load.bash` + +Apart from the changed lookup rules, `bats_load_library` behaves like `load`. + +__Note:__ As seen above `load.bash` is the entry point for libraries and +meant to load more files from its directory or other libraries. + +__Note:__ Obviously, the actual `BATS_LIB_PATH` is highly dependent on the environment. +To maintain a uniform location across systems, (distribution) package maintainers +are encouraged to use `/usr/lib/bats/` as the install path for libraries where possible. +However, if the package manager has another preferred location, like `npm` or `brew`, +you should use this instead. + +## `skip`: Easily skip tests + +Tests can be skipped by using the `skip` command at the point in a test you wish +to skip. 
+ +```bash +@test "A test I don't want to execute for now" { + skip + run foo + [ "$status" -eq 0 ] +} +``` + +Optionally, you may include a reason for skipping: + +```bash +@test "A test I don't want to execute for now" { + skip "This command will return zero soon, but not now" + run foo + [ "$status" -eq 0 ] +} +``` + +Or you can skip conditionally: + +```bash +@test "A test which should run" { + if [ foo != bar ]; then + skip "foo isn't bar" + fi + + run foo + [ "$status" -eq 0 ] +} +``` + +__Note:__ `setup` and `teardown` hooks still run for skipped tests. + +## `setup` and `teardown`: Pre- and post-test hooks + +You can define special `setup` and `teardown` functions, which run before and +after each test case, respectively. Use these to load fixtures, set up your +environment, and clean up when you're done. + +You can also define `setup_file` and `teardown_file`, which will run once before +the first test's `setup` and after the last test's `teardown` for the containing +file. Variables that are exported in `setup_file` will be visible to all following +functions (`setup`, the test itself, `teardown`, `teardown_file`). + +Similarly, there is `setup_suite` (and `teardown_suite`) which run once before (and +after) all tests of the test run. + +__Note:__ As `setup_suite` and `teardown_suite` are intended for all files in a suite, +they must be defined in a separate `setup_suite.bash` file. Automatic discovery works +by searching for `setup_suite.bash` in the folder of the first `*.bats` file of the suite. +If this automatism does not work for your usecase, you can work around by specifying +`--setup-suite-file` on the `bats` command. If you have a `setup_suite.bash`, it must define +`setup_suite`! However, defining `teardown_suite` is optional. + + +
+ Example of setup/{,_file,_suite} (and teardown{,_file,_suite}) call order +For example the following call order would result from two files (file 1 with +tests 1 and 2, and file 2 with test3) with a corresponding `setup_suite.bash` file being tested: + +```text +setup_suite # from setup_suite.bash + setup_file # from file 1, on entering file 1 + setup + test1 + teardown + setup + test2 + teardown + teardown_file # from file 1, on leaving file 1 + setup_file # from file 2, on enter file 2 + setup + test3 + teardown + teardown_file # from file 2, on leaving file 2 +teardown_suite # from setup_suite.bash +``` + +
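+
+Putting the suite-level hooks together, a minimal `setup_suite.bash` might look like the following sketch (the scratch directory is only an illustration, not part of Bats):
+
+```bash
+# setup_suite.bash -- must sit next to the first *.bats file of the suite
+setup_suite() {
+  # runs once, before the first file's setup_file
+  export SUITE_SCRATCH_DIR="$(mktemp -d)"
+}
+
+teardown_suite() {
+  # runs once, after the last file's teardown_file, even on failures
+  rm -rf "$SUITE_SCRATCH_DIR"
+}
+```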
+ + +Note that the `teardown*` functions can fail a test, if their return code is nonzero. +This means, using `return 1` or having the last command in teardown fail, will +fail the teardown. Unlike `@test`, failing commands within `teardown` won't +trigger failure as ERREXIT is disabled. + + +
+ Example of different teardown failure modes + +```bash +teardown() { + false # this will fail the test, as it determines the return code +} + +teardown() { + false # this won't fail the test ... + echo some more code # ... and this will be executed too! +} + +teardown() { + return 1 # this will fail the test, but the rest won't be executed + echo some more code +} + +teardown() { + if true; then + false # this will also fail the test, as it is the last command in this function + else + true + fi +} +``` + +
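+
+If you have optional cleanup in `teardown` that should never decide the test's result, a common guard is the usual bash `|| true` idiom on the final command (a sketch; the file name is made up):
+
+```bash
+teardown() {
+  # the last command's status decides whether teardown fails,
+  # so '|| true' keeps best-effort cleanup from failing the test
+  rm -f "$TEST_SCRATCH_FILE" || true
+}
+```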
+ + +## `bats_require_minimum_version ` + +Added in [v1.7.0](https://github.com/bats-core/bats-core/releases/tag/v1.7.0) + +Code for newer versions of Bats can be incompatible with older versions. +In the best case this will lead to an error message and a failed test suite. +In the worst case, the tests will pass erroneously, potentially masking a failure. + +Use `bats_require_minimum_version ` to avoid this. +It communicates in a concise manner, that you intend the following code to be run +under the given Bats version or higher. + +Additionally, this function will communicate the current Bats version floor to +subsequent code, allowing e.g. Bats' internal warning to give more informed warnings. + +__Note__: By default, calling `bats_require_minimum_version` with versions before +Bats 1.7.0 will fail regardless of the required version as the function is not +available. However, you can use the +[bats-backports plugin](https://github.com/bats-core/bats-backports) to make +your code usable with older versions, e.g. during migration while your CI system +is not yet upgraded. + +## Code outside of test cases + +In general you should avoid code outside tests, because each test file will be evaluated many times. +However, there are situations in which this might be useful, e.g. when you want to check for dependencies +and fail immediately if they're not present. + +In general, you should avoid printing outside of `@test`, `setup*` or `teardown*` functions. +Have a look at section [printing to the terminal](#printing-to-the-terminal) for more details. +## File descriptor 3 (read this if Bats hangs) + +Bats makes a separation between output from the code under test and output that +forms the TAP stream (which is produced by Bats internals). This is done in +order to produce TAP-compliant output. In the [Printing to the +terminal](#printing-to-the-terminal) section, there are details on how to use +file descriptor 3 to print custom text properly. + +A side effect of using file descriptor 3 is that, under some circumstances, it +can cause Bats to block and execution to seem dead without reason. This can +happen if a child process is spawned in the background from a test. In this +case, the child process will inherit file descriptor 3. Bats, as the parent +process, will wait for the file descriptor to be closed by the child process +before continuing execution. If the child process takes a lot of time to +complete (eg if the child process is a `sleep 100` command or a background +service that will run indefinitely), Bats will be similarly blocked for the same +amount of time. + +**To prevent this from happening, close FD 3 explicitly when running any command +that may launch long-running child processes**, e.g. `command_name 3>&-` . + +## Printing to the terminal + +Bats produces output compliant with [version 12 of the TAP protocol](https://testanything.org/tap-specification.html). The +produced TAP stream is by default piped to a pretty formatter for human +consumption, but if Bats is called with the `-t` flag, then the TAP stream is +directly printed to the console. + +This has implications if you try to print custom text to the terminal. As +mentioned in [File descriptor 3](#file-descriptor-3-read-this-if-bats-hangs), +bats provides a special file descriptor, `&3`, that you should use to print +your custom text. 
Here are some detailed guidelines to refer to:
+
+- Printing **from within a test function**:
+  - First you should consider whether you want the text to be always visible or only
+    when the test fails. Text that is output directly to stdout or stderr (file
+    descriptor 1 or 2), i.e. `echo 'text'`, is considered part of the test function
+    output and is printed only on test failures for diagnostic purposes,
+    regardless of the formatter used (TAP or pretty).
+  - To have text printed unconditionally from within a test function you need to
+    redirect the output to file descriptor 3, e.g. `echo 'text' >&3`. This output
+    will become part of the TAP stream. You are encouraged to prepend text printed
+    this way with a hash (e.g. `echo '# text' >&3`) in order to produce 100% TAP-compliant
+    output. Otherwise, depending on the 3rd-party tools you use to analyze the
+    TAP stream, you can encounter unexpected behavior or errors.
+
+- Printing **from within the `setup*` or `teardown*` functions**: The same holds
+  true as for printing within test functions.
+
+- Printing **outside test or `setup*`/`teardown*` functions**:
+  - You should avoid printing in free code: due to the multiple execution
+    contexts (`setup_file`, multiple `@test`s) of test files, output
+    will be printed more than once.
+
+  - Regardless of where text is redirected to (stdout, stderr or file descriptor 3),
+    text is immediately visible in the terminal, as it is not piped into the formatter.
+
+  - Text printed to stdout may interfere with formatters as it can
+    make output non-compliant with the TAP spec. The reason for this is that
+    such output will be produced before the [_plan line_][tap-plan] is printed,
+    contrary to the spec, which requires the _plan line_ to be either the first or
+    the last line of the output.
+
+  - Due to internal pipes/redirects, output to stderr is always printed first.
+
+[tap-plan]: https://testanything.org/tap-specification.html#the-plan
+
+## Special variables
+
+There are several global variables you can use to introspect on Bats tests:
+
+- `$BATS_RUN_COMMAND` is the command (including arguments) of the most recent `run` invocation in your test case.
+- `$BATS_TEST_FILENAME` is the fully expanded path to the Bats test file.
+- `$BATS_TEST_DIRNAME` is the directory in which the Bats test file is located.
+- `$BATS_TEST_NAMES` is an array of function names for each test case.
+- `$BATS_TEST_NAME` is the name of the function containing the current test case.
+- `BATS_TEST_NAME_PREFIX` will be prepended to the description of each test on
+  stdout and in reports.
+- `$BATS_TEST_DESCRIPTION` is the description of the current test case.
+- `BATS_TEST_RETRIES` is the maximum number of additional attempts that will be
+  made on a failed test before it is finally considered failed.
+  The default of 0 means the test must pass on the first attempt.
+- `BATS_TEST_TIMEOUT` is the number of seconds after which a test (including setup)
+  will be aborted and marked as failed. Updates to this value in `setup()` or `@test`
+  cannot change the running timeout countdown, so the latest useful update location
+  is `setup_file()`.
+- `$BATS_TEST_NUMBER` is the (1-based) index of the current test case in the test file.
+- `$BATS_SUITE_TEST_NUMBER` is the (1-based) index of the current test case in the test suite (over all files).
+- `$BATS_TEST_TAGS` are the tags of the current test.
+- `$BATS_TMPDIR` is the base temporary directory used by bats to create its
+  temporary files / directories.
+  (default: `$TMPDIR`. If `$TMPDIR` is not set, `/tmp` is used.)
+- `$BATS_RUN_TMPDIR` is the location to the temporary directory used by bats to + store all its internal temporary files during the tests. + (default: `$BATS_TMPDIR/bats-run-$BATS_ROOT_PID-XXXXXX`) +- `$BATS_FILE_EXTENSION` (default: `bats`) specifies the extension of test files that should be found when running a suite (via `bats [-r] suite_folder/`) +- `$BATS_SUITE_TMPDIR` is a temporary directory common to all tests of a suite. + Could be used to create files required by multiple tests. +- `$BATS_FILE_TMPDIR` is a temporary directory common to all tests of a test file. + Could be used to create files required by multiple tests in the same test file. +- `$BATS_TEST_TMPDIR` is a temporary directory unique for each test. + Could be used to create files required only for specific tests. +- `$BATS_VERSION` is the version of Bats running the test. + +## Libraries and Add-ons + +Bats supports loading external assertion libraries and helpers. Those under `bats-core` are officially supported libraries (integration tests welcome!): + +- - common assertions for Bats +- - supporting library for Bats test helpers +- - common filesystem assertions for Bats +- - e2e tests of applications in K8s environments + +and some external libraries, supported on a "best-effort" basis: + +- (still relevant? Requires review) +- (as per #147) +- (how is this different from grayhemp/bats-mock?) diff --git a/test/bats/docs/versions.md b/test/bats/docs/versions.md new file mode 100644 index 000000000..5113077c4 --- /dev/null +++ b/test/bats/docs/versions.md @@ -0,0 +1,9 @@ +Here are the docs of following versions: + +* [v1.2.0](../../v1.2.0/README.md) +* [v1.1.0](../../v1.1.0/README.md) +* [v1.0.2](../../v1.0.2/README.md) +* [v0.4.0](../../v0.4.0/README.md) +* [v0.3.1](../../v0.3.1/README.md) +* [v0.2.0](../../v0.2.0/README.md) +* [v0.1.0](../../v0.1.0/README.md) diff --git a/test/bats/install.sh b/test/bats/install.sh new file mode 100755 index 000000000..39ca662e9 --- /dev/null +++ b/test/bats/install.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -e + +BATS_ROOT="${0%/*}" +PREFIX="$1" +LIBDIR="${2:-lib}" + +if [[ -z "$PREFIX" ]]; then + printf '%s\n' \ + "usage: $0 " \ + " e.g. 
$0 /usr/local" >&2 + exit 1 +fi + +install -d -m 755 "$PREFIX"/{bin,libexec/bats-core,"${LIBDIR}"/bats-core,share/man/man{1,7}} +install -m 755 "$BATS_ROOT/bin"/* "$PREFIX/bin" +install -m 755 "$BATS_ROOT/libexec/bats-core"/* "$PREFIX/libexec/bats-core" +install -m 755 "$BATS_ROOT/lib/bats-core"/* "$PREFIX/${LIBDIR}/bats-core" +install -m 644 "$BATS_ROOT/man/bats.1" "$PREFIX/share/man/man1" +install -m 644 "$BATS_ROOT/man/bats.7" "$PREFIX/share/man/man7" + +echo "Installed Bats to $PREFIX/bin/bats" diff --git a/test/bats/lib/bats-core/common.bash b/test/bats/lib/bats-core/common.bash new file mode 100644 index 000000000..a27f1ac8e --- /dev/null +++ b/test/bats/lib/bats-core/common.bash @@ -0,0 +1,249 @@ +#!/usr/bin/env bash + +bats_prefix_lines_for_tap_output() { + while IFS= read -r line; do + printf '# %s\n' "$line" || break # avoid feedback loop when errors are redirected into BATS_OUT (see #353) + done + if [[ -n "$line" ]]; then + printf '# %s\n' "$line" + fi +} + +function bats_replace_filename() { + local line + while read -r line; do + printf "%s\n" "${line//$BATS_TEST_SOURCE/$BATS_TEST_FILENAME}" + done + if [[ -n "$line" ]]; then + printf "%s\n" "${line//$BATS_TEST_SOURCE/$BATS_TEST_FILENAME}" + fi +} + +bats_quote_code() { # + printf -v "$1" -- "%s%s%s" "$BATS_BEGIN_CODE_QUOTE" "$2" "$BATS_END_CODE_QUOTE" +} + +bats_check_valid_version() { + if [[ ! $1 =~ [0-9]+.[0-9]+.[0-9]+ ]]; then + printf "ERROR: version '%s' must be of format ..!\n" "$1" >&2 + exit 1 + fi +} + +# compares two versions. Return 0 when version1 < version2 +bats_version_lt() { # + bats_check_valid_version "$1" + bats_check_valid_version "$2" + + local -a version1_parts version2_parts + IFS=. read -ra version1_parts <<<"$1" + IFS=. read -ra version2_parts <<<"$2" + + for i in {0..2}; do + if ((version1_parts[i] < version2_parts[i])); then + return 0 + elif ((version1_parts[i] > version2_parts[i])); then + return 1 + fi + done + # if we made it this far, they are equal -> also not less then + return 2 # use other failing return code to distinguish equal from gt +} + +# ensure a minimum version of bats is running or exit with failure +bats_require_minimum_version() { # + local required_minimum_version=$1 + + if bats_version_lt "$BATS_VERSION" "$required_minimum_version"; then + printf "BATS_VERSION=%s does not meet required minimum %s\n" "$BATS_VERSION" "$required_minimum_version" + exit 1 + fi + + if bats_version_lt "$BATS_GUARANTEED_MINIMUM_VERSION" "$required_minimum_version"; then + BATS_GUARANTEED_MINIMUM_VERSION="$required_minimum_version" + fi +} + +bats_binary_search() { # + if [[ $# -ne 2 ]]; then + printf "ERROR: bats_binary_search requires exactly 2 arguments: \n" >&2 + return 2 + fi + + local -r search_value=$1 array_name=$2 + + # we'd like to test if array is set but we cannot distinguish unset from empty arrays, so we need to skip that + + local start=0 mid end mid_value + # start is inclusive, end is exclusive ... + eval "end=\${#${array_name}[@]}" + + # so start == end means empty search space + while ((start < end)); do + mid=$(((start + end) / 2)) + eval "mid_value=\${${array_name}[$mid]}" + if [[ "$mid_value" == "$search_value" ]]; then + return 0 + elif [[ "$mid_value" < "$search_value" ]]; then + # This branch excludes equality -> +1 to skip the mid element. + # This +1 also avoids endless recursion on odd sized search ranges. + start=$((mid + 1)) + else + end=$mid + fi + done + + # did not find it -> its not there + return 1 +} + +# store the values in ascending (string!) 
order in result array +# Intended for short lists! (uses insertion sort) +bats_sort() { # + local -r result_name=$1 + shift + + if (($# == 0)); then + eval "$result_name=()" + return 0 + fi + + local -a sorted_array=() + local -i i + while (( $# > 0 )); do # loop over input values + local current_value="$1" + shift + for ((i = ${#sorted_array[@]}; i >= 0; --i)); do # loop over output array from end + if (( i == 0 )) || [[ ${sorted_array[i - 1]} < $current_value ]]; then + # shift bigger elements one position to the end + sorted_array[i]=$current_value + break + else + # insert new element at (freed) desired location + sorted_array[i]=${sorted_array[i - 1]} + fi + done + done + + eval "$result_name=(\"\${sorted_array[@]}\")" +} + +# check if all search values (must be sorted!) are in the (sorted!) array +# Intended for short lists/arrays! +bats_all_in() { # + local -r haystack_array=$1 + shift + + local -i haystack_length # just to appease shellcheck + eval "local -r haystack_length=\${#${haystack_array}[@]}" + + local -i haystack_index=0 # initialize only here to continue from last search position + local search_value haystack_value # just to appease shellcheck + for ((i = 1; i <= $#; ++i)); do + eval "local search_value=${!i}" + for (( ; haystack_index < haystack_length; ++haystack_index)); do + eval "local haystack_value=\${${haystack_array}[$haystack_index]}" + if [[ $haystack_value > "$search_value" ]]; then + # we passed the location this value would have been at -> not found + return 1 + elif [[ $haystack_value == "$search_value" ]]; then + continue 2 # search value found -> try the next one + fi + done + return 1 # we ran of the end of the haystack without finding the value! + done + + # did not return from loop above -> all search values were found + return 0 +} + +# check if any search value (must be sorted!) is in the (sorted!) array +# intended for short lists/arrays +bats_any_in() { # + local -r haystack_array=$1 + shift + + local -i haystack_length # just to appease shellcheck + eval "local -r haystack_length=\${#${haystack_array}[@]}" + + local -i haystack_index=0 # initialize only here to continue from last search position + local search_value haystack_value # just to appease shellcheck + for ((i = 1; i <= $#; ++i)); do + eval "local search_value=${!i}" + for (( ; haystack_index < haystack_length; ++haystack_index)); do + eval "local haystack_value=\${${haystack_array}[$haystack_index]}" + if [[ $haystack_value > "$search_value" ]]; then + continue 2 # search value not in array! -> try next + elif [[ $haystack_value == "$search_value" ]]; then + return 0 # search value found + fi + done + done + + # did not return from loop above -> no search value was found + return 1 +} + +bats_trim() { # + local -r bats_trim_ltrimmed=${2#"${2%%[![:space:]]*}"} # cut off leading whitespace + # shellcheck disable=SC2034 # used in eval! 
+ local -r bats_trim_trimmed=${bats_trim_ltrimmed%"${bats_trim_ltrimmed##*[![:space:]]}"} # cut off trailing whitespace + eval "$1=\$bats_trim_trimmed" +} + +# a helper function to work around unbound variable errors with ${arr[@]} on Bash 3 +bats_append_arrays_as_args() { # -- + local -a trailing_args=() + while (($# > 0)) && [[ $1 != -- ]]; do + local array=$1 + shift + + if eval "(( \${#${array}[@]} > 0 ))"; then + eval "trailing_args+=(\"\${${array}[@]}\")" + fi + done + shift # remove -- separator + + if (($# == 0)); then + printf "Error: append_arrays_as_args is missing a command or -- separator\n" >&2 + return 1 + fi + + if ((${#trailing_args[@]} > 0)); then + "$@" "${trailing_args[@]}" + else + "$@" + fi +} + +bats_format_file_line_reference() { # + # shellcheck disable=SC2034 # will be used in subimplementation + local output="${1?}" + shift + "bats_format_file_line_reference_${BATS_LINE_REFERENCE_FORMAT?}" "$@" +} + +bats_format_file_line_reference_comma_line() { + printf -v "$output" "%s, line %d" "$@" +} + +bats_format_file_line_reference_colon() { + printf -v "$output" "%s:%d" "$@" +} + +# approximate realpath without subshell +bats_approx_realpath() { # + local output=$1 path=$2 + if [[ $path != /* ]]; then + path="$PWD/$path" + fi + # x/./y -> x/y + path=${path//\/.\//\/} + printf -v "$output" "%s" "$path" +} + +bats_format_file_line_reference_uri() { + local filename=${1?} line=${2?} + bats_approx_realpath filename "$filename" + printf -v "$output" "file://%s:%d" "$filename" "$line" +} diff --git a/test/bats/lib/bats-core/formatter.bash b/test/bats/lib/bats-core/formatter.bash new file mode 100644 index 000000000..b774e1673 --- /dev/null +++ b/test/bats/lib/bats-core/formatter.bash @@ -0,0 +1,143 @@ +#!/usr/bin/env bash + +# reads (extended) bats tap streams from stdin and calls callback functions for each line +# +# Segmenting functions +# ==================== +# bats_tap_stream_plan -> when the test plan is encountered +# bats_tap_stream_suite -> when a new file is begun WARNING: extended only +# bats_tap_stream_begin -> when a new test is begun WARNING: extended only +# +# Test result functions +# ===================== +# If timing was enabled, BATS_FORMATTER_TEST_DURATION will be set to their duration in milliseconds +# bats_tap_stream_ok -> when a test was successful +# bats_tap_stream_not_ok -> when a test has failed. 
If the failure was due to a timeout, +# BATS_FORMATTER_TEST_TIMEOUT is set to the timeout duration in seconds +# bats_tap_stream_skipped -> when a test was skipped +# +# Context functions +# ================= +# bats_tap_stream_comment -> when a comment line was encountered, +# scope tells the last encountered of plan, begin, ok, not_ok, skipped, suite +# bats_tap_stream_unknown -> when a line is encountered that does not match the previous entries, +# scope @see bats_tap_stream_comment +# forwards all input as is, when there is no TAP test plan header +function bats_parse_internal_extended_tap() { + local header_pattern='[0-9]+\.\.[0-9]+' + IFS= read -r header + + if [[ "$header" =~ $header_pattern ]]; then + bats_tap_stream_plan "${header:3}" + else + # If the first line isn't a TAP plan, print it and pass the rest through + printf '%s\n' "$header" + exec cat + fi + + ok_line_regexpr="ok ([0-9]+) (.*)" + skip_line_regexpr="ok ([0-9]+) (.*) # skip( (.*))?$" + timeout_line_regexpr="not ok ([0-9]+) (.*) # timeout after ([0-9]+)s$" + not_ok_line_regexpr="not ok ([0-9]+) (.*)" + + timing_expr="in ([0-9]+)ms$" + local test_name begin_index ok_index not_ok_index index scope + begin_index=0 + index=0 + scope=plan + while IFS= read -r line; do + unset BATS_FORMATTER_TEST_DURATION BATS_FORMATTER_TEST_TIMEOUT + case "$line" in + 'begin '*) # this might only be called in extended tap output + ((++begin_index)) + scope=begin + test_name="${line#* "$begin_index" }" + bats_tap_stream_begin "$begin_index" "$test_name" + ;; + 'ok '*) + ((++index)) + if [[ "$line" =~ $ok_line_regexpr ]]; then + ok_index="${BASH_REMATCH[1]}" + test_name="${BASH_REMATCH[2]}" + if [[ "$line" =~ $skip_line_regexpr ]]; then + scope=skipped + test_name="${BASH_REMATCH[2]}" # cut off name before "# skip" + local skip_reason="${BASH_REMATCH[4]}" + if [[ "$test_name" =~ $timing_expr ]]; then + local BATS_FORMATTER_TEST_DURATION="${BASH_REMATCH[1]}" + test_name="${test_name% in "${BATS_FORMATTER_TEST_DURATION}"ms}" + bats_tap_stream_skipped "$ok_index" "$test_name" "$skip_reason" + else + bats_tap_stream_skipped "$ok_index" "$test_name" "$skip_reason" + fi + else + scope=ok + if [[ "$line" =~ $timing_expr ]]; then + local BATS_FORMATTER_TEST_DURATION="${BASH_REMATCH[1]}" + bats_tap_stream_ok "$ok_index" "${test_name% in "${BASH_REMATCH[1]}"ms}" + else + bats_tap_stream_ok "$ok_index" "$test_name" + fi + fi + else + printf "ERROR: could not match ok line: %s" "$line" >&2 + exit 1 + fi + ;; + 'not ok '*) + ((++index)) + scope=not_ok + if [[ "$line" =~ $not_ok_line_regexpr ]]; then + not_ok_index="${BASH_REMATCH[1]}" + test_name="${BASH_REMATCH[2]}" + if [[ "$line" =~ $timeout_line_regexpr ]]; then + not_ok_index="${BASH_REMATCH[1]}" + test_name="${BASH_REMATCH[2]}" + # shellcheck disable=SC2034 # used in bats_tap_stream_ok + local BATS_FORMATTER_TEST_TIMEOUT="${BASH_REMATCH[3]}" + fi + if [[ "$test_name" =~ $timing_expr ]]; then + # shellcheck disable=SC2034 # used in bats_tap_stream_ok + local BATS_FORMATTER_TEST_DURATION="${BASH_REMATCH[1]}" + test_name="${test_name% in "${BASH_REMATCH[1]}"ms}" + fi + bats_tap_stream_not_ok "$not_ok_index" "$test_name" + else + printf "ERROR: could not match not ok line: %s" "$line" >&2 + exit 1 + fi + ;; + '# '*) + bats_tap_stream_comment "${line:2}" "$scope" + ;; + '#') + bats_tap_stream_comment "" "$scope" + ;; + 'suite '*) + scope=suite + # pass on the + bats_tap_stream_suite "${line:6}" + ;; + *) + bats_tap_stream_unknown "$line" "$scope" + ;; + esac + done +} + +normalize_base_path() { # + # 
the relative path root to use for reporting filenames + # this is mainly intended for suite mode, where this will be the suite root folder + local base_path="$2" + # use the containing directory when --base-path is a file + if [[ ! -d "$base_path" ]]; then + base_path="$(dirname "$base_path")" + fi + # get the absolute path + base_path="$(cd "$base_path" && pwd)" + # ensure the path ends with / to strip that later on + if [[ "${base_path}" != *"/" ]]; then + base_path="$base_path/" + fi + printf -v "$1" "%s" "$base_path" +} diff --git a/test/bats/lib/bats-core/preprocessing.bash b/test/bats/lib/bats-core/preprocessing.bash new file mode 100644 index 000000000..5d9a7652c --- /dev/null +++ b/test/bats/lib/bats-core/preprocessing.bash @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +BATS_TMPNAME="$BATS_RUN_TMPDIR/bats.$$" +BATS_PARENT_TMPNAME="$BATS_RUN_TMPDIR/bats.$PPID" +# shellcheck disable=SC2034 +BATS_OUT="${BATS_TMPNAME}.out" # used in bats-exec-file + +bats_preprocess_source() { + # export to make it visible to bats_evaluate_preprocessed_source + # since the latter runs in bats-exec-test's bash while this runs in bats-exec-file's + export BATS_TEST_SOURCE="${BATS_TMPNAME}.src" + CHECK_BATS_COMMENT_COMMANDS=1 bats-preprocess "$BATS_TEST_FILENAME" >"$BATS_TEST_SOURCE" +} + +bats_evaluate_preprocessed_source() { + if [[ -z "${BATS_TEST_SOURCE:-}" ]]; then + BATS_TEST_SOURCE="${BATS_PARENT_TMPNAME}.src" + fi + # Dynamically loaded user files provided outside of Bats. + # shellcheck disable=SC1090 + source "$BATS_TEST_SOURCE" +} diff --git a/test/bats/lib/bats-core/semaphore.bash b/test/bats/lib/bats-core/semaphore.bash new file mode 100644 index 000000000..a196ac82d --- /dev/null +++ b/test/bats/lib/bats-core/semaphore.bash @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +bats_run_under_flock() { + flock "$BATS_SEMAPHORE_DIR" "$@" +} + +bats_run_under_shlock() { + local lockfile="$BATS_SEMAPHORE_DIR/shlock.lock" + while ! shlock -p $$ -f "$lockfile"; do + sleep 1 + done + # we got the lock now, execute the command + "$@" + local status=$? + # free the lock + rm -f "$lockfile" + return $status + } + +# setup the semaphore environment for the loading file +bats_semaphore_setup() { + export -f bats_semaphore_get_free_slot_count + export -f bats_semaphore_acquire_while_locked + export BATS_SEMAPHORE_DIR="$BATS_RUN_TMPDIR/semaphores" + + if command -v flock >/dev/null; then + BATS_LOCKING_IMPLEMENTATION=flock + elif command -v shlock >/dev/null; then + BATS_LOCKING_IMPLEMENTATION=shlock + else + printf "ERROR: flock/shlock is required for parallelization within files!\n" >&2 + exit 1 + fi +} + +# $1 - output directory for stdout/stderr +# $@ - command to run +# run the given command in a semaphore +# block when there is no free slot for the semaphore +# when there is a free slot, run the command in background +# gather the output of the command in files in the given directory +bats_semaphore_run() { + local output_dir=$1 + shift + local semaphore_slot + semaphore_slot=$(bats_semaphore_acquire_slot) + bats_semaphore_release_wrapper "$output_dir" "$semaphore_slot" "$@" & + printf "%d\n" "$!" +} + +# $1 - output directory for stdout/stderr +# $@ - command to run +# this wraps the actual function call to install some traps on exiting +bats_semaphore_release_wrapper() { + local output_dir="$1" + local semaphore_name="$2" + shift 2 # all other parameters will be use for the command to execute + + # shellcheck disable=SC2064 # we want to expand the semaphore_name right now! 
+ trap "status=$?; bats_semaphore_release_slot '$semaphore_name'; exit $status" EXIT + + mkdir -p "$output_dir" + "$@" 2>"$output_dir/stderr" >"$output_dir/stdout" + local status=$? + + # bash bug: the exit trap is not called for the background process + bats_semaphore_release_slot "$semaphore_name" + trap - EXIT # avoid calling release twice + return $status +} + +bats_semaphore_acquire_while_locked() { + if [[ $(bats_semaphore_get_free_slot_count) -gt 0 ]]; then + local slot=0 + while [[ -e "$BATS_SEMAPHORE_DIR/slot-$slot" ]]; do + ((++slot)) + done + if [[ $slot -lt $BATS_SEMAPHORE_NUMBER_OF_SLOTS ]]; then + touch "$BATS_SEMAPHORE_DIR/slot-$slot" && printf "%d\n" "$slot" && return 0 + fi + fi + return 1 +} + +# block until a semaphore slot becomes free +# prints the number of the slot that it received +bats_semaphore_acquire_slot() { + mkdir -p "$BATS_SEMAPHORE_DIR" + # wait for a slot to become free + # TODO: avoid busy waiting by using signals -> this opens op prioritizing possibilities as well + while true; do + # don't lock for reading, we are fine with spuriously getting no free slot + if [[ $(bats_semaphore_get_free_slot_count) -gt 0 ]]; then + bats_run_under_"$BATS_LOCKING_IMPLEMENTATION" \ + bash -c bats_semaphore_acquire_while_locked \ + && break + fi + sleep 1 + done +} + +bats_semaphore_release_slot() { + # we don't need to lock this, since only our process owns this file + # and freeing a semaphore cannot lead to conflicts with others + rm "$BATS_SEMAPHORE_DIR/slot-$1" # this will fail if we had not acquired a semaphore! +} + +bats_semaphore_get_free_slot_count() { + # find might error out without returning something useful when a file is deleted, + # while the directory is traversed -> only continue when there was no error + until used_slots=$(find "$BATS_SEMAPHORE_DIR" -name 'slot-*' 2>/dev/null | wc -l); do :; done + echo $((BATS_SEMAPHORE_NUMBER_OF_SLOTS - used_slots)) +} diff --git a/test/bats/lib/bats-core/test_functions.bash b/test/bats/lib/bats-core/test_functions.bash new file mode 100644 index 000000000..3db539e59 --- /dev/null +++ b/test/bats/lib/bats-core/test_functions.bash @@ -0,0 +1,371 @@ +#!/usr/bin/env bash + +BATS_TEST_DIRNAME="${BATS_TEST_FILENAME%/*}" +BATS_TEST_NAMES=() + +# shellcheck source=lib/bats-core/warnings.bash +source "$BATS_ROOT/lib/bats-core/warnings.bash" + +# find_in_bats_lib_path echoes the first recognized load path to +# a library in BATS_LIB_PATH or relative to BATS_TEST_DIRNAME. +# +# Libraries relative to BATS_TEST_DIRNAME take precedence over +# BATS_LIB_PATH. +# +# Library load paths are recognized using find_library_load_path. +# +# If no library is found find_in_bats_lib_path returns 1. +find_in_bats_lib_path() { # + local return_var="${1:?}" + local library_name="${2:?}" + + local -a bats_lib_paths + IFS=: read -ra bats_lib_paths <<<"$BATS_LIB_PATH" + + for path in "${bats_lib_paths[@]}"; do + if [[ -f "$path/$library_name" ]]; then + printf -v "$return_var" "%s" "$path/$library_name" + # A library load path was found, return + return 0 + elif [[ -f "$path/$library_name/load.bash" ]]; then + printf -v "$return_var" "%s" "$path/$library_name/load.bash" + # A library load path was found, return + return 0 + fi + done + + return 1 +} + +# bats_internal_load expects an absolute path that is a library load path. +# +# If the library load path points to a file (a library loader) it is +# sourced. +# +# If it points to a directory all files ending in .bash inside of the +# directory are sourced. 
+# +# If the sourcing of the library loader or of a file in a library +# directory fails bats_internal_load prints an error message and returns 1. +# +# If the passed library load path is not absolute or is not a valid file +# or directory bats_internal_load prints an error message and returns 1. +bats_internal_load() { + local library_load_path="${1:?}" + + if [[ "${library_load_path:0:1}" != / ]]; then + printf "Passed library load path is not an absolute path: %s\n" "$library_load_path" >&2 + return 1 + fi + + # library_load_path is a library loader + if [[ -f "$library_load_path" ]]; then + # shellcheck disable=SC1090 + if ! source "$library_load_path"; then + printf "Error while sourcing library loader at '%s'\n" "$library_load_path" >&2 + return 1 + fi + return 0 + fi + + printf "Passed library load path is neither a library loader nor library directory: %s\n" "$library_load_path" >&2 + return 1 +} + +# bats_load_safe accepts an argument called 'slug' and attempts to find and +# source a library based on the slug. +# +# A slug can be an absolute path, a library name or a relative path. +# +# If the slug is an absolute path bats_load_safe attempts to find the library +# load path using find_library_load_path. +# What is considered a library load path is documented in the +# documentation for find_library_load_path. +# +# If the slug is not an absolute path it is considered a library name or +# relative path. bats_load_safe attempts to find the library load path using +# find_in_bats_lib_path. +# +# If bats_load_safe can find a library load path it is passed to bats_internal_load. +# If bats_internal_load fails bats_load_safe returns 1. +# +# If no library load path can be found bats_load_safe prints an error message +# and returns 1. +bats_load_safe() { + local slug="${1:?}" + if [[ ${slug:0:1} != / ]]; then # relative paths are relative to BATS_TEST_DIRNAME + slug="$BATS_TEST_DIRNAME/$slug" + fi + + if [[ -f "$slug.bash" ]]; then + bats_internal_load "$slug.bash" + return $? + elif [[ -f "$slug" ]]; then + bats_internal_load "$slug" + return $? + fi + + # loading from PATH (retained for backwards compatibility) + if [[ ! -f "$1" ]] && type -P "$1" >/dev/null; then + # shellcheck disable=SC1090 + source "$1" + return $? + fi + + # No library load path can be found + printf "bats_load_safe: Could not find '%s'[.bash]\n" "$slug" >&2 + return 1 +} + +bats_load_library_safe() { # + local slug="${1:?}" library_path + + # Check for library load paths in BATS_TEST_DIRNAME and BATS_LIB_PATH + if [[ ${slug:0:1} != / ]]; then + if ! find_in_bats_lib_path library_path "$slug"; then + printf "Could not find library '%s' relative to test file or in BATS_LIB_PATH\n" "$slug" >&2 + return 1 + fi + else + # absolute paths are taken as is + library_path="$slug" + if [[ ! -f "$library_path" ]]; then + printf "Could not find library on absolute path '%s'\n" "$library_path" >&2 + return 1 + fi + fi + + bats_internal_load "$library_path" + return $? +} + +# immediately exit on error, use bats_load_library_safe to catch and handle errors +bats_load_library() { # + if ! bats_load_library_safe "$@"; then + exit 1 + fi +} + +# load acts like bats_load_safe but exits the shell instead of returning 1. +load() { + if ! 
bats_load_safe "$@"; then + exit 1 + fi +} + +bats_redirect_stderr_into_file() { + "$@" 2>>"$bats_run_separate_stderr_file" # use >> to see collisions' content +} + +bats_merge_stdout_and_stderr() { + "$@" 2>&1 +} + +# write separate lines from into +bats_separate_lines() { # + local -r output_array_name="$1" + local -r input_var_name="$2" + local input="${!input_var_name}" + if [[ $keep_empty_lines ]]; then + local bats_separate_lines_lines=() + if [[ -n "$input" ]]; then # avoid getting an empty line for empty input + # remove one trailing \n if it exists to compensate its addition by <<< + input=${input%$'\n'} + while IFS= read -r line; do + bats_separate_lines_lines+=("$line") + done <<<"${input}" + fi + eval "${output_array_name}=(\"\${bats_separate_lines_lines[@]}\")" + else + # shellcheck disable=SC2034,SC2206 + IFS=$'\n' read -d '' -r -a "$output_array_name" <<<"${!input_var_name}" || true # don't fail due to EOF + fi +} + +run() { # [!|-N] [--keep-empty-lines] [--separate-stderr] [--] + # This has to be restored on exit from this function to avoid leaking our trap INT into surrounding code. + # Non zero exits won't restore under the assumption that they will fail the test before it can be aborted, + # which allows us to avoid duplicating the restore code on every exit path + trap bats_interrupt_trap_in_run INT + local expected_rc= + local keep_empty_lines= + local output_case=merged + local has_flags= + # parse options starting with - + while [[ $# -gt 0 ]] && [[ $1 == -* || $1 == '!' ]]; do + has_flags=1 + case "$1" in + '!') + expected_rc=-1 + ;; + -[0-9]*) + expected_rc=${1#-} + if [[ $expected_rc =~ [^0-9] ]]; then + printf "Usage error: run: '-NNN' requires numeric NNN (got: %s)\n" "$expected_rc" >&2 + return 1 + elif [[ $expected_rc -gt 255 ]]; then + printf "Usage error: run: '-NNN': NNN must be <= 255 (got: %d)\n" "$expected_rc" >&2 + return 1 + fi + ;; + --keep-empty-lines) + keep_empty_lines=1 + ;; + --separate-stderr) + output_case="separate" + ;; + --) + shift # eat the -- before breaking away + break + ;; + *) + printf "Usage error: unknown flag '%s'" "$1" >&2 + return 1 + ;; + esac + shift + done + + if [[ -n $has_flags ]]; then + bats_warn_minimum_guaranteed_version "Using flags on \`run\`" 1.5.0 + fi + + local pre_command= + + case "$output_case" in + merged) # redirects stderr into stdout and fills only $output/$lines + pre_command=bats_merge_stdout_and_stderr + ;; + separate) # splits stderr into own file and fills $stderr/$stderr_lines too + local bats_run_separate_stderr_file + bats_run_separate_stderr_file="$(mktemp "${BATS_TEST_TMPDIR}/separate-stderr-XXXXXX")" + pre_command=bats_redirect_stderr_into_file + ;; + esac + + local origFlags="$-" + set +eET + if [[ $keep_empty_lines ]]; then + # 'output', 'status', 'lines' are global variables available to tests. + # preserve trailing newlines by appending . and removing it later + # shellcheck disable=SC2034 + output="$( + "$pre_command" "$@" + status=$? + printf . + exit $status + )" && status=0 || status=$? + output="${output%.}" + else + # 'output', 'status', 'lines' are global variables available to tests. + # shellcheck disable=SC2034 + output="$("$pre_command" "$@")" && status=0 || status=$? 
+ fi + + bats_separate_lines lines output + + if [[ "$output_case" == separate ]]; then + # shellcheck disable=SC2034 + read -d '' -r stderr <"$bats_run_separate_stderr_file" || true + bats_separate_lines stderr_lines stderr + fi + + # shellcheck disable=SC2034 + BATS_RUN_COMMAND="${*}" + set "-$origFlags" + + bats_run_print_output() { + if [[ -n "$output" ]]; then + printf "%s\n" "$output" + fi + if [[ "$output_case" == separate && -n "$stderr" ]]; then + printf "stderr:\n%s\n" "$stderr" + fi + } + + if [[ -n "$expected_rc" ]]; then + if [[ "$expected_rc" = "-1" ]]; then + if [[ "$status" -eq 0 ]]; then + BATS_ERROR_SUFFIX=", expected nonzero exit code!" + bats_run_print_output + return 1 + fi + elif [ "$status" -ne "$expected_rc" ]; then + # shellcheck disable=SC2034 + BATS_ERROR_SUFFIX=", expected exit code $expected_rc, got $status" + bats_run_print_output + return 1 + fi + elif [[ "$status" -eq 127 ]]; then # "command not found" + bats_generate_warning 1 "$BATS_RUN_COMMAND" + fi + + if [[ ${BATS_VERBOSE_RUN:-} ]]; then + bats_run_print_output + fi + + # don't leak our trap into surrounding code + trap bats_interrupt_trap INT +} + +setup() { + return 0 +} + +teardown() { + return 0 +} + +skip() { + # if this is a skip in teardown ... + if [[ -n "${BATS_TEARDOWN_STARTED-}" ]]; then + # ... we want to skip the rest of teardown. + # communicate to bats_exit_trap that the teardown was completed without error + # shellcheck disable=SC2034 + BATS_TEARDOWN_COMPLETED=1 + # if we are already in the exit trap (e.g. due to previous skip) ... + if [[ "$BATS_TEARDOWN_STARTED" == as-exit-trap ]]; then + # ... we need to do the rest of the tear_down_trap that would otherwise be skipped after the next call to exit + bats_exit_trap + # and then do the exit (at the end of this function) + fi + # if we aren't in exit trap, the normal exit handling should suffice + else + # ... this is either skip in test or skip in setup. + # Following variables are used in bats-exec-test which sources this file + # shellcheck disable=SC2034 + BATS_TEST_SKIPPED="${1:-1}" + # shellcheck disable=SC2034 + BATS_TEST_COMPLETED=1 + fi + exit 0 +} + +bats_test_begin() { + BATS_TEST_DESCRIPTION="$1" + if [[ -n "$BATS_EXTENDED_SYNTAX" ]]; then + printf 'begin %d %s\n' "$BATS_SUITE_TEST_NUMBER" "${BATS_TEST_NAME_PREFIX:-}$BATS_TEST_DESCRIPTION" >&3 + fi + setup +} + +bats_test_function() { + local tags=() + if [[ "$1" == --tags ]]; then + IFS=',' read -ra tags <<<"$2" + shift 2 + fi + local test_name="$1" + BATS_TEST_NAMES+=("$test_name") + if [[ "$test_name" == "$BATS_TEST_NAME" ]]; then + # shellcheck disable=SC2034 + BATS_TEST_TAGS=("${tags[@]+${tags[@]}}") + fi +} + +# decides whether a failed test should be run again +bats_should_retry_test() { + # test try number starts at 1 + # 0 retries means run only first try + ((BATS_TEST_TRY_NUMBER <= BATS_TEST_RETRIES)) +} diff --git a/test/bats/lib/bats-core/tracing.bash b/test/bats/lib/bats-core/tracing.bash new file mode 100644 index 000000000..0e94ec30d --- /dev/null +++ b/test/bats/lib/bats-core/tracing.bash @@ -0,0 +1,399 @@ +#!/usr/bin/env bash + +# shellcheck source=lib/bats-core/common.bash +source "$BATS_ROOT/lib/bats-core/common.bash" + +bats_capture_stack_trace() { + local test_file + local funcname + local i + + BATS_DEBUG_LAST_STACK_TRACE=() + + for ((i = 2; i != ${#FUNCNAME[@]}; ++i)); do + # Use BATS_TEST_SOURCE if necessary to work around Bash < 4.4 bug whereby + # calling an exported function erases the test file's BASH_SOURCE entry. 
+ test_file="${BASH_SOURCE[$i]:-$BATS_TEST_SOURCE}" + funcname="${FUNCNAME[$i]}" + BATS_DEBUG_LAST_STACK_TRACE+=("${BASH_LINENO[$((i - 1))]} $funcname $test_file") + case "$funcname" in + "${BATS_TEST_NAME-}" | setup | teardown | setup_file | teardown_file | setup_suite | teardown_suite) + break + ;; + esac + if [[ "${BASH_SOURCE[$i + 1]:-}" == *"bats-exec-file" ]] && [[ "$funcname" == 'source' ]]; then + break + fi + done +} + +bats_get_failure_stack_trace() { + local stack_trace_var + # See bats_debug_trap for details. + if [[ -n "${BATS_DEBUG_LAST_STACK_TRACE_IS_VALID:-}" ]]; then + stack_trace_var=BATS_DEBUG_LAST_STACK_TRACE + else + stack_trace_var=BATS_DEBUG_LASTLAST_STACK_TRACE + fi + # shellcheck disable=SC2016 + eval "$(printf \ + '%s=(${%s[@]+"${%s[@]}"})' \ + "${1}" \ + "${stack_trace_var}" \ + "${stack_trace_var}")" +} + +bats_print_stack_trace() { + local frame + local index=1 + local count="${#@}" + local filename + local lineno + + for frame in "$@"; do + bats_frame_filename "$frame" 'filename' + bats_trim_filename "$filename" 'filename' + bats_frame_lineno "$frame" 'lineno' + + printf '%s' "${BATS_STACK_TRACE_PREFIX-# }" + if [[ $index -eq 1 ]]; then + printf '(' + else + printf ' ' + fi + + local fn + bats_frame_function "$frame" 'fn' + if [[ "$fn" != "${BATS_TEST_NAME-}" ]] && + # don't print "from function `source'"", + # when failing in free code during `source $test_file` from bats-exec-file + ! [[ "$fn" == 'source' && $index -eq $count ]]; then + local quoted_fn + bats_quote_code quoted_fn "$fn" + printf "from function %s " "$quoted_fn" + fi + + local reference + bats_format_file_line_reference reference "$filename" "$lineno" + if [[ $index -eq $count ]]; then + printf 'in test file %s)\n' "$reference" + else + printf 'in file %s,\n' "$reference" + fi + + ((++index)) + done +} + +bats_print_failed_command() { + local stack_trace=("${@}") + if [[ ${#stack_trace[@]} -eq 0 ]]; then + return 0 + fi + local frame="${stack_trace[${#stack_trace[@]} - 1]}" + local filename + local lineno + local failed_line + local failed_command + + bats_frame_filename "$frame" 'filename' + bats_frame_lineno "$frame" 'lineno' + bats_extract_line "$filename" "$lineno" 'failed_line' + bats_strip_string "$failed_line" 'failed_command' + local quoted_failed_command + bats_quote_code quoted_failed_command "$failed_command" + printf '# %s ' "${quoted_failed_command}" + + if [[ "${BATS_TIMED_OUT-NOTSET}" != NOTSET ]]; then + # the other values can be safely overwritten here, + # as the timeout is the primary reason for failure + BATS_ERROR_SUFFIX=" due to timeout" + fi + + if [[ "$BATS_ERROR_STATUS" -eq 1 ]]; then + printf 'failed%s\n' "$BATS_ERROR_SUFFIX" + else + printf 'failed with status %d%s\n' "$BATS_ERROR_STATUS" "$BATS_ERROR_SUFFIX" + fi +} + +bats_frame_lineno() { + printf -v "$2" '%s' "${1%% *}" +} + +bats_frame_function() { + local __bff_function="${1#* }" + printf -v "$2" '%s' "${__bff_function%% *}" +} + +bats_frame_filename() { + local __bff_filename="${1#* }" + __bff_filename="${__bff_filename#* }" + + if [[ "$__bff_filename" == "${BATS_TEST_SOURCE-}" ]]; then + __bff_filename="$BATS_TEST_FILENAME" + fi + printf -v "$2" '%s' "$__bff_filename" +} + +bats_extract_line() { + local __bats_extract_line_line + local __bats_extract_line_index=0 + + while IFS= read -r __bats_extract_line_line; do + if [[ "$((++__bats_extract_line_index))" -eq "$2" ]]; then + printf -v "$3" '%s' "${__bats_extract_line_line%$'\r'}" + break + fi + done <"$1" +} + +bats_strip_string() { + [[ "$1" =~ 
^[[:space:]]*(.*)[[:space:]]*$ ]] + printf -v "$2" '%s' "${BASH_REMATCH[1]}" +} + +bats_trim_filename() { + printf -v "$2" '%s' "${1#"$BATS_CWD"/}" +} + +# normalize a windows path from e.g. C:/directory to /c/directory +# The path must point to an existing/accessable directory, not a file! +bats_normalize_windows_dir_path() { # + local output_var="$1" path="$2" + if [[ "$output_var" != NORMALIZED_INPUT ]]; then + local NORMALIZED_INPUT + fi + if [[ $path == ?:* ]]; then + NORMALIZED_INPUT="$( + cd "$path" || exit 1 + pwd + )" + else + NORMALIZED_INPUT="$path" + fi + printf -v "$output_var" "%s" "$NORMALIZED_INPUT" +} + +bats_emit_trace() { + if [[ $BATS_TRACE_LEVEL -gt 0 ]]; then + local line=${BASH_LINENO[1]} + # shellcheck disable=SC2016 + if [[ $BASH_COMMAND != '"$BATS_TEST_NAME" >> "$BATS_OUT" 2>&1 4>&1' && $BASH_COMMAND != "bats_test_begin "* ]] && # don't emit these internal calls + [[ $BASH_COMMAND != "$BATS_LAST_BASH_COMMAND" || $line != "$BATS_LAST_BASH_LINENO" ]] && + # avoid printing a function twice (at call site and at definition site) + [[ $BASH_COMMAND != "$BATS_LAST_BASH_COMMAND" || ${BASH_LINENO[2]} != "$BATS_LAST_BASH_LINENO" || ${BASH_SOURCE[3]} != "$BATS_LAST_BASH_SOURCE" ]]; then + local file="${BASH_SOURCE[2]}" # index 2: skip over bats_emit_trace and bats_debug_trap + if [[ $file == "${BATS_TEST_SOURCE}" ]]; then + file="$BATS_TEST_FILENAME" + fi + local padding='$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' + if ((BATS_LAST_STACK_DEPTH != ${#BASH_LINENO[@]})); then + local reference + bats_format_file_line_reference reference "${file##*/}" "$line" + printf '%s [%s]\n' "${padding::${#BASH_LINENO[@]}-4}" "$reference" >&4 + fi + printf '%s %s\n' "${padding::${#BASH_LINENO[@]}-4}" "$BASH_COMMAND" >&4 + BATS_LAST_BASH_COMMAND="$BASH_COMMAND" + BATS_LAST_BASH_LINENO="$line" + BATS_LAST_BASH_SOURCE="${BASH_SOURCE[2]}" + BATS_LAST_STACK_DEPTH="${#BASH_LINENO[@]}" + fi + fi +} + +# bats_debug_trap tracks the last line of code executed within a test. This is +# necessary because $BASH_LINENO is often incorrect inside of ERR and EXIT +# trap handlers. +# +# Below are tables describing different command failure scenarios and the +# reliability of $BASH_LINENO within different the executed DEBUG, ERR, and EXIT +# trap handlers. Naturally, the behaviors change between versions of Bash. +# +# Table rows should be read left to right. For example, on bash version +# 4.0.44(2)-release, if a test executes `false` (or any other failing external +# command), bash will do the following in order: +# 1. Call the DEBUG trap handler (bats_debug_trap) with $BASH_LINENO referring +# to the source line containing the `false` command, then +# 2. Call the DEBUG trap handler again, but with an incorrect $BASH_LINENO, then +# 3. Call the ERR trap handler, but with a (possibly-different) incorrect +# $BASH_LINENO, then +# 4. Call the DEBUG trap handler again, but with $BASH_LINENO set to 1, then +# 5. Call the EXIT trap handler, with $BASH_LINENO set to 1. +# +# bash version 4.4.20(1)-release +# command | first DEBUG | second DEBUG | ERR | third DEBUG | EXIT +# -------------+-------------+--------------+---------+-------------+-------- +# false | OK | OK | OK | BAD[1] | BAD[1] +# [[ 1 = 2 ]] | OK | BAD[2] | BAD[2] | BAD[1] | BAD[1] +# (( 1 = 2 )) | OK | BAD[2] | BAD[2] | BAD[1] | BAD[1] +# ! 
true | OK | --- | BAD[4] | --- | BAD[1] +# $var_dne | OK | --- | --- | BAD[1] | BAD[1] +# source /dne | OK | --- | --- | BAD[1] | BAD[1] +# +# bash version 4.0.44(2)-release +# command | first DEBUG | second DEBUG | ERR | third DEBUG | EXIT +# -------------+-------------+--------------+---------+-------------+-------- +# false | OK | BAD[3] | BAD[3] | BAD[1] | BAD[1] +# [[ 1 = 2 ]] | OK | --- | BAD[3] | --- | BAD[1] +# (( 1 = 2 )) | OK | --- | BAD[3] | --- | BAD[1] +# ! true | OK | --- | BAD[3] | --- | BAD[1] +# $var_dne | OK | --- | --- | BAD[1] | BAD[1] +# source /dne | OK | --- | --- | BAD[1] | BAD[1] +# +# [1] The reported line number is always 1. +# [2] The reported source location is that of the beginning of the function +# calling the command. +# [3] The reported line is that of the last command executed in the DEBUG trap +# handler. +# [4] The reported source location is that of the call to the function calling +# the command. +bats_debug_trap() { + # on windows we sometimes get a mix of paths (when install via nmp install -g) + # which have C:/... or /c/... comparing them is going to be problematic. + # We need to normalize them to a common format! + local NORMALIZED_INPUT + bats_normalize_windows_dir_path NORMALIZED_INPUT "${1%/*}" + local file_excluded='' path + for path in "${BATS_DEBUG_EXCLUDE_PATHS[@]}"; do + if [[ "$NORMALIZED_INPUT" == "$path"* ]]; then + file_excluded=1 + break + fi + done + + # don't update the trace within library functions or we get backtraces from inside traps + # also don't record new stack traces while handling interruptions, to avoid overriding the interrupted command + if [[ -z "$file_excluded" && + "${BATS_INTERRUPTED-NOTSET}" == NOTSET && + "${BATS_TIMED_OUT-NOTSET}" == NOTSET ]]; then + BATS_DEBUG_LASTLAST_STACK_TRACE=( + ${BATS_DEBUG_LAST_STACK_TRACE[@]+"${BATS_DEBUG_LAST_STACK_TRACE[@]}"} + ) + + BATS_DEBUG_LAST_LINENO=(${BASH_LINENO[@]+"${BASH_LINENO[@]}"}) + BATS_DEBUG_LAST_SOURCE=(${BASH_SOURCE[@]+"${BASH_SOURCE[@]}"}) + bats_capture_stack_trace + bats_emit_trace + fi +} + +# For some versions of Bash, the `ERR` trap may not always fire for every +# command failure, but the `EXIT` trap will. Also, some command failures may not +# set `$?` properly. See #72 and #81 for details. +# +# For this reason, we call `bats_check_status_from_trap` at the very beginning +# of `bats_teardown_trap` and check the value of `$BATS_TEST_COMPLETED` before +# taking other actions. We also adjust the exit status value if needed. +# +# See `bats_exit_trap` for an additional EXIT error handling case when `$?` +# isn't set properly during `teardown()` errors. +bats_check_status_from_trap() { + local status="$?" + if [[ -z "${BATS_TEST_COMPLETED:-}" ]]; then + BATS_ERROR_STATUS="${BATS_ERROR_STATUS:-$status}" + if [[ "$BATS_ERROR_STATUS" -eq 0 ]]; then + BATS_ERROR_STATUS=1 + fi + trap - DEBUG + fi +} + +bats_add_debug_exclude_path() { # + if [[ -z "$1" ]]; then # don't exclude everything + printf "bats_add_debug_exclude_path: Exclude path must not be empty!\n" >&2 + return 1 + fi + if [[ "$OSTYPE" == cygwin || "$OSTYPE" == msys ]]; then + local normalized_dir + bats_normalize_windows_dir_path normalized_dir "$1" + BATS_DEBUG_EXCLUDE_PATHS+=("$normalized_dir") + else + BATS_DEBUG_EXCLUDE_PATHS+=("$1") + fi +} + +bats_setup_tracing() { + # Variables for capturing accurate stack traces. See bats_debug_trap for + # details. 
+ # + # BATS_DEBUG_LAST_LINENO, BATS_DEBUG_LAST_SOURCE, and + # BATS_DEBUG_LAST_STACK_TRACE hold data from the most recent call to + # bats_debug_trap. + # + # BATS_DEBUG_LASTLAST_STACK_TRACE holds data from two bats_debug_trap calls + # ago. + # + # BATS_DEBUG_LAST_STACK_TRACE_IS_VALID indicates that + # BATS_DEBUG_LAST_STACK_TRACE contains the stack trace of the test's error. If + # unset, BATS_DEBUG_LAST_STACK_TRACE is unreliable and + # BATS_DEBUG_LASTLAST_STACK_TRACE should be used instead. + BATS_DEBUG_LASTLAST_STACK_TRACE=() + BATS_DEBUG_LAST_LINENO=() + BATS_DEBUG_LAST_SOURCE=() + BATS_DEBUG_LAST_STACK_TRACE=() + BATS_DEBUG_LAST_STACK_TRACE_IS_VALID= + BATS_ERROR_SUFFIX= + BATS_DEBUG_EXCLUDE_PATHS=() + # exclude some paths by default + bats_add_debug_exclude_path "$BATS_ROOT/lib/" + bats_add_debug_exclude_path "$BATS_ROOT/libexec/" + + exec 4<&1 # used for tracing + if [[ "${BATS_TRACE_LEVEL:-0}" -gt 0 ]]; then + # avoid undefined variable errors + BATS_LAST_BASH_COMMAND= + BATS_LAST_BASH_LINENO= + BATS_LAST_BASH_SOURCE= + BATS_LAST_STACK_DEPTH= + # try to exclude helper libraries if found, this is only relevant for tracing + while read -r path; do + bats_add_debug_exclude_path "$path" + done < <(find "$PWD" -type d -name bats-assert -o -name bats-support) + fi + + local exclude_paths path + # exclude user defined libraries + IFS=':' read -r exclude_paths <<<"${BATS_DEBUG_EXCLUDE_PATHS:-}" + for path in "${exclude_paths[@]}"; do + if [[ -n "$path" ]]; then + bats_add_debug_exclude_path "$path" + fi + done + + # turn on traps after setting excludes to avoid tracing the exclude setup + trap 'bats_debug_trap "$BASH_SOURCE"' DEBUG + trap 'bats_error_trap' ERR +} + +bats_error_trap() { + bats_check_status_from_trap + + # If necessary, undo the most recent stack trace captured by bats_debug_trap. + # See bats_debug_trap for details. + if [[ "${BASH_LINENO[*]}" = "${BATS_DEBUG_LAST_LINENO[*]:-}" && + "${BASH_SOURCE[*]}" = "${BATS_DEBUG_LAST_SOURCE[*]:-}" && + -z "$BATS_DEBUG_LAST_STACK_TRACE_IS_VALID" ]]; then + BATS_DEBUG_LAST_STACK_TRACE=( + ${BATS_DEBUG_LASTLAST_STACK_TRACE[@]+"${BATS_DEBUG_LASTLAST_STACK_TRACE[@]}"} + ) + fi + BATS_DEBUG_LAST_STACK_TRACE_IS_VALID=1 +} + +bats_interrupt_trap() { + # mark the interruption, to handle during exit + BATS_INTERRUPTED=true + BATS_ERROR_STATUS=130 + # debug trap fires before interrupt trap but gets wrong linenumber (line 1) + # -> use last stack trace instead of BATS_DEBUG_LAST_STACK_TRACE_IS_VALID=true +} + +# this is used inside run() +bats_interrupt_trap_in_run() { + # mark the interruption, to handle during exit + BATS_INTERRUPTED=true + BATS_ERROR_STATUS=130 + BATS_DEBUG_LAST_STACK_TRACE_IS_VALID=true + exit 130 +} diff --git a/test/bats/lib/bats-core/validator.bash b/test/bats/lib/bats-core/validator.bash new file mode 100644 index 000000000..59fc2c1e6 --- /dev/null +++ b/test/bats/lib/bats-core/validator.bash @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +bats_test_count_validator() { + trap '' INT # continue forwarding + header_pattern='[0-9]+\.\.[0-9]+' + IFS= read -r header + # repeat the header + printf "%s\n" "$header" + + # if we detect a TAP plan + if [[ "$header" =~ $header_pattern ]]; then + # extract the number of tests ... + local expected_number_of_tests="${header:3}" + # ... count the actual number of [not ] oks... 
+ local actual_number_of_tests=0 + while IFS= read -r line; do + # forward line + printf "%s\n" "$line" + case "$line" in + 'ok '*) + ((++actual_number_of_tests)) + ;; + 'not ok'*) + ((++actual_number_of_tests)) + ;; + esac + done + # ... and error if they are not the same + if [[ "${actual_number_of_tests}" != "${expected_number_of_tests}" ]]; then + printf '# bats warning: Executed %s instead of expected %s tests\n' "$actual_number_of_tests" "$expected_number_of_tests" + return 1 + fi + else + # forward output unchanged + cat + fi +} diff --git a/test/bats/lib/bats-core/warnings.bash b/test/bats/lib/bats-core/warnings.bash new file mode 100644 index 000000000..fbb5186a4 --- /dev/null +++ b/test/bats/lib/bats-core/warnings.bash @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# shellcheck source=lib/bats-core/tracing.bash +source "$BATS_ROOT/lib/bats-core/tracing.bash" + +# generate a warning report for the parent call's call site +bats_generate_warning() { # [--no-stacktrace] [...] + local warning_number="${1-}" padding="00" + shift + local no_stacktrace= + if [[ ${1-} == --no-stacktrace ]]; then + no_stacktrace=1 + shift + fi + if [[ $warning_number =~ [0-9]+ ]] && ((warning_number < ${#BATS_WARNING_SHORT_DESCS[@]})); then + { + printf "BW%s: ${BATS_WARNING_SHORT_DESCS[$warning_number]}\n" "${padding:${#warning_number}}${warning_number}" "$@" + if [[ -z "$no_stacktrace" ]]; then + bats_capture_stack_trace + BATS_STACK_TRACE_PREFIX=' ' bats_print_stack_trace "${BATS_DEBUG_LAST_STACK_TRACE[@]}" + fi + } >>"$BATS_WARNING_FILE" 2>&3 + else + printf "Invalid Bats warning number '%s'. It must be an integer between 1 and %d." "$warning_number" "$((${#BATS_WARNING_SHORT_DESCS[@]} - 1))" >&2 + exit 1 + fi +} + +# generate a warning if the BATS_GUARANTEED_MINIMUM_VERSION is not high enough +bats_warn_minimum_guaranteed_version() { # + if bats_version_lt "$BATS_GUARANTEED_MINIMUM_VERSION" "$2"; then + bats_generate_warning 2 "$1" "$2" "$2" + fi +} + +# put after functions to avoid line changes in tests when new ones get added +BATS_WARNING_SHORT_DESCS=( + # to start with 1 + 'PADDING' + # see issue #578 for context + "\`run\`'s command \`%s\` exited with code 127, indicating 'Command not found'. Use run's return code checks, e.g. \`run -127\`, to fix this message." + "%s requires at least BATS_VERSION=%s. Use \`bats_require_minimum_version %s\` to fix this message." + "\`setup_suite\` is visible to test file '%s', but was not executed. It belongs into 'setup_suite.bash' to be picked up automatically." +) diff --git a/test/bats/libexec/bats-core/bats b/test/bats/libexec/bats-core/bats new file mode 100755 index 000000000..9b5f0c435 --- /dev/null +++ b/test/bats/libexec/bats-core/bats @@ -0,0 +1,504 @@ +#!/usr/bin/env bash +set -e + +export BATS_VERSION='1.9.0' +VALID_FORMATTERS="pretty, junit, tap, tap13" + +version() { + printf 'Bats %s\n' "$BATS_VERSION" +} + +abort() { + local print_usage=1 + if [[ ${1:-} == --no-print-usage ]]; then + print_usage= + shift + fi + printf 'Error: %s\n' "$1" >&2 + if [[ -n $print_usage ]]; then + usage >&2 + fi + exit 1 +} + +usage() { + local cmd="${0##*/}" + local line + + cat < + ${cmd} [-h | -v] + +HELP_TEXT_HEADER + + cat <<'HELP_TEXT_BODY' + is the path to a Bats test file, or the path to a directory + containing Bats test files (ending with ".bats") + + -c, --count Count test cases without running any tests + --code-quote-style