diff --git a/.coveragerc b/.coveragerc
index 1c4b01767..0f0187862 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -9,6 +9,7 @@ exclude_lines =
pragma: no cover
raise NotImplementedError
if __name__ == .__main__.:
+ if TYPE_CHECKING:
ignore_errors = True
omit =
tests/*
diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..9149f7e2f
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,8 @@
+[flake8]
+ignore = E203,W503
+max-line-length = 100
+select = B,C,E,F,W,T4
+exclude = cwltool/schemas
+extend-ignore = E501,B905
+# when Python 3.10 is the minimum version, re-enable check B905 for zip + strict
+extend-select = B9
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 000000000..1ce1ca378
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,8 @@
+# isort
+46e0485a889453dc178a878b5b5ebbfc7e4eb5f1
+
+# upgrade to black 20.8b1
+6fd6fdb381fe3f347627fd517a8f2dba7b0a7029
+
+# upgrade to black 23.1, longer lines (100)
+7fe8c0739b0515d00daabc7db87bc5fad926d345
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
index 85e078a0e..5d50548fd 100644
--- a/.github/workflows/ci-tests.yml
+++ b/.github/workflows/ci-tests.yml
@@ -1,4 +1,4 @@
-name: Continuous integration tests
+name: CI Tests
on:
push:
@@ -12,48 +12,53 @@ concurrency:
cancel-in-progress: true
env:
- singularity_version: 3.8.3
+ TOX_SKIP_MISSING_INTERPRETERS: False
+ # Rich (pip)
+ FORCE_COLOR: 1
+ # Tox
+ PY_COLORS: 1
+ # Mypy (see https://github.com/python/mypy/issues/7771)
+ TERM: xterm-color
+ MYPY_FORCE_COLOR: 1
+ MYPY_FORCE_TERMINAL_WIDTH: 200
+ # Pytest
+ PYTEST_ADDOPTS: --color=yes
jobs:
tox:
- name: CI tests via Tox
-
- runs-on: ubuntu-20.04 # 22.04 doesn't support Python 3.6
-
+ name: Tox
+ runs-on: ubuntu-22.04
strategy:
matrix:
py-ver-major: [3]
- py-ver-minor: [6, 7, 8, 9, 10, 11]
+ py-ver-minor: [8, 9, 10, 11, 12]
step: [lint, unit, bandit, mypy]
- exclude:
- - py-ver-major: 3
- py-ver-minor: 6
- step: mypy
env:
py-semver: ${{ format('{0}.{1}', matrix.py-ver-major, matrix.py-ver-minor) }}
TOXENV: ${{ format('py{0}{1}-{2}', matrix.py-ver-major, matrix.py-ver-minor, matrix.step) }}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Singularity
if: ${{ matrix.step == 'unit' || matrix.step == 'mypy' }}
- uses: eWaterCycle/setup-singularity@v7
- with:
- singularity-version: ${{ env.singularity_version }}
+ run: |
+ wget --no-verbose https://github.com/sylabs/singularity/releases/download/v3.10.4/singularity-ce_3.10.4-focal_amd64.deb
+ sudo apt-get install -y ./singularity-ce_3.10.4-focal_amd64.deb
- name: Give the test runner user a name to make provenance happy.
if: ${{ matrix.step == 'unit' || matrix.step == 'mypy' }}
- run: sudo usermod -c 'CI Runner' $(whoami)
+ run: sudo usermod -c 'CI Runner' "$(whoami)"
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.py-semver }}
+ allow-prereleases: true
cache: pip
cache-dependency-path: |
requirements.txt
@@ -62,7 +67,7 @@ jobs:
- name: Upgrade setuptools and install tox
run: |
pip install -U pip setuptools wheel
- pip install tox tox-gh-actions
+ pip install "tox<4" "tox-gh-actions<3"
- name: MyPy cache
if: ${{ matrix.step == 'mypy' }}
@@ -78,12 +83,11 @@ jobs:
if: ${{ matrix.step == 'unit' }}
uses: codecov/codecov-action@v3
with:
- fail_ci_if_error: true
+ fail_ci_if_error: false
token: ${{ secrets.CODECOV_TOKEN }}
tox-style:
- name: CI linters via Tox
-
+ name: Linters
runs-on: ubuntu-20.04
strategy:
@@ -91,11 +95,11 @@ jobs:
step: [lintreadme, shellcheck, pydocstyle]
env:
- py-semver: "3.11"
- TOXENV: ${{ format('py311-{0}', matrix.step) }}
+ py-semver: "3.12"
+ TOXENV: ${{ format('py312-{0}', matrix.step) }}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -108,7 +112,7 @@ jobs:
- name: Upgrade setuptools and install tox
run: |
pip install -U pip setuptools wheel
- pip install tox tox-gh-actions
+ pip install "tox<4" "tox-gh-actions<3"
- if: ${{ matrix.step == 'pydocstyle' && github.event_name == 'pull_request'}}
name: Create local branch for diff-quality for PRs
@@ -117,10 +121,47 @@ jobs:
- name: Test with tox
run: tox
- conformance_tests:
- name: CWL spec conformance tests
+ clean_working_dir:
+ name: No leftovers
+ runs-on: ubuntu-22.04
+ env:
+ py-semver: "3.12"
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Singularity
+ run: |
+ wget --no-verbose https://github.com/sylabs/singularity/releases/download/v3.10.4/singularity-ce_3.10.4-focal_amd64.deb
+ sudo apt-get install -y ./singularity-ce_3.10.4-focal_amd64.deb
+
+ - name: Give the test runner user a name to make provenance happy.
+ run: sudo usermod -c 'CI Runner' "$(whoami)"
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ env.py-semver }}
+ cache: pip
+
+ - name: install with test dependencies
+ run: |
+ pip install -U pip setuptools wheel
+ pip install --no-build-isolation -rtest-requirements.txt .[deps]
+
+ - name: make working directory read-only
+ run: |
+ mkdir .pytest_cache
+ chmod a-w .
+
+ - name: run tests
+ run: make test
- runs-on: ubuntu-20.04
+
+ conformance_tests:
+ name: CWL conformance
+ runs-on: ubuntu-22.04
strategy:
matrix:
@@ -133,13 +174,20 @@ jobs:
extras: "--fast-parser"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Singularity
if: ${{ matrix.container == 'singularity' }}
- uses: eWaterCycle/setup-singularity@v7
+ run: |
+ wget --no-verbose https://github.com/sylabs/singularity/releases/download/v3.10.4/singularity-ce_3.10.4-jammy_amd64.deb
+ sudo apt-get install -y ./singularity-ce_3.10.4-jammy_amd64.deb
+
+ - name: Singularity cache
+ if: ${{ matrix.container == 'singularity' }}
+ uses: actions/cache@v3
with:
- singularity-version: ${{ env.singularity_version }}
+ path: sifcache
+ key: singularity
- name: Set up Podman
if: ${{ matrix.container == 'podman' }}
@@ -148,41 +196,44 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: 3.9
+ python-version: 3.12
cache: pip
- - name: Run CWL conformance tests ${{ matrix.cwl-version }}
+ - name: "Test CWL ${{ matrix.cwl-version }} conformance"
env:
- version: ${{ matrix.cwl-version }}
- container: ${{ matrix.container }}
- spec_branch: main
+ VERSION: ${{ matrix.cwl-version }}
+ CONTAINER: ${{ matrix.container }}
+ GIT_TARGET: main
CWLTOOL_OPTIONS: ${{ matrix.extras }}
run: ./conformance-test.sh
-
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ with:
+ fail_ci_if_error: false
+ token: ${{ secrets.CODECOV_TOKEN }}
release_test:
name: cwltool release test
-
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Singularity
- uses: eWaterCycle/setup-singularity@v7
- with:
- singularity-version: ${{ env.singularity_version }}
+ run: |
+ wget --no-verbose https://github.com/sylabs/singularity/releases/download/v3.10.4/singularity-ce_3.10.4-jammy_amd64.deb
+ sudo apt-get install -y ./singularity-ce_3.10.4-jammy_amd64.deb
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: 3.11
+ python-version: 3.12
cache: pip
cache-dependency-path: |
requirements.txt
test-requirements.txt
- name: Give the test runner user a name to make provenance happy.
- run: sudo usermod -c 'CI Runner' $(whoami)
+ run: sudo usermod -c 'CI Runner' "$(whoami)"
- name: Install packages
run: |
@@ -197,23 +248,27 @@ jobs:
build_test_container:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: record cwltool version
+ run: pip install -U setuptools wheel && pip install setuptools_scm[toml] && python setup.py --version
- name: build & test cwltool_module container
run: ./build-cwltool-docker.sh
macos:
- name: CI test on macos-latest
+ name: Test on macos-latest
runs-on: macos-latest
env:
- TOXENV: py310-unit
+ TOXENV: py312-unit
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: 3.11
+ python-version: 3.12
cache: pip
cache-dependency-path: |
requirements.txt
@@ -221,7 +276,7 @@ jobs:
- name: Upgrade setuptools and install tox
run: |
pip install -U pip setuptools wheel
- pip install tox tox-gh-actions
+ pip install "tox<4" "tox-gh-actions<3"
# # docker for mac install is not currently stable
# - name: 'SETUP MacOS: load Homebrew cache'
# uses: actions/cache@v3
@@ -236,8 +291,7 @@ jobs:
- name: Test with tox
run: tox
- name: Upload coverage to Codecov
- if: ${{ matrix.step == 'unit' }}
uses: codecov/codecov-action@v3
with:
- fail_ci_if_error: true
+ fail_ci_if_error: false
token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 4baf55059..8af9efe6f 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -23,7 +23,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
diff --git a/.github/workflows/quay-publish.yml b/.github/workflows/quay-publish.yml
index ae40f87f4..0e2d09915 100644
--- a/.github/workflows/quay-publish.yml
+++ b/.github/workflows/quay-publish.yml
@@ -8,31 +8,39 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
- name: Get image tags
id: image_tags
run: |
- echo -n ::set-output name=IMAGE_TAGS::${GITHUB_REF#refs/*/}
+ echo -n "IMAGE_TAGS=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
+ - name: record cwltool version
+ run: |
+ pip install "setuptools>=61"
+ pip install setuptools_scm[toml] wheel
+ python setup.py --version
- name: Set up QEMU
- uses: docker/setup-qemu-action@v2
+ uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
+ uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
- uses: actions/cache@v2
+ uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-multi-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-multi-buildx
- name: Login to Quay.io
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
registry: ${{ secrets.REGISTRY_SERVER }}
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and publish cwltool_module image to Quay
- uses: docker/build-push-action@v3
+ uses: docker/build-push-action@v5
with:
+ context: .
file: cwltool.Dockerfile
tags: quay.io/commonwl/cwltool_module:${{ steps.image_tags.outputs.IMAGE_TAGS }},quay.io/commonwl/cwltool_module:latest
target: module
@@ -41,8 +49,9 @@ jobs:
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Build and publish cwltool image to Quay
- uses: docker/build-push-action@v3
+ uses: docker/build-push-action@v5
with:
+ context: .
file: cwltool.Dockerfile
tags: quay.io/commonwl/cwltool:${{ steps.image_tags.outputs.IMAGE_TAGS }},quay.io/commonwl/cwltool:latest
platforms: linux/amd64,linux/arm64
diff --git a/.gitignore b/.gitignore
index 3300dfac2..5941627f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,5 +53,10 @@ value
.python-version
+cwltool/_version.py
+
# Folder created when using make
cwltool_deps
+docs/_build/
+docs/autoapi/
+
diff --git a/.readthedocs.yml b/.readthedocs.yml
index eb781d178..cd1f44052 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -12,9 +12,14 @@ sphinx:
# Optionally build your docs in additional formats such as PDF and ePub
formats: all
-# Optionally set the version of Python and requirements required to build your docs
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.11"
+ apt_packages:
+ - graphviz
+
python:
- version: 3.7
install:
- requirements: docs/requirements.txt
- method: pip
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0494223fe..cd9e9105d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
Style guide:
- PEP-8 (as implemented by the `black` code formatting tool)
-- Python 3.6+ compatible code
+- Python 3.8+ compatible code
- PEP-484 type hints
The development is done using `git`, we encourage you to get familiar with it.
@@ -9,6 +9,9 @@ To get the code and start working on the changes you can start a console and:
- Clone the cwltool: `git clone https://github.com/common-workflow-language/cwltool.git`
- Switch to cwltool directory: `cd cwltool`
+It is suggested that you run `git config blame.ignoreRevsFile .git-blame-ignore-revs`
+to filter out mass-formatting commits from `git blame`.
+
In order to contribute to the development of `cwltool`, the source code needs to
pass the tests before your changes are accepted. There are a couple of ways to test
the code with your changes: let `tox` manage installation and test running in
diff --git a/MANIFEST.in b/MANIFEST.in
index 0939a4cc2..722a4e42b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,9 +2,10 @@ include README.rst CODE_OF_CONDUCT.md CONTRIBUTING.md
include MANIFEST.in
include LICENSE.txt
include *requirements.txt mypy.ini tox.ini
-include gittaggers.py Makefile cwltool.py
+include Makefile cwltool.py
recursive-include mypy-stubs *.pyi *.py
include tests/*
+include tests/cwl-conformance/cwltool-conftest.py
include tests/loop/*
include tests/tmp1/tmp2/tmp3/.gitkeep
include tests/tmp4/alpha/*
diff --git a/Makefile b/Makefile
index 62bd941ce..29716b820 100644
--- a/Makefile
+++ b/Makefile
@@ -24,10 +24,10 @@ MODULE=cwltool
# `SHELL=bash` doesn't work for some, so don't use BASH-isms like
# `[[` conditional expressions.
-PYSOURCES=$(wildcard ${MODULE}/**.py tests/*.py) setup.py
-DEVPKGS=diff_cover pylint pep257 pydocstyle tox tox-pyenv \
+PYSOURCES=$(wildcard ${MODULE}/**.py cwltool/cwlprov/*.py tests/*.py) setup.py
+DEVPKGS=diff_cover pylint pep257 pydocstyle 'tox<4' tox-pyenv auto-walrus \
isort wheel autoflake pyupgrade bandit -rlint-requirements.txt\
- -rtest-requirements.txt -rmypy-requirements.txt
+ -rtest-requirements.txt -rmypy-requirements.txt -rdocs/requirements.txt
DEBDEVPKGS=pep8 python-autopep8 pylint python-coverage pydocstyle sloccount \
python-flake8 python-mock shellcheck
@@ -77,7 +77,7 @@ check-python3:
python --version 2>&1 | grep "Python 3"
dist/${MODULE}-$(VERSION).tar.gz: check-python3 $(SOURCES)
- python setup.py sdist bdist_wheel
+ python -m build
## docs : make the docs
docs: FORCE
@@ -85,7 +85,7 @@ docs: FORCE
## clean : clean up all temporary / machine-generated files
clean: check-python3 FORCE
- rm -f ${MODULE}/*.pyc tests/*.pyc *.so ${MODULE}/*.so
+ rm -f ${MODULE}/*.pyc tests/*.pyc *.so ${MODULE}/*.so cwltool/cwlprov/*.so
rm -Rf ${MODULE}/__pycache__/
python setup.py clean --all || true
rm -Rf .coverage
@@ -111,16 +111,21 @@ pydocstyle_report.txt: $(PYSOURCES)
diff_pydocstyle_report: pydocstyle_report.txt
diff-quality --compare-branch=main --violations=pydocstyle --fail-under=100 $^
-## codespell : check for common misspellings
-codespell:
- codespell -w $(shell git ls-files | grep -v cwltool/schemas | grep -v cwltool/jshint/ | grep -v mypy-stubs)
+## codespell-check : check for common misspellings
+codespell-check:
+ @codespell $(shell git ls-files | grep -v cwltool/schemas | grep -v cwltool/jshint/ | grep -v mypy-stubs) \
+	|| (echo Probable typo found. Run \"make codespell-fix\" to accept suggested fixes, or add the word to the ignore list in setup.cfg ; exit 1)
+
+## codespell-fix : fix common misspellings
+codespell-fix:
+ @codespell -w $(shell git ls-files | grep -v cwltool/schemas | grep -v cwltool/jshint/ | grep -v mypy-stubs)
## format : check/fix all code indentation and formatting (runs black)
format:
- black --exclude cwltool/schemas setup.py cwltool.py cwltool tests mypy-stubs
+	black --exclude 'cwltool/schemas|cwltool/_version.py' setup.py cwltool.py cwltool tests mypy-stubs
format-check:
- black --diff --check --exclude cwltool/schemas setup.py cwltool.py cwltool tests mypy-stubs
+	black --diff --check --exclude 'cwltool/schemas|cwltool/_version.py' setup.py cwltool.py cwltool tests mypy-stubs
## pylint : run static code analysis on Python code
pylint: $(PYSOURCES)
@@ -177,25 +182,9 @@ list-author-emails:
@git log --format='%aN,%aE' | sort -u | grep -v 'root'
mypy3: mypy
-mypy: $(filter-out setup.py gittagger.py,$(PYSOURCES))
- if ! test -f $(shell python -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))')/py.typed ; \
- then \
- rm -Rf mypy-stubs/ruamel/yaml ; \
- ln -s $(shell python -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
- mypy-stubs/ruamel/ ; \
- fi # if minimally required ruamel.yaml version is 0.15.99 or greater, than the above can be removed
+mypy: $(PYSOURCES)
MYPYPATH=$$MYPYPATH:mypy-stubs mypy $^
-mypy_3.6: $(filter-out setup.py gittagger.py,$(PYSOURCES))
- if ! test -f $(shell python -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))')/py.typed ; \
- then \
- rm -Rf mypy-stubs/ruamel/yaml ; \
- ln -s $(shell python -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
- mypy-stubs/ruamel/ ; \
- fi # if minimally required ruamel.yaml version is 0.15.99 or greater, than the above can be removed
- MYPYPATH=$$MYPYPATH:mypy-stubs mypy --python-version 3.6 $^
-
-
mypyc: $(PYSOURCES)
MYPYPATH=mypy-stubs CWLTOOL_USE_MYPYC=1 pip install --verbose -e . \
&& pytest -rs -vv ${PYTEST_EXTRA}
@@ -205,15 +194,19 @@ shellcheck: FORCE
cwltool-in-docker.sh
pyupgrade: $(PYSOURCES)
- pyupgrade --exit-zero-even-if-changed --py36-plus $^
+ pyupgrade --exit-zero-even-if-changed --py38-plus $^
+ auto-walrus $^
release-test: check-python3 FORCE
git diff-index --quiet HEAD -- || ( echo You have uncommitted changes, please commit them and try again; false )
./release-test.sh
-release: release-test
+release:
+ export SETUPTOOLS_SCM_PRETEND_VERSION_FOR_CWLTOOL=${VERSION} && \
+ ./release-test.sh && \
. testenv2/bin/activate && \
- python testenv2/src/${MODULE}/setup.py sdist bdist_wheel && \
+ pip install build && \
+ python -m build testenv2/src/${MODULE} && \
pip install twine && \
twine upload testenv2/src/${MODULE}/dist/* && \
git tag ${VERSION} && git push --tags
diff --git a/README.rst b/README.rst
index 87313bff6..2888f08bb 100644
--- a/README.rst
+++ b/README.rst
@@ -1,6 +1,6 @@
-==================================================================
-Common Workflow Language tool description reference implementation
-==================================================================
+#############################################################################################
+``cwltool``: The reference implementation of the Common Workflow Language standards
+#############################################################################################
|Linux Status| |Coverage Status| |Docs Status|
@@ -46,12 +46,13 @@ Quay.io (Docker): |Quay.io Container|
:target: https://cwltool.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
-This is the reference implementation of the Common Workflow Language. It is
-intended to be feature complete and provide comprehensive validation of CWL
+This is the reference implementation of the `Common Workflow Language open
+standards `_. It is intended to be feature complete
+and provide comprehensive validation of CWL
files as well as provide other tools related to working with CWL.
-This is written and tested for
-`Python `_ ``3.x {x = 6, 7, 8, 9, 10}``
+``cwltool`` is written and tested for
+`Python `_ ``3.x {x = 8, 9, 10, 11, 12}``
The reference implementation consists of two packages. The ``cwltool`` package
is the primary Python module containing the reference implementation in the
@@ -64,11 +65,14 @@ default CWL interpreter installed on a host.
``cwltool`` is provided by the CWL project, `a member project of Software Freedom Conservancy `_
and our `many contributors `_.
+.. contents:: Table of Contents
+
+*******
Install
--------
+*******
``cwltool`` packages
-^^^^^^^^^^^^^^^^^^^^
+====================
Your operating system may offer cwltool directly. For `Debian `_, `Ubuntu `_,
and similar Linux distributions, try
@@ -112,19 +116,19 @@ If installing alongside another CWL implementation (like ``toil-cwl-runner`` or
pip install cwltool
MS Windows users
-^^^^^^^^^^^^^^^^
+================
-1. Install `"Windows Subsystem for Linux 2" (WSL2) and Docker Desktop `_
-2. Install `Debian from the Microsoft Store `_
-3. Set Debian as your default WSL 2 distro: ``wsl --set-default debian``.
-4. Return to the Docker Desktop, choose `Settings → Resources → WSL Integration `_ and under "Enable integration with additional distros" select "Debian",
-5. Reboot if you have not yet already.
-6. Launch Debian and follow the Linux instructions above (``apt-get install cwltool`` or use the ``venv`` method)
+1. `Install Windows Subsystem for Linux 2 and Docker Desktop `_.
+2. `Install Debian from the Microsoft Store `_.
+3. Set Debian as your default WSL 2 distro: ``wsl --set-default debian``.
+4. Return to the Docker Desktop, choose ``Settings`` → ``Resources`` → ``WSL Integration`` and under "Enable integration with additional distros" select "Debian",
+5. Reboot if you have not yet already.
+6. Launch Debian and follow the Linux instructions above (``apt-get install cwltool`` or use the ``venv`` method)
Network problems from within WSL2? Try `these instructions `_ followed by ``wsl --shutdown``.
``cwltool`` development version
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+===============================
Or you can skip the direct ``pip`` commands above and install the latest development version of ``cwltool``:
@@ -140,33 +144,42 @@ maintain which implementation ``cwl-runner`` points to via a symbolic file
system link or `another facility `_.
Recommended Software
-^^^^^^^^^^^^^^^^^^^^
+====================
+
+We strongly suggest having the following installed:
+
+* One of the following software container engines
+
+ * `Podman `_
+ * `Docker `_
+ * Singularity/Apptainer: See `Using Singularity`_
+ * udocker: See `Using uDocker`_
-You may also want to have the following installed:
-- `node.js `_
-- Docker, udocker, or Singularity (optional)
+* `node.js `_ for evaluating CWL Expressions quickly
+ (required for `udocker` users, optional but recommended for the other container engines).
Without these, some examples in the CWL tutorials at http://www.commonwl.org/user_guide/ may not work.
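+
+A quick sanity check for these prerequisites (an illustrative sketch; any
+one of the container engines is enough):
+
+.. code:: bash
+
+   docker --version || podman --version || singularity --version
+   node --version  # evaluates CWL Expressions; required when using udocker
+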
+***********************
Run on the command line
------------------------
+***********************
Simple command::
- cwl-runner [tool-or-workflow-description] [input-job-settings]
+ cwl-runner my_workflow.cwl my_inputs.yaml
Or if you have multiple CWL implementations installed and you want to override
the default cwl-runner then use::
- cwltool [tool-or-workflow-description] [input-job-settings]
+   cwltool my_workflow.cwl my_inputs.yaml
-You can set cwltool options in the environment with CWLTOOL_OPTIONS,
+You can set cwltool options in the environment with ``CWLTOOL_OPTIONS``,
these will be inserted at the beginning of the command line::
export CWLTOOL_OPTIONS="--debug"
Use with boot2docker on macOS
------------------------------
+=============================
boot2docker runs Docker inside a virtual machine, and it only mounts ``Users``
on it. The default behavior of CWL is to create temporary directories under e.g.
``/Var`` which is not accessible to Docker containers.
@@ -177,21 +190,24 @@ and ``--tmp-outdir-prefix`` to somewhere under ``/Users``::
$ cwl-runner --tmp-outdir-prefix=/Users/username/project --tmpdir-prefix=/Users/username/project wc-tool.cwl wc-job.json
Using uDocker
--------------
+=============
Some shared computing environments don't support Docker software containers for technical or policy reasons.
-As a workaround, the CWL reference runner supports using alternative ``docker`` implementations on Linux
-with the ``--user-space-docker-cmd`` option.
-
-One such "user space" friendly docker replacement is ``udocker`` https://github.com/indigo-dc/udocker.
+As a workaround, the CWL reference runner supports using the `udocker `_
+program on Linux using ``--udocker``.
udocker installation: https://indigo-dc.github.io/udocker/installation_manual.html
-Run `cwltool` just as you usually would, but with the new option, e.g., from the conformance tests
+Run `cwltool` just as you usually would, but with ``--udocker`` prior to the workflow path:
.. code:: bash
- cwltool --user-space-docker-cmd=udocker https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/empty.json
+ cwltool --udocker https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/test-cwl-out2.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/empty.json
+
+As was mentioned in the `Recommended Software`_ section, `node.js` is required when using `udocker`.
+
+Using Singularity
+=================
``cwltool`` can also use `Singularity `_ version 2.6.1
or later as a Docker container runtime.
@@ -206,10 +222,10 @@ Example
.. code:: bash
- cwltool --singularity https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/v1.0/cat3-tool-mediumcut.cwl https://github.com/common-workflow-language/common-workflow-language/blob/main/v1.0/v1.0/cat-job.json
+ cwltool --singularity https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/cat3-tool-mediumcut.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/cat-job.json
Running a tool or workflow from remote or local locations
----------------------------------------------------------
+=========================================================
``cwltool`` can run tool and workflow descriptions on both local and remote
systems via its support for HTTP[S] URLs.
@@ -220,11 +236,11 @@ is referenced and that document isn't found in the current directory, then the
following locations will be searched:
http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
-You can also use `cwldep `
+You can also use `cwldep `_
to manage dependencies on external tools and workflows.
Overriding workflow requirements at load time
----------------------------------------------
+=============================================
Sometimes a workflow needs additional requirements to run in a particular
environment or with a particular dataset. To avoid the need to modify the
@@ -268,7 +284,7 @@ Override identifiers are relative to the top-level workflow document.
Combining parts of a workflow into a single document
-----------------------------------------------------
+====================================================
Use ``--pack`` to combine a workflow made up of multiple files into a
single compound document. This operation takes all the CWL files
@@ -284,7 +300,7 @@ document. The top-level workflow is named ``#main``.
Running only part of a workflow
--------------------------------
+===============================
You can run a partial workflow with the ``--target`` (``-t``) option. This
takes the name of an output parameter, workflow step, or input
@@ -314,7 +330,7 @@ selected targets.
Visualizing a CWL document
---------------------------
+==========================
The ``--print-dot`` option will print a file suitable for the Graphviz ``dot`` program. Here is a bash one-liner to generate a Scalable Vector Graphic (SVG) file:
@@ -323,7 +339,7 @@ The ``--print-dot`` option will print a file suitable for Graphviz ``dot`` progr
cwltool --print-dot my-wf.cwl | dot -Tsvg > my-wf.svg
Modeling a CWL document as RDF
-------------------------------
+==============================
CWL documents can be expressed as RDF triple graphs.
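+
+For example, ``--print-rdf`` prints the document as RDF and
+``--rdf-serializer`` selects the serialization format (e.g. turtle):
+
+.. code:: bash
+
+   cwltool --print-rdf --rdf-serializer=turtle my_workflow.cwl
+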
@@ -333,7 +349,7 @@ CWL documents can be expressed as RDF triple graphs.
Environment Variables in cwltool
---------------------------------
+================================
This reference implementation supports several ways of setting
environment variables for tools, in addition to the standard
@@ -572,7 +588,7 @@ at the following links:
- `Initial cwltool Integration Pull Request `__
Use with GA4GH Tool Registry API
---------------------------------
+================================
Cwltool can launch tools directly from `GA4GH Tool Registry API`_ endpoints.
@@ -595,7 +611,7 @@ For this example, grab the test.json (and input file) from https://github.com/Ca
.. _`GA4GH Tool Registry API`: https://github.com/ga4gh/tool-registry-schemas
Running MPI-based tools that need to be launched
-------------------------------------------------
+================================================
Cwltool supports an extension to the CWL spec
``http://commonwl.org/cwltool#MPIRequirement``. When the tool
@@ -668,7 +684,7 @@ given in the following table; all are optional.
Enabling Fast Parser (experimental)
------------------------------------
+===================================
For very large workflows, `cwltool` can spend a lot of time in
initialization, before the first step runs. There is an experimental
@@ -681,12 +697,12 @@ initialization overhead, however as of this writing it has several limitations:
- Several other cases fail, as documented in https://github.com/common-workflow-language/cwltool/pull/1720
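+
+The flag can be passed on the command line, or through ``CWLTOOL_OPTIONS``
+as the CI workflow above does; a minimal example:
+
+.. code:: bash
+
+   cwltool --fast-parser my_workflow.cwl my_inputs.yaml
+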
-===========
+***********
Development
-===========
+***********
Running tests locally
----------------------
+=====================
- Running basic tests ``(/tests)``:
@@ -700,7 +716,7 @@ To run the basic tests after installing `cwltool` execute the following:
To run various tests in all supported Python environments, we use `tox `_. To run the test suite in all supported Python environments
first clone the complete code repository (see the ``git clone`` instructions above) and then run
the following in the terminal:
-``pip install tox; tox -p``
+``pip install "tox<4"; tox -p``
A list of all environments can be seen using:
``tox --listenvs``
@@ -718,7 +734,7 @@ program
Instructions for running these tests can be found in the Common Workflow Language Specification repository at https://github.com/common-workflow-language/common-workflow-language/blob/main/CONFORMANCE_TESTS.md .
Import as a module
-------------------
+==================
Add
@@ -742,7 +758,7 @@ The easiest way to use cwltool to run a tool or workflow from Python is to use a
CWL Tool Control Flow
----------------------
+=====================
Technical outline of how cwltool works internally, for maintainers.
@@ -800,9 +816,8 @@ Technical outline of how cwltool works internally, for maintainers.
Docker container, waits for it to complete, collects output, and makes
the output callback.
-
Extension points
-----------------
+================
The following functions can be passed to main() to override or augment
the listed behaviors.
diff --git a/build-cwltool-docker.sh b/build-cwltool-docker.sh
index d733b9919..a70fdf4df 100755
--- a/build-cwltool-docker.sh
+++ b/build-cwltool-docker.sh
@@ -1,10 +1,11 @@
#!/bin/bash
set -ex
-docker build --file=cwltool.Dockerfile --tag=quay.io/commonwl/cwltool_module --target module .
-docker build --file=cwltool.Dockerfile --tag=quay.io/commonwl/cwltool .
+engine=${ENGINE:-docker} # example: `ENGINE=podman ./build-cwltool-docker.sh`
+${engine} build --file=cwltool.Dockerfile --tag=quay.io/commonwl/cwltool_module --target module .
+${engine} build --file=cwltool.Dockerfile --tag=quay.io/commonwl/cwltool .
-docker run -t -v /var/run/docker.sock:/var/run/docker.sock \
+${engine} run -t -v /var/run/docker.sock:/var/run/docker.sock \
-v /tmp:/tmp \
-v "$PWD":/tmp/cwltool \
quay.io/commonwl/cwltool_module /bin/sh -c \
- "apk add gcc bash && pip install -r/tmp/cwltool/test-requirements.txt ; pytest -k 'not (test_bioconda or test_double_overwrite or test_env_filtering or test_biocontainers or test_disable_file_overwrite_without_ext or test_disable_file_creation_in_outdir_with_ext or test_write_write_conflict or test_directory_literal_with_real_inputs_inside or test_revsort_workflow or test_stdin_with_id_preset or test_no_compute_chcksum or test_packed_workflow_execution[tests/wf/count-lines1-wf.cwl-tests/wf/wc-job.json-False] or test_sequential_workflow or test_single_process_subwf_subwf_inline_step)' --ignore-glob '*test_udocker.py' -n 0 -v -rs --pyargs cwltool"
+ "apk add gcc bash git && pip install -r/tmp/cwltool/test-requirements.txt ; pytest -k 'not (test_bioconda or test_double_overwrite or test_env_filtering or test_biocontainers or test_disable_file_overwrite_without_ext or test_disable_file_creation_in_outdir_with_ext or test_write_write_conflict or test_directory_literal_with_real_inputs_inside or test_revsort_workflow or test_stdin_with_id_preset or test_no_compute_chcksum or test_packed_workflow_execution[tests/wf/count-lines1-wf.cwl-tests/wf/wc-job.json-False] or test_sequential_workflow or test_single_process_subwf_subwf_inline_step)' --ignore-glob '*test_udocker.py' -n 0 -v -rs --pyargs cwltool"
diff --git a/conformance-test.sh b/conformance-test.sh
index f180c39d3..483ad984e 100755
--- a/conformance-test.sh
+++ b/conformance-test.sh
@@ -1,128 +1,131 @@
#!/bin/bash
+
venv() {
- if ! test -d "$1" ; then
- if command -v virtualenv > /dev/null; then
- virtualenv -p python3 "$1"
- else
- python3 -m venv "$1"
- fi
+ if ! test -d "$1" ; then
+ if command -v virtualenv > /dev/null; then
+ virtualenv -p python3 "$1"
+ else
+ python3 -m venv "$1"
fi
- # shellcheck source=/dev/null
- source "$1"/bin/activate
+ fi
+ # shellcheck source=/dev/null
+ source "$1"/bin/activate
}
-# Set these environment variables when running the script, e.g.:
-# version=v1.1 spec_branch=new_test container=docker ./conformance_test.sh
+# Set these variables when running the script, e.g.:
+# VERSION=v1.2 GIT_TARGET=1.2.1_proposed CONTAINER=podman ./conformance-test.sh
# Version of the standard to test against
# Current options: v1.0, v1.1, v1.2
-version=${version:-v1.0}
+VERSION=${VERSION:-"v1.2"}
-# Which branch of the standard's repo to use.
-# This can be useful when adding new features
-spec_branch=${spec_branch:-main}
+# Which commit of the standard's repo to use
+# Defaults to the last commit of the 1.2.1_proposed branch
+GIT_TARGET=${GIT_TARGET:-"1.2.1_proposed"}
# Which container runtime to use
# Valid options: docker, singularity
-container=${container:-docker}
+CONTAINER=${CONTAINER:-docker}
+
+# Comma-separated list of test names that should be excluded from execution
+# Defaults to "docker_entrypoint, inplace_update_on_file_content"
+# EXCLUDE=${EXCLUDE:-"some_default_test_to_exclude"}
set -e
set -x
-if [[ "$version" = "v1.0" ]] ; then
- repo=common-workflow-language
+# Additional arguments for the pytest command
+# Defaults to none
+# PYTEST_EXTRA=
+
+# The directory where this script resides
+SCRIPT_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+# Download archive from GitHub
+if [[ "${VERSION}" = "v1.0" ]] ; then
+ REPO=common-workflow-language
else
# shellcheck disable=SC2001
- repo=cwl-$(echo "$version" | sed 's/\(v[0-9]*\.\)\([0-9]*\).*/\1\2/')
+ REPO=cwl-$(echo "$VERSION" | sed 's/\(v[0-9]*\.\)\([0-9]*\).*/\1\2/')
fi
-if [ ! -d "${repo}-${spec_branch}" ]; then
- if [ ! -f "${repo}-${spec_branch}.tar.gz" ]; then
- wget "https://github.com/common-workflow-language/${repo}/archive/${spec_branch}.tar.gz"
- fi
- tar xzf "${spec_branch}.tar.gz"
+if [ ! -d "${REPO}-${GIT_TARGET}" ] ; then
+ if [ ! -f "${GIT_TARGET}.tar.gz" ] ; then
+ wget "https://github.com/common-workflow-language/${REPO}/archive/${GIT_TARGET}.tar.gz"
+ fi
+ tar xzf "${GIT_TARGET}.tar.gz"
fi
-if [ "${container}" == "docker" ]; then
+if [ "${CONTAINER}" == "docker" ]; then
docker pull docker.io/node:slim
fi
-if [ "${container}" == "podman" ]; then
+if [ "${CONTAINER}" == "podman" ]; then
podman pull docker.io/node:slim
fi
-venv cwltool-venv3
-pip3 install -U setuptools wheel pip
-pip3 uninstall -y cwltool
-pip3 install -e .
-pip3 install codecov cwltest>=2.1
-root_folder=${PWD}
-pushd "${repo}-${spec_branch}" || exit 1
-
-# shellcheck disable=SC2043
-if [[ "$version" = "v1.0" ]]; then
- DRAFT="DRAFT=v1.0"
+if [ "${CONTAINER}" == "singularity" ]; then
+ export CWL_SINGULARITY_CACHE="$SCRIPT_DIRECTORY/sifcache"
+ mkdir --parents "${CWL_SINGULARITY_CACHE}"
fi
-# Clean up all cov data
-find . -name '.coverage*' -print0 | xargs -0 rm -f
-rm -f coverage.xml
-
-COVERAGE_RC=${PWD}/.coveragerc
-cat > "${COVERAGE_RC}" < "${CWLTOOL_WITH_COV}" <=2.3' pytest-cov pytest-xdist
+
+# Set conformance test filename
+if [[ "${VERSION}" = "v1.0" ]] ; then
+ CONFORMANCE_TEST="${SCRIPT_DIRECTORY}/${REPO}-${GIT_TARGET}/${VERSION}/conformance_test_v1.0.yaml"
+else
+ CONFORMANCE_TEST="${SCRIPT_DIRECTORY}/${REPO}-${GIT_TARGET}/conformance_tests.yaml"
+fi
+cp "${CONFORMANCE_TEST}" "${CONFORMANCE_TEST%".yaml"}.cwltest.yaml"
+CONFORMANCE_TEST="${CONFORMANCE_TEST%".yaml"}.cwltest.yaml"
CWLTOOL_OPTIONS+=" --parallel"
-# shellcheck disable=SC2154
-if [[ "$version" = *dev* ]]
+unset exclusions
+declare -a exclusions
+if [[ "$VERSION" = *dev* ]]
then
CWLTOOL_OPTIONS+=" --enable-dev"
fi
-
-if [[ "$container" = "singularity" ]]; then
+if [[ "$CONTAINER" = "singularity" ]]; then
CWLTOOL_OPTIONS+=" --singularity"
# This test fails because Singularity and Docker have
# different views on how to deal with this.
exclusions+=(docker_entrypoint)
-
- if [[ "${version}" = "v1.1" ]]; then
+ if [[ "${VERSION}" = "v1.1" ]]; then
# This fails because of a difference (in Singularity vs Docker) in
# the way filehandles are passed to processes in the container and
# wc can tell somehow.
# See issue #1440
exclusions+=(stdin_shorcut)
fi
-elif [[ "$container" = "podman" ]]; then
+elif [[ "$CONTAINER" = "podman" ]]; then
CWLTOOL_OPTIONS+=" --podman"
fi
-if [ "$GIT_BRANCH" = "origin/main" ] && [[ "$version" = "v1.0" ]] && [[ "$container" = "docker" ]]
+if [[ -n "${EXCLUDE}" ]] ; then
+ EXCLUDE="${EXCLUDE},"
+fi
+if (( "${#exclusions[*]}" > 0 )); then
+ EXCLUDE=${EXCLUDE}$(IFS=,; echo "${exclusions[*]}")
+fi
+
+# Build command
+TEST_COMMAND="python -m pytest ${CONFORMANCE_TEST} -n auto -rs --junit-xml=${SCRIPT_DIRECTORY}/cwltool_conf_${VERSION}_${GIT_TARGET}_${CONTAINER}.xml -o junit_suite_name=cwltool_$(echo "${CWLTOOL_OPTIONS}" | tr "[:blank:]-" _)"
+if [[ -n "${EXCLUDE}" ]] ; then
+ TEST_COMMAND="${TEST_COMMAND} --cwl-exclude ${EXCLUDE}"
+fi
+TEST_COMMAND="${TEST_COMMAND} --cov --cov-config ${SCRIPT_DIRECTORY}/.coveragerc --cov-report= ${PYTEST_EXTRA}"
+
+# Clean up all old coverage data
+find "${SCRIPT_DIRECTORY}" \( -type f -name .coverage -or -name '.coverage.*' -or -name coverage.xml \) -delete
+
+if [ "$GIT_BRANCH" = "origin/main" ] && [[ "$VERSION" = "v1.0" ]] && [[ "$CONTAINER" = "docker" ]]
then
rm -Rf conformance
# shellcheck disable=SC2154
@@ -130,59 +133,47 @@ then
git -C conformance config user.email "cwl-bot@users.noreply.github.com"
git -C conformance config user.name "CWL Jenkins build bot"
+ tool_ver=$(cwltool --version | awk '{ print $2 }')
+ badgedir=${PWD}/conformance/cwltool/cwl_${VERSION}/cwltool_${tool_ver}
+ mkdir -p "${PWD}"/conformance/cwltool/cwl_"${VERSION}"/
+ rm -fr "${badgedir}"
+ TEST_COMMAND="${TEST_COMMAND} --cwl-badgedir=${badgedir}"
CONFORMANCE_MSG=$(cat << EOM
-Conformance test of cwltool ${tool_ver} for CWL ${version}
+Conformance test of cwltool ${tool_ver} for CWL ${VERSION}
Commit: ${GIT_COMMIT}
Python version: 3
-Container: ${container}
+Container: ${CONTAINER}
Extra options: ${CWLTOOL_OPTIONS}
EOM
)
-
- tool_ver=$(cwltool --version | awk '{ print $2 }')
- badgedir=${PWD}/conformance/cwltool/cwl_${version}/cwltool_${tool_ver}
- mkdir -p "${PWD}"/conformance/cwltool/cwl_"${version}"/
- rm -fr "${badgedir}"
- BADGE=" --badgedir=${badgedir}"
fi
-if (( "${#exclusions[*]}" > 0 )); then
- EXCLUDE=-S$(IFS=,; echo "${exclusions[*]}")
-else
- EXCLUDE=""
-fi
export CWLTOOL_OPTIONS
echo CWLTOOL_OPTIONS="${CWLTOOL_OPTIONS}"
-# shellcheck disable=SC2086
-LC_ALL=C.UTF-8 ./run_test.sh --junit-xml=result3.xml ${EXCLUDE} \
- RUNNER=${CWLTOOL_WITH_COV} "-j$(nproc)" ${BADGE} \
- ${DRAFT} \
- "--classname=py3_${container}_$(echo ${CWLTOOL_OPTIONS} | tr "[:blank:]-" _)"
-# LC_ALL=C is to work around junit-xml ASCII only bug
-# capture return code of ./run_test.sh
-CODE=$?
+# Run test
+cp "${SCRIPT_DIRECTORY}/tests/cwl-conformance/cwltool-conftest.py" "$(dirname "${CONFORMANCE_TEST}")/conftest.py"
+bash -c "${TEST_COMMAND}"
+RETURN_CODE=$?
-find . -name '.coverage.*' -print0 | xargs -0 coverage combine --rcfile="${COVERAGE_RC}" --append
-coverage xml --rcfile="${COVERAGE_RC}"
-codecov --file coverage.xml
+# Coverage report
+if [ "${RETURN_CODE}" -eq "0" ] ; then
+ coverage report
+ coverage xml
+fi
if [ -d conformance ]
then
- rm -rf conformance/cwltool/cwl_"${version}"/cwltool_latest
- cp -r conformance/cwltool/cwl_"${version}"/cwltool_"${tool_ver}" conformance/cwltool/cwl_"${version}"/cwltool_latest
+ rm -rf conformance/cwltool/cwl_"${VERSION}"/cwltool_latest
+ cp -r conformance/cwltool/cwl_"${VERSION}"/cwltool_"${tool_ver}" conformance/cwltool/cwl_"${VERSION}"/cwltool_latest
git -C conformance add --all
git -C conformance diff-index --quiet HEAD || git -C conformance commit -m "${CONFORMANCE_MSG}"
git -C conformance push http://"${jenkins_cwl_conformance}":x-oauth-basic@github.com/common-workflow-language/conformance.git
fi
-popd || exit
+# Cleanup
deactivate
+#rm -rf "${GIT_TARGET}.tar.gz" "${SCRIPT_DIRECTORY}/${REPO}-${GIT_TARGET}" "${SCRIPT_DIRECTORY}/cwl-conformance-venv"
-# build new docker container
-# if [ "$GIT_BRANCH" = "origin/main" ] && [[ "$version" = "v1.0" ]]
-# then
-# ./build-cwltool-docker.sh || true
-# fi
-#docker rm -v $(docker ps -a -f status=exited | sed 's/ */ /g' | cut -d' ' -f1)
-exit ${CODE}
+# Exit
+exit ${RETURN_CODE}
diff --git a/cwltool-in-docker.sh b/cwltool-in-docker.sh
index ad0eab1c3..552e8c55c 100755
--- a/cwltool-in-docker.sh
+++ b/cwltool-in-docker.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-if [ -S /var/run/docker.sock ] && [ -z "$DOCKER_HOST" ]; then
+if ! [ -S /var/run/docker.sock ] && [ -z "$DOCKER_HOST" ]; then
>&2 echo 'ERROR: cwltool cannot work inside a container without access to docker'
>&2 echo 'Launch the container with the option -v /var/run/docker.sock:/var/run/docker.sock'
# shellcheck disable=SC2016
diff --git a/cwltool.Dockerfile b/cwltool.Dockerfile
index 5976737ee..4fa76b126 100644
--- a/cwltool.Dockerfile
+++ b/cwltool.Dockerfile
@@ -1,29 +1,31 @@
-FROM python:3.10-alpine as builder
+FROM python:3.12-alpine3.17 as builder
RUN apk add --no-cache git gcc python3-dev libxml2-dev libxslt-dev libc-dev linux-headers
WORKDIR /cwltool
COPY . .
-
-RUN pip install toml -rmypy-requirements.txt
-RUN CWLTOOL_USE_MYPYC=1 MYPYPATH=mypy-stubs pip wheel --no-binary schema-salad --wheel-dir=/wheels .[deps]
+RUN export SETUPTOOLS_SCM_PRETEND_VERSION_FOR_CWLTOOL=$(grep __version__ cwltool/_version.py | awk -F\' '{ print $2 }') ; \
+ CWLTOOL_USE_MYPYC=1 MYPYPATH=mypy-stubs pip wheel --no-binary schema-salad \
+ --wheel-dir=/wheels .[deps] # --verbose
RUN rm /wheels/schema_salad*
-RUN pip install black
+RUN pip install "black~=22.0"
+# galaxy-util 22.1.x depends on packaging<22, but black 23.x needs packaging>22
RUN SCHEMA_SALAD_USE_MYPYC=1 MYPYPATH=mypy-stubs pip wheel --no-binary schema-salad \
- $(grep schema.salad requirements.txt) black --wheel-dir=/wheels
-RUN pip install --force-reinstall --no-index --no-warn-script-location --root=/pythonroot/ /wheels/*.whl
+ $(grep schema.salad requirements.txt) "black~=22.0" --wheel-dir=/wheels # --verbose
+RUN pip install --force-reinstall --no-index --no-warn-script-location \
+ --root=/pythonroot/ /wheels/*.whl
# --force-reinstall to install our new mypyc compiled schema-salad package
-FROM python:3.10-alpine as module
-LABEL maintainer peter.amstutz@curri.com
+FROM python:3.12-alpine3.17 as module
+LABEL maintainer peter.amstutz@curii.com
-RUN apk add --no-cache docker nodejs graphviz libxml2 libxslt
+RUN apk add --no-cache docker nodejs 'graphviz<8' libxml2 libxslt
COPY --from=builder /pythonroot/ /
-FROM python:3.10-alpine
-LABEL maintainer peter.amstutz@curri.com
+FROM python:3.12-alpine3.17
+LABEL maintainer peter.amstutz@curii.com
-RUN apk add --no-cache docker nodejs graphviz libxml2 libxslt
+RUN apk add --no-cache docker nodejs 'graphviz<8' libxml2 libxslt
COPY --from=builder /pythonroot/ /
COPY cwltool-in-docker.sh /cwltool-in-docker.sh
diff --git a/cwltool/argparser.py b/cwltool/argparser.py
index dc0818c32..5439d2bd2 100644
--- a/cwltool/argparser.py
+++ b/cwltool/argparser.py
@@ -42,7 +42,8 @@ def arg_parser() -> argparse.ArgumentParser:
type=str,
default="",
help="Log your tools stdout/stderr to this location outside of container "
- "This will only log stdout/stderr if you specify stdout/stderr in their respective fields or capture it as an output",
+ "This will only log stdout/stderr if you specify stdout/stderr in their "
+ "respective fields or capture it as an output",
)
parser.add_argument(
@@ -125,9 +126,7 @@ def arg_parser() -> argparse.ArgumentParser:
help="Path prefix for temporary directories. If --tmpdir-prefix is not "
"provided, then the prefix for temporary directories is influenced by "
"the value of the TMPDIR, TEMP, or TMP environment variables. Taking "
- "those into consideration, the current default is {}.".format(
- DEFAULT_TMP_PREFIX
- ),
+ "those into consideration, the current default is {}.".format(DEFAULT_TMP_PREFIX),
default=DEFAULT_TMP_PREFIX,
)
@@ -317,12 +316,8 @@ def arg_parser() -> argparse.ArgumentParser:
action="store_true",
help="Combine components into single document and print.",
)
- printgroup.add_argument(
- "--version", action="store_true", help="Print version and exit"
- )
- printgroup.add_argument(
- "--validate", action="store_true", help="Validate CWL document only."
- )
+ printgroup.add_argument("--version", action="store_true", help="Print version and exit")
+ printgroup.add_argument("--validate", action="store_true", help="Validate CWL document only.")
printgroup.add_argument(
"--print-supported-versions",
action="store_true",
@@ -383,12 +378,8 @@ def arg_parser() -> argparse.ArgumentParser:
volumegroup = parser.add_mutually_exclusive_group()
volumegroup.add_argument("--verbose", action="store_true", help="Default logging")
- volumegroup.add_argument(
- "--quiet", action="store_true", help="Only print warnings and errors."
- )
- volumegroup.add_argument(
- "--debug", action="store_true", help="Print even more logging"
- )
+ volumegroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
+ volumegroup.add_argument("--debug", action="store_true", help="Print even more logging")
parser.add_argument(
"--write-summary",
@@ -493,12 +484,9 @@ def arg_parser() -> argparse.ArgumentParser:
"Default root directory used by dependency resolvers configuration."
)
use_biocontainers_help = (
- "Use biocontainers for tools without an "
- "explicitly annotated Docker container."
- )
- conda_dependencies = (
- "Short cut to use Conda to resolve 'SoftwareRequirement' packages."
+ "Use biocontainers for tools without an " "explicitly annotated Docker container."
)
+ conda_dependencies = "Short cut to use Conda to resolve 'SoftwareRequirement' packages."
parser.add_argument(
"--beta-dependency-resolvers-configuration",
@@ -521,9 +509,7 @@ def arg_parser() -> argparse.ArgumentParser:
action="store_true",
)
- parser.add_argument(
- "--tool-help", action="store_true", help="Print command line help for tool"
- )
+ parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
parser.add_argument(
"--relative-deps",
@@ -536,8 +522,7 @@ def arg_parser() -> argparse.ArgumentParser:
parser.add_argument(
"--enable-dev",
action="store_true",
- help="Enable loading and running unofficial development versions of "
- "the CWL standards.",
+ help="Enable loading and running unofficial development versions of " "the CWL standards.",
default=False,
)
@@ -646,8 +631,7 @@ def arg_parser() -> argparse.ArgumentParser:
"--relax-path-checks",
action="store_true",
default=False,
- help="Relax requirements on path names to permit "
- "spaces and hash characters.",
+ help="Relax requirements on path names to permit " "spaces and hash characters.",
dest="relax_path_checks",
)
@@ -741,7 +725,9 @@ def get_default_args() -> Dict[str, Any]:
class FSAction(argparse.Action):
- objclass = None # type: str
+ """Base action for our custom actions."""
+
+ objclass: Optional[str] = None
def __init__(
self,
@@ -777,7 +763,9 @@ def __call__(
class FSAppendAction(argparse.Action):
- objclass = None # type: str
+ """Appending version of the base action for our custom actions."""
+
+ objclass: Optional[str] = None
def __init__(
self,
@@ -815,27 +803,26 @@ def __call__(
class FileAction(FSAction):
- objclass = "File"
+ objclass: Optional[str] = "File"
class DirectoryAction(FSAction):
- objclass = "Directory"
+ objclass: Optional[str] = "Directory"
class FileAppendAction(FSAppendAction):
- objclass = "File"
+ objclass: Optional[str] = "File"
class DirectoryAppendAction(FSAppendAction):
- objclass = "Directory"
+ objclass: Optional[str] = "Directory"
class AppendAction(argparse.Action):
- """An argparse action that clears the default values if any value is provided.
+ """An argparse action that clears the default values if any value is provided."""
- Attributes:
- _called (bool): Initially set to ``False``, changed if any value is appended.
- """
+ _called: bool
+ """Initially set to ``False``, changed if any value is appended."""
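+    # Illustrative behavior (hypothetical values): with ``default=["a"]``,
+    # the first occurrence of the option replaces the default (``["b"]``);
+    # later occurrences append (``["b", "c"]``).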
def __init__(
self,
@@ -900,9 +887,9 @@ def add_argument(
return None
ahelp = description.replace("%", "%%")
- action = None # type: Optional[Union[Type[argparse.Action], str]]
- atype = None # type: Any
- typekw = {} # type: Dict[str, Any]
+ action: Optional[Union[Type[argparse.Action], str]] = None
+ atype: Optional[Any] = None
+ typekw: Dict[str, Any] = {}
if inptype == "File":
action = FileAction
@@ -929,9 +916,7 @@ def add_argument(
fieldtype,
records,
fielddescription,
- default=default.get(shortname(field["name"]), None)
- if default
- else None,
+ default=default.get(shortname(field["name"]), None) if default else None,
input_required=required,
)
return
@@ -977,7 +962,8 @@ def generate_parser(
urljoin: Callable[[str, str], str] = urllib.parse.urljoin,
base_uri: str = "",
) -> argparse.ArgumentParser:
- toolparser.description = tool.tool.get("doc", None)
+ """Generate an ArgumentParser for the given CWL Process."""
+ toolparser.description = tool.tool.get("doc", tool.tool.get("label", None))
toolparser.add_argument("job_order", nargs="?", help="Job input json file")
namemap["job_order"] = "job_order"
@@ -985,7 +971,7 @@ def generate_parser(
name = shortname(inp["id"])
namemap[name.replace("-", "_")] = name
inptype = inp["type"]
- description = inp.get("doc", "")
+ description = inp.get("doc", inp.get("label", ""))
default = inp.get("default", None)
add_argument(
toolparser,
diff --git a/cwltool/builder.py b/cwltool/builder.py
index 3c5d80923..eeb9a5bca 100644
--- a/cwltool/builder.py
+++ b/cwltool/builder.py
@@ -1,8 +1,11 @@
+"""Command line builder."""
import copy
import logging
import math
+from decimal import Decimal
from typing import (
IO,
+ TYPE_CHECKING,
Any,
Callable,
Dict,
@@ -10,21 +13,23 @@
MutableMapping,
MutableSequence,
Optional,
+ Type,
Union,
cast,
)
from cwl_utils import expression
from cwl_utils.file_formats import check_format
+from mypy_extensions import mypyc_attr
from rdflib import Graph
+from ruamel.yaml.comments import CommentedMap
+from ruamel.yaml.representer import RoundTripRepresenter
+from ruamel.yaml.scalarfloat import ScalarFloat
from schema_salad.avro.schema import Names, Schema, make_avsc_object
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine
from schema_salad.utils import convert_to_dict, json_dumps
from schema_salad.validate import validate
-from typing_extensions import TYPE_CHECKING, Type # pylint: disable=unused-import
-
-from ruamel.yaml.comments import CommentedMap
from .errors import WorkflowException
from .loghandler import _logger
@@ -36,6 +41,7 @@
CWLObjectType,
CWLOutputType,
HasReqsHints,
+ LoadListingType,
aslist,
get_listing,
normalizeFilesDirs,
@@ -43,8 +49,10 @@
)
if TYPE_CHECKING:
+ from .cwlprov.provenance_profile import (
+ ProvenanceProfile, # pylint: disable=unused-import
+ )
from .pathmapper import PathMapper
- from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
INPUT_OBJ_VOCAB: Dict[str, str] = {
"Any": "https://w3id.org/cwl/salad#Any",
@@ -53,7 +61,14 @@
}
-def content_limit_respected_read_bytes(f): # type: (IO[bytes]) -> bytes
+def content_limit_respected_read_bytes(f: IO[bytes]) -> bytes:
+ """
+ Read a file as bytes, respecting the :py:data:`~cwltool.utils.CONTENT_LIMIT`.
+
+ :param f: file handle
+ :returns: the file contents
+ :raises WorkflowException: if the file is too large
+ """
contents = f.read(CONTENT_LIMIT + 1)
if len(contents) > CONTENT_LIMIT:
raise WorkflowException(
@@ -62,11 +77,19 @@ def content_limit_respected_read_bytes(f): # type: (IO[bytes]) -> bytes
return contents
-def content_limit_respected_read(f): # type: (IO[bytes]) -> str
+def content_limit_respected_read(f: IO[bytes]) -> str:
+ """
+ Read a file as a string, respecting the :py:data:`~cwltool.utils.CONTENT_LIMIT`.
+
+ :param f: file handle
+ :returns: the file contents
+ :raises WorkflowException: if the file is too large
+ """
return content_limit_respected_read_bytes(f).decode("utf-8")
-def substitute(value, replace): # type: (str, str) -> str
+def substitute(value: str, replace: str) -> str:
+ """Perform CWL SecondaryFilesDSL style substitution."""
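+    # For example: substitute("reads.bam", ".bai")  -> "reads.bam.bai";
+    # each leading "^" strips one extension before appending:
+    # substitute("reads.bam", "^.bai") -> "reads.bai"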
if replace.startswith("^"):
try:
return substitute(value[0 : value.rindex(".")], replace[1:])
@@ -76,7 +99,10 @@ def substitute(value, replace): # type: (str, str) -> str
return value + replace
+@mypyc_attr(allow_interpreted_subclasses=True)
class Builder(HasReqsHints):
+ """Helper class to construct a command line from a CWL CommandLineTool."""
+
def __init__(
self,
job: CWLObjectType,
@@ -96,14 +122,19 @@ def __init__(
debug: bool,
js_console: bool,
force_docker_pull: bool,
- loadListing: str,
+ loadListing: LoadListingType,
outdir: str,
tmpdir: str,
stagedir: str,
cwlVersion: str,
container_engine: str,
) -> None:
- """Initialize this Builder."""
+ """
+ Initialize this Builder.
+
+ :param timeout: Maximum number of seconds to wait while evaluating CWL
+ expressions.
+ """
super().__init__()
self.job = job
self.files = files
@@ -127,7 +158,6 @@ def __init__(
self.js_console = js_console
self.force_docker_pull = force_docker_pull
- # One of "no_listing", "shallow_listing", "deep_listing"
self.loadListing = loadListing
self.outdir = outdir
@@ -136,9 +166,9 @@ def __init__(
self.cwlVersion = cwlVersion
- self.pathmapper = None # type: Optional[PathMapper]
- self.prov_obj = None # type: Optional[ProvenanceProfile]
- self.find_default_container = None # type: Optional[Callable[[], str]]
+ self.pathmapper: Optional["PathMapper"] = None
+ self.prov_obj: Optional["ProvenanceProfile"] = None
+ self.find_default_container: Optional[Callable[[], str]] = None
self.container_engine = container_engine
def build_job_script(self, commands: List[str]) -> Optional[str]:
@@ -154,6 +184,14 @@ def bind_input(
lead_pos: Optional[Union[int, List[int]]] = None,
tail_pos: Optional[Union[str, List[int]]] = None,
) -> List[MutableMapping[str, Union[str, List[int]]]]:
+ """
+ Bind an input object to the command line.
+
+ :raises ValidationException: in the event of an invalid type union
+ :raises WorkflowException: if a CWL Expression ("position", "required",
+ "pattern", "format") evaluates to the wrong type or if a required
+ secondary file is missing
+ """
debug = _logger.isEnabledFor(logging.DEBUG)
if tail_pos is None:
@@ -161,14 +199,10 @@ def bind_input(
if lead_pos is None:
lead_pos = []
- bindings = [] # type: List[MutableMapping[str, Union[str, List[int]]]]
- binding = (
- {}
- ) # type: Union[MutableMapping[str, Union[str, List[int]]], CommentedMap]
+ bindings: List[MutableMapping[str, Union[str, List[int]]]] = []
+ binding: Union[MutableMapping[str, Union[str, List[int]]], CommentedMap] = {}
value_from_expression = False
- if "inputBinding" in schema and isinstance(
- schema["inputBinding"], MutableMapping
- ):
+ if "inputBinding" in schema and isinstance(schema["inputBinding"], MutableMapping):
binding = CommentedMap(schema["inputBinding"].items())
bp = list(aslist(lead_pos))
@@ -183,7 +217,7 @@ def bind_input(
).makeError(
"'position' expressions must evaluate to an int, "
f"not a {type(result)}. Expression {position} "
- f"resulted in '{result}'."
+ f"resulted in {result!r}."
)
binding["position"] = result
bp.append(result)
@@ -202,7 +236,7 @@ def bind_input(
if isinstance(schema["type"], MutableSequence):
bound_input = False
for t in schema["type"]:
- avsc = None # type: Optional[Schema]
+ avsc: Optional[Schema] = None
if isinstance(t, str) and self.names.has_name(t, None):
avsc = self.names.get_name(t, None)
elif (
@@ -278,8 +312,7 @@ def bind_input(
else:
schema["type"] = "record"
schema["fields"] = [
- {"name": field_name, "type": "Any"}
- for field_name in datum.keys()
+ {"name": field_name, "type": "Any"} for field_name in datum.keys()
]
elif isinstance(datum, list):
schema["type"] = "array"
@@ -311,10 +344,10 @@ def bind_input(
if binding:
b2 = cast(CWLObjectType, copy.deepcopy(binding))
b2["datum"] = item
- itemschema = {
+ itemschema: CWLObjectType = {
"type": schema["items"],
"inputBinding": b2,
- } # type: CWLObjectType
+ }
for k in ("secondaryFiles", "format", "streamable"):
if k in schema:
itemschema[k] = schema[k]
@@ -337,9 +370,9 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
datum = cast(CWLObjectType, datum)
self.files.append(datum)
- loadContents_sourceline = (
- None
- ) # type: Union[None, MutableMapping[str, Union[str, List[int]]], CWLObjectType]
+ loadContents_sourceline: Union[
+ None, MutableMapping[str, Union[str, List[int]]], CWLObjectType
+ ] = None
if binding and binding.get("loadContents"):
loadContents_sourceline = binding
elif schema.get("loadContents"):
@@ -353,14 +386,10 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
debug,
):
try:
- with self.fs_access.open(
- cast(str, datum["location"]), "rb"
- ) as f2:
+ with self.fs_access.open(cast(str, datum["location"]), "rb") as f2:
datum["contents"] = content_limit_respected_read(f2)
except Exception as e:
- raise Exception(
- "Reading {}\n{}".format(datum["location"], e)
- )
+ raise Exception("Reading {}\n{}".format(datum["location"], e)) from e
if "secondaryFiles" in schema:
if "secondaryFiles" not in datum:
@@ -373,13 +402,8 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
for num, sf_entry in enumerate(sf_schema):
if "required" in sf_entry and sf_entry["required"] is not None:
- required_result = self.do_eval(
- sf_entry["required"], context=datum
- )
- if not (
- isinstance(required_result, bool)
- or required_result is None
- ):
+ required_result = self.do_eval(sf_entry["required"], context=datum)
+ if not (isinstance(required_result, bool) or required_result is None):
if sf_schema == schema["secondaryFiles"]:
sf_item: Any = sf_schema[num]
else:
@@ -390,8 +414,8 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
"The result of a expression in the field "
"'required' must "
f"be a bool or None, not a {type(required_result)}. "
- f"Expression '{sf_entry['required']}' resulted "
- f"in '{required_result}'."
+ f"Expression {sf_entry['required']!r} resulted "
+ f"in {required_result!r}."
)
sf_required = required_result
else:
@@ -400,9 +424,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
if "$(" in sf_entry["pattern"] or "${" in sf_entry["pattern"]:
sfpath = self.do_eval(sf_entry["pattern"], context=datum)
else:
- sfpath = substitute(
- cast(str, datum["basename"]), sf_entry["pattern"]
- )
+ sfpath = substitute(cast(str, datum["basename"]), sf_entry["pattern"])
for sfname in aslist(sfpath):
if not sfname:
@@ -413,8 +435,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
d_location = cast(str, datum["location"])
if "/" in d_location:
sf_location = (
- d_location[0 : d_location.rindex("/") + 1]
- + sfname
+ d_location[0 : d_location.rindex("/") + 1] + sfname
)
else:
sf_location = d_location + sfname
@@ -429,7 +450,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
"Expected secondaryFile expression to "
"return type 'str', a 'File' or 'Directory' "
"dictionary, or a list of the same. Received "
- f"'{type(sfname)} from '{sf_entry['pattern']}'."
+ f"{type(sfname)!r} from {sf_entry['pattern']!r}."
)
for d in cast(
@@ -437,9 +458,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
datum["secondaryFiles"],
):
if not d.get("basename"):
- d["basename"] = d["location"][
- d["location"].rindex("/") + 1 :
- ]
+ d["basename"] = d["location"][d["location"].rindex("/") + 1 :]
if d["basename"] == sfbasename:
found = True
@@ -463,9 +482,7 @@ def addsf(
),
sfname,
)
- elif discover_secondaryFiles and self.fs_access.exists(
- sf_location
- ):
+ elif discover_secondaryFiles and self.fs_access.exists(sf_location):
addsf(
cast(
MutableSequence[CWLObjectType],
@@ -504,9 +521,9 @@ def addsf(
"An expression in the 'format' field must "
"evaluate to a string, or list of strings. "
"However a non-string item was received: "
- f"'{entry}' of type '{type(entry)}'. "
- f"The expression was '{schema['format']}' and "
- f"its fully evaluated result is '{eval_format}'."
+ f"{entry!r} of type {type(entry)!r}. "
+ f"The expression was {schema['format']!r} and "
+ f"its fully evaluated result is {eval_format!r}."
)
if expression.needs_parsing(entry):
message = (
@@ -515,9 +532,9 @@ def addsf(
"Reference, a single format string, or a list of "
"format strings. But the list cannot contain CWL "
"Expressions or CWL Parameter References. List "
- f"entry number {index+1} contains the following "
+ f"entry number {index + 1} contains the following "
"unallowed CWL Parameter Reference or Expression: "
- f"'{entry}'."
+ f"{entry!r}."
)
if message:
raise SourceLine(
@@ -525,15 +542,13 @@ def addsf(
).makeError(message)
evaluated_format = cast(List[str], eval_format)
else:
- raise SourceLine(
- schema, "format", WorkflowException, debug
- ).makeError(
+ raise SourceLine(schema, "format", WorkflowException, debug).makeError(
"An expression in the 'format' field must "
"evaluate to a string, or list of strings. "
"However the type of the expression result was "
f"{type(eval_format)}. "
- f"The expression was '{schema['format']}' and "
- f"its fully evaluated result is 'eval_format'."
+ f"The expression was {schema['format']!r} and "
+ f"its fully evaluated result is {eval_format!r}."
)
try:
check_format(
@@ -543,8 +558,8 @@ def addsf(
)
except ValidationException as ve:
raise WorkflowException(
- "Expected value of '%s' to have format %s but\n "
- " %s" % (schema["name"], schema["format"], ve)
+ f"Expected value of {schema['name']!r} to have "
+ f"format {schema['format']!r} but\n {ve}"
) from ve
visit_class(
@@ -578,6 +593,12 @@ def addsf(
return bindings
def tostr(self, value: Union[MutableMapping[str, str], Any]) -> str:
+ """
+ Represent an input parameter as a string.
+
+ :raises WorkflowException: if the item is a File or Directory and the
+ "path" is missing.
+ """
if isinstance(value, MutableMapping) and value.get("class") in (
"File",
"Directory",
@@ -587,6 +608,12 @@ def tostr(self, value: Union[MutableMapping[str, str], Any]) -> str:
'{} object missing "path": {}'.format(value["class"], value)
)
return value["path"]
+ elif isinstance(value, ScalarFloat):
+ rep = RoundTripRepresenter()
+ dec_value = Decimal(rep.represent_scalar_float(value).value)
+ if "E" in str(dec_value):
+ return str(dec_value.quantize(1))
+ return str(dec_value)
else:
return str(value)
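# A minimal sketch, not part of this diff, of why the ScalarFloat branch above
# round-trips through Decimal: it reproduces the number as written in the YAML
# input, and quantize(1) renders exponent forms like "1E+5" as the plain
# integer string a command line expects.
from decimal import Decimal

dec_value = Decimal("1E+5")
assert "E" in str(dec_value)
assert str(dec_value.quantize(1)) == "100000"
assert str(Decimal("0.1000")) == "0.1000"  # trailing zeros are preserved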
@@ -611,20 +638,16 @@ def generate_arg(self, binding: CWLObjectType) -> List[str]:
WorkflowException,
debug,
):
- raise WorkflowException(
- "'separate' option can not be specified without prefix"
- )
+ raise WorkflowException("'separate' option can not be specified without prefix")
- argl = [] # type: MutableSequence[CWLOutputType]
+ argl: MutableSequence[CWLOutputType] = []
if isinstance(value, MutableSequence):
if binding.get("itemSeparator") and value:
itemSeparator = cast(str, binding["itemSeparator"])
argl = [itemSeparator.join([self.tostr(v) for v in value])]
elif binding.get("valueFrom"):
value = [self.tostr(v) for v in value]
- return cast(List[str], ([prefix] if prefix else [])) + cast(
- List[str], value
- )
+ return cast(List[str], ([prefix] if prefix else [])) + cast(List[str], value)
elif prefix and value:
return [prefix]
else:
diff --git a/cwltool/checker.py b/cwltool/checker.py
index cb4365a9b..c11561719 100644
--- a/cwltool/checker.py
+++ b/cwltool/checker.py
@@ -3,7 +3,9 @@
from typing import (
Any,
Dict,
+ Iterator,
List,
+ Literal,
MutableMapping,
MutableSequence,
Optional,
@@ -35,11 +37,11 @@ def check_types(
sinktype: SinkType,
linkMerge: Optional[str],
valueFrom: Optional[str],
-) -> str:
+) -> Union[Literal["pass"], Literal["warning"], Literal["exception"]]:
"""
Check if the source and sink types are correct.
- Acceptable types are "pass", "warning", or "exception".
+ :raises WorkflowException: If there is an unrecognized linkMerge type
"""
if valueFrom is not None:
return "pass"
@@ -57,10 +59,8 @@ def check_types(
None,
)
if linkMerge == "merge_flattened":
- return check_types(
- merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None
- )
- raise WorkflowException(f"Unrecognized linkMerge enum '{linkMerge}'")
+ return check_types(merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None)
+ raise WorkflowException(f"Unrecognized linkMerge enum {linkMerge!r}")
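# Hedged illustration, not part of this diff, of the narrowed Literal return
# contract (assumes cwltool is installed and the current semantics: a strict
# match yields "pass", a non-strict-only match yields "warning", anything else
# "exception"; a valueFrom short-circuits to "pass").
from cwltool.checker import check_types

assert check_types("File", "string", None, "$(self)") == "pass"
assert check_types("File", "File", None, None) == "pass"
assert check_types(["File", "null"], "File", None, None) == "warning"
assert check_types("File", "string", None, None) == "exception"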
def merge_flatten_type(src: SinkType) -> CWLOutputType:
@@ -72,18 +72,16 @@ def merge_flatten_type(src: SinkType) -> CWLOutputType:
return {"items": src, "type": "array"}
-def can_assign_src_to_sink(
- src: SinkType, sink: Optional[SinkType], strict: bool = False
-) -> bool:
+def can_assign_src_to_sink(src: SinkType, sink: Optional[SinkType], strict: bool = False) -> bool:
"""
Check for identical type specifications, ignoring extra keys like inputBinding.
- src: admissible source types
- sink: admissible sink types
-
In non-strict comparison, at least one source type must match one sink type,
- except for 'null'.
+ except for 'null'.
In strict comparison, all source types must match at least one sink type.
+
+ :param src: admissible source types
+ :param sink: admissible sink types
"""
if src == "Any" or sink == "Any":
return True
@@ -102,9 +100,7 @@ def can_assign_src_to_sink(
for sinksf in cast(List[CWLObjectType], sink.get("secondaryFiles", [])):
if not [
1
- for srcsf in cast(
- List[CWLObjectType], src.get("secondaryFiles", [])
- )
+ for srcsf in cast(List[CWLObjectType], src.get("secondaryFiles", []))
if sinksf == srcsf
]:
if strict:
@@ -120,9 +116,7 @@ def can_assign_src_to_sink(
return False
return True
for this_src in src:
- if this_src != "null" and can_assign_src_to_sink(
- cast(SinkType, this_src), sink
- ):
+ if this_src != "null" and can_assign_src_to_sink(cast(SinkType, this_src), sink):
return True
return False
if isinstance(sink, MutableSequence):
@@ -133,19 +127,17 @@ def can_assign_src_to_sink(
return bool(src == sink)
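# Sketch, not part of this diff, of the strict flag documented above:
# non-strict comparison needs only one non-"null" source alternative to match,
# while strict comparison needs every alternative to match. Assumes cwltool is
# installed.
from cwltool.checker import can_assign_src_to_sink

assert can_assign_src_to_sink(["null", "File"], "File")
assert not can_assign_src_to_sink(["null", "File"], "File", strict=True)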
-def _compare_records(
- src: CWLObjectType, sink: CWLObjectType, strict: bool = False
-) -> bool:
+def _compare_records(src: CWLObjectType, sink: CWLObjectType, strict: bool = False) -> bool:
"""
Compare two records, ensuring they have compatible fields.
This handles normalizing record names, which will be relative to workflow
step, so that they can be compared.
+
+ :return: True if the records have compatible fields, False otherwise.
"""
- def _rec_fields(
- rec,
- ): # type: (MutableMapping[str, Any]) -> MutableMapping[str, Any]
+ def _rec_fields(rec: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
out = {}
for field in rec["fields"]:
name = shortname(field["name"])
@@ -189,7 +181,11 @@ def static_checker(
step_outputs: List[CWLObjectType],
param_to_step: Dict[str, CWLObjectType],
) -> None:
- """Check if all source and sink types of a workflow are compatible before run time."""
+ """
+ Check if all source and sink types of a workflow are compatible before run time.
+
+ :raises ValidationException: If any incompatibilities are detected.
+ """
# source parameters: workflow_inputs and step_outputs
# sink parameters: step_inputs and workflow_outputs
@@ -213,9 +209,7 @@ def static_checker(
sink = warning.sink
linkMerge = warning.linkMerge
sinksf = sorted(
- p["pattern"]
- for p in sink.get("secondaryFiles", [])
- if p.get("required", True)
+ p["pattern"] for p in sink.get("secondaryFiles", []) if p.get("required", True)
)
srcsf = sorted(p["pattern"] for p in src.get("secondaryFiles", []))
# Every secondaryFile required by the sink, should be declared
@@ -227,16 +221,13 @@ def static_checker(
missing,
)
msg3 = SourceLine(src, "id").makeError(
- "source '%s' does not provide those secondaryFiles."
- % (shortname(src["id"]))
+ "source '%s' does not provide those secondaryFiles." % (shortname(src["id"]))
)
msg4 = SourceLine(src.get("_tool_entry", src), "secondaryFiles").makeError(
"To resolve, add missing secondaryFiles patterns to definition of '%s' or"
% (shortname(src["id"]))
)
- msg5 = SourceLine(
- sink.get("_tool_entry", sink), "secondaryFiles"
- ).makeError(
+ msg5 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError(
"mark missing secondaryFiles in definition of '%s' as optional."
% shortname(sink["id"])
)
@@ -297,17 +288,16 @@ def static_checker(
)
+ "\n"
+ SourceLine(sink, "type").makeError(
- " with sink '%s' of type %s"
- % (shortname(sink["id"]), json_dumps(sink["type"]))
+ " with sink '{}' of type {}".format(
+ shortname(sink["id"]), json_dumps(sink["type"])
+ )
)
)
if extra_message is not None:
msg += "\n" + SourceLine(sink).makeError(" " + extra_message)
if linkMerge is not None:
- msg += "\n" + SourceLine(sink).makeError(
- " source has linkMerge method %s" % linkMerge
- )
+ msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
exception_msgs.append(msg)
for sink in step_inputs:
@@ -339,18 +329,19 @@ def static_checker(
def check_all_types(
src_dict: Dict[str, CWLObjectType],
sinks: MutableSequence[CWLObjectType],
- sourceField: str,
+ sourceField: Union[Literal["source"], Literal["outputSource"]],
param_to_step: Dict[str, CWLObjectType],
) -> Dict[str, List[SrcSink]]:
"""
Given a list of sinks, check if their types match with the types of their sources.
- sourceField is either "source" or "outputSource"
+ :raises WorkflowException: if there is an unrecognized linkMerge value
+ (from :py:func:`check_types`)
+ :raises ValidationException: if a sourceField is missing
"""
validation = {"warning": [], "exception": []} # type: Dict[str, List[SrcSink]]
for sink in sinks:
if sourceField in sink:
-
valueFrom = cast(Optional[str], sink.get("valueFrom"))
pickValue = cast(Optional[str], sink.get("pickValue"))
@@ -363,11 +354,7 @@ def check_all_types(
Optional[str],
sink.get(
"linkMerge",
- (
- "merge_nested"
- if len(cast(Sized, sink[sourceField])) > 1
- else None
- ),
+ ("merge_nested" if len(cast(Sized, sink[sourceField])) > 1 else None),
),
) # type: Optional[str]
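# Runnable illustration, not part of this diff, of the default computed above:
# a sink with more than one source and no explicit linkMerge is treated as
# "merge_nested".
sink = {"id": "#main/out", "outputSource": ["#main/a/out", "#main/b/out"]}
linkMerge = sink.get(
    "linkMerge",
    "merge_nested" if len(sink["outputSource"]) > 1 else None,
)
assert linkMerge == "merge_nested"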
@@ -377,10 +364,7 @@ def check_all_types(
srcs_of_sink = [] # type: List[CWLObjectType]
for parm_id in cast(MutableSequence[str], sink[sourceField]):
srcs_of_sink += [src_dict[parm_id]]
- if (
- is_conditional_step(param_to_step, parm_id)
- and pickValue is None
- ):
+ if is_conditional_step(param_to_step, parm_id) and pickValue is None:
validation["warning"].append(
SrcSink(
src_dict[parm_id],
@@ -456,7 +440,11 @@ def check_all_types(
def circular_dependency_checker(step_inputs: List[CWLObjectType]) -> None:
- """Check if a workflow has circular dependency."""
+ """
+ Check if a workflow has circular dependency.
+
+ :raises ValidationException: If a circular dependency is detected.
+ """
adjacency = get_dependency_tree(step_inputs)
vertices = adjacency.keys()
processed: List[str] = []
@@ -478,9 +466,7 @@ def get_dependency_tree(step_inputs: List[CWLObjectType]) -> Dict[str, List[str]
for step_input in step_inputs:
if "source" in step_input:
if isinstance(step_input["source"], list):
- vertices_in = [
- get_step_id(cast(str, src)) for src in step_input["source"]
- ]
+ vertices_in = [get_step_id(cast(str, src)) for src in step_input["source"]]
else:
vertices_in = [get_step_id(cast(str, step_input["source"]))]
vertex_out = get_step_id(cast(str, step_input["id"]))
@@ -523,16 +509,13 @@ def get_step_id(field_id: str) -> str:
def is_conditional_step(param_to_step: Dict[str, CWLObjectType], parm_id: str) -> bool:
- source_step = param_to_step.get(parm_id)
- if source_step is not None:
+ if (source_step := param_to_step.get(parm_id)) is not None:
if source_step.get("when") is not None:
return True
return False
-def is_all_output_method_loop_step(
- param_to_step: Dict[str, CWLObjectType], parm_id: str
-) -> bool:
+def is_all_output_method_loop_step(param_to_step: Dict[str, CWLObjectType], parm_id: str) -> bool:
"""Check if a step contains a http://commonwl.org/cwltool#Loop requirement with `all` outputMethod."""
source_step: Optional[MutableMapping[str, Any]] = param_to_step.get(parm_id)
if source_step is not None:
@@ -545,8 +528,13 @@ def is_all_output_method_loop_step(
return False
-def loop_checker(steps: List[MutableMapping[str, Any]]) -> None:
- """Check http://commonwl.org/cwltool#Loop requirement compatibility with other directives."""
+def loop_checker(steps: Iterator[MutableMapping[str, Any]]) -> None:
+ """
+ Check http://commonwl.org/cwltool#Loop requirement compatibility with other directives.
+
+ :raises ValidationException: If there is an incompatible combination between
+ cwltool:loop and 'scatter' or 'when'.
+ """
exceptions = []
for step in steps:
requirements = {
diff --git a/cwltool/command_line_tool.py b/cwltool/command_line_tool.py
index bc463413a..0b0d7f3ec 100644
--- a/cwltool/command_line_tool.py
+++ b/cwltool/command_line_tool.py
@@ -14,6 +14,7 @@
from enum import Enum
from functools import cmp_to_key, partial
from typing import (
+ TYPE_CHECKING,
Any,
Dict,
Generator,
@@ -25,20 +26,20 @@
Pattern,
Set,
TextIO,
+ Type,
Union,
cast,
)
import shellescape
+from mypy_extensions import mypyc_attr
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import Schema
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import file_uri, uri_file_path
from schema_salad.sourceline import SourceLine
from schema_salad.utils import json_dumps
from schema_salad.validate import validate_ex
-from typing_extensions import TYPE_CHECKING, Type
-
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
from .builder import (
INPUT_OBJ_VOCAB,
@@ -85,7 +86,9 @@
)
if TYPE_CHECKING:
- from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
+ from .cwlprov.provenance_profile import (
+ ProvenanceProfile, # pylint: disable=unused-import
+ )
class PathCheckingMode(Enum):
@@ -96,49 +99,64 @@ class PathCheckingMode(Enum):
"""
STRICT = re.compile(r"^[\w.+\,\-:@\]^\u2600-\u26FF\U0001f600-\U0001f64f]+$")
- # accepts names that contain one or more of the following:
- # "\w" unicode word characters; this includes most characters
- # that can be part of a word in any language, as well
- # as numbers and the underscore
- # "." a literal period
- # "+" a literal plus sign
- # "\," a literal comma
- # "\-" a literal minus sign
- # ":" a literal colon
- # "@" a literal at-symbol
- # "\]" a literal end-square-bracket
- # "^" a literal caret symbol
- # \u2600-\u26FF matches a single character in the range between
- # ☀ (index 9728) and ⛿ (index 9983)
- # \U0001f600-\U0001f64f matches a single character in the range between
- # 😀 (index 128512) and 🙏 (index 128591)
-
- # Note: the following characters are intentionally not included:
- #
- # 1. reserved words in POSIX:
- # ! { }
- #
- # 2. POSIX metacharacters listed in the CWL standard as okay to reject
- # | & ; < > ( ) $ ` " '
- # (In accordance with
- # https://www.commonwl.org/v1.0/CommandLineTool.html#File under "path" )
- #
- # 3. POSIX path separator
- # \
- # (also listed at
- # https://www.commonwl.org/v1.0/CommandLineTool.html#File under "path")
- #
- # 4. Additional POSIX metacharacters
- # * ? [ # ˜ = %
-
- # TODO: switch to https://pypi.org/project/regex/ and use
- # `\p{Extended_Pictographic}` instead of the manual emoji ranges
-
- RELAXED = re.compile(r".*") # Accept anything
+ r"""
+ Accepts names that contain one or more of the following:
+
+ .. list-table::
+
+ * - ``\w``
+ - unicode word characters
+
+ this includes most characters that can be part of a word in any
+ language, as well as numbers and the underscore
+ * - ``.``
+ - a literal period
+ * - ``+``
+ - a literal plus sign
+ * - ``,``
+ - a literal comma
+ * - ``-``
+ - a literal minus sign
+ * - ``:``
+ - a literal colon
+ * - ``@``
+ - a literal at-symbol
+ * - ``]``
+ - a literal end-square-bracket
+ * - ``^``
+ - a literal caret symbol
+ * - ``\u2600-\u26FF``
+ - matches a single character in the range between ☀ (index 9728) and ⛿ (index 9983)
+ * - ``\U0001f600-\U0001f64f``
+ - matches a single character in the range between 😀 (index 128512) and 🙏 (index 128591)
+
+ Note: the following characters are intentionally not included:
+
+ 1. reserved words in POSIX: ``!``, :code:`{`, ``}``
+
+ 2. POSIX metacharacters listed in the CWL standard as okay to reject: ``|``,
+ ``&``, ``;``, ``<``, ``>``, ``(``, ``)``, ``$``, `````, ``"``, ``'``,
+ :kbd:`<space>`, :kbd:`<tab>`, :kbd:`<newline>`.
+
+ (In accordance with https://www.commonwl.org/v1.0/CommandLineTool.html#File under "path" )
+
+ 3. POSIX path separator: ``\``
+
+ (also listed at https://www.commonwl.org/v1.0/CommandLineTool.html#File under "path")
+
+ 4. Additional POSIX metacharacters: ``*``, ``?``, ``[``, ``#``, ``˜``,
+ ``=``, ``%``.
+
+ TODO: switch to https://pypi.org/project/regex/ and use
+ ``\p{Extended_Pictographic}`` instead of the manual emoji ranges
+ """
+
+ RELAXED = re.compile(r".*")
+ """Accept anything."""
class ExpressionJob:
- """Job for ExpressionTools."""
+ """Job for :py:class:`ExpressionTool`."""
def __init__(
self,
@@ -150,7 +168,7 @@ def __init__(
outdir: Optional[str] = None,
tmpdir: Optional[str] = None,
) -> None:
- """Initializet this ExpressionJob."""
+ """Initialize this ExpressionJob."""
self.builder = builder
self.requirements = requirements
self.hints = hints
@@ -158,7 +176,7 @@ def __init__(
self.outdir = outdir
self.tmpdir = tmpdir
self.script = script
- self.prov_obj = None # type: Optional[ProvenanceProfile]
+ self.prov_obj: Optional["ProvenanceProfile"] = None
def run(
self,
@@ -192,6 +210,7 @@ def run(
self.output_callback({}, "permanentFail")
+@mypyc_attr(allow_interpreted_subclasses=True)
class ExpressionTool(Process):
def job(
self,
@@ -227,9 +246,7 @@ def remove_path(f): # type: (CWLObjectType) -> None
del f["path"]
-def revmap_file(
- builder: Builder, outdir: str, f: CWLObjectType
-) -> Optional[CWLObjectType]:
+def revmap_file(builder: Builder, outdir: str, f: CWLObjectType) -> Optional[CWLObjectType]:
"""
Remap a file from internal path to external path.
@@ -237,7 +254,6 @@ def revmap_file(
outside the container. Recognizes files in the pathmapper or remaps
internal output directories to the external directory.
"""
-
# builder.outdir is the inner (container/compute node) output directory
# outdir is the outer (host/storage system) output directory
@@ -273,9 +289,7 @@ def revmap_file(
)
revmap_f = builder.pathmapper.reversemap(path)
- if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith(
- "Writable"
- ):
+ if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith("Writable"):
f["location"] = revmap_f[1]
elif (
uripath == outdir
@@ -304,8 +318,9 @@ def revmap_file(
)
+@mypyc_attr(serializable=True)
class CallbackJob:
- """Callback Job class, used by CommandLine.job()."""
+ """Callback Job class, used by :py:func:`CommandLineTool.job`."""
def __init__(
self,
@@ -338,9 +353,7 @@ def run(
)
-def check_adjust(
- accept_re: Pattern[str], builder: Builder, file_o: CWLObjectType
-) -> CWLObjectType:
+def check_adjust(accept_re: Pattern[str], builder: Builder, file_o: CWLObjectType) -> CWLObjectType:
"""
Map files to assigned path inside a container.
@@ -348,9 +361,7 @@ def check_adjust(
doesn't reach everything in builder.bindings
"""
if not builder.pathmapper:
- raise ValueError(
- "Do not call check_adjust using a builder that doesn't have a pathmapper."
- )
+ raise ValueError("Do not call check_adjust using a builder that doesn't have a pathmapper.")
file_o["path"] = path = builder.pathmapper.mapper(cast(str, file_o["location"]))[1]
basename = cast(str, file_o.get("basename"))
dn, bn = os.path.split(path)
@@ -366,7 +377,7 @@ def check_adjust(
file_o["nameext"] = str(ne)
if not accept_re.match(basename):
raise WorkflowException(
- f"Invalid filename: '{file_o['basename']}' contains illegal characters"
+ f"Invalid filename: {file_o['basename']!r} contains illegal characters"
)
return file_o
@@ -378,9 +389,7 @@ def check_valid_locations(fs_access: StdFsAccess, ob: CWLObjectType) -> None:
if ob["class"] == "File" and not fs_access.isfile(location):
raise ValidationException("Does not exist or is not a File: '%s'" % location)
if ob["class"] == "Directory" and not fs_access.isdir(location):
- raise ValidationException(
- "Does not exist or is not a Directory: '%s'" % location
- )
+ raise ValidationException("Does not exist or is not a Directory: '%s'" % location)
OutputPortsType = Dict[str, Optional[CWLOutputType]]
@@ -396,10 +405,9 @@ def __init__(self, msg: str, port: CWLObjectType, **kwargs: Any) -> None:
)
+@mypyc_attr(allow_interpreted_subclasses=True)
class CommandLineTool(Process):
- def __init__(
- self, toolpath_object: CommentedMap, loadingContext: LoadingContext
- ) -> None:
+ def __init__(self, toolpath_object: CommentedMap, loadingContext: LoadingContext) -> None:
"""Initialize this CommandLineTool."""
super().__init__(toolpath_object, loadingContext)
self.prov_obj = loadingContext.prov_obj
@@ -445,18 +453,14 @@ def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]:
_logger.warning(
"MPI has been required while Docker is hinted, discarding Docker hint(s)"
)
- self.hints = [
- h for h in self.hints if h["class"] != "DockerRequirement"
- ]
+ self.hints = [h for h in self.hints if h["class"] != "DockerRequirement"]
return CommandLineJob
else:
if dockerRequired:
_logger.warning(
"Docker has been required while MPI is hinted, discarding MPI hint(s)"
)
- self.hints = [
- h for h in self.hints if h["class"] != MPIRequirementName
- ]
+ self.hints = [h for h in self.hints if h["class"] != MPIRequirementName]
else:
raise UnsupportedRequirement(
"Both Docker and MPI have been hinted - don't know what to do"
@@ -471,8 +475,8 @@ def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]:
)
return CommandLineJob
+ @staticmethod
def make_path_mapper(
- self,
reffiles: List[CWLObjectType],
stagedir: str,
runtimeContext: RuntimeContext,
@@ -480,13 +484,10 @@ def make_path_mapper(
) -> PathMapper:
return PathMapper(reffiles, runtimeContext.basedir, stagedir, separateDirs)
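# Hedged sketch, not part of this diff: with allow_interpreted_subclasses
# enabled above, a pure-Python subclass can still override the now-static
# make_path_mapper, e.g. to force a flat staging layout. "MyTool" is a
# hypothetical name.
from typing import List

from cwltool.command_line_tool import CommandLineTool
from cwltool.context import RuntimeContext
from cwltool.pathmapper import PathMapper
from cwltool.utils import CWLObjectType


class MyTool(CommandLineTool):
    @staticmethod
    def make_path_mapper(
        reffiles: List[CWLObjectType],
        stagedir: str,
        runtimeContext: RuntimeContext,
        separateDirs: bool,
    ) -> PathMapper:
        # Ignore separateDirs and stage every input into one directory.
        return PathMapper(reffiles, runtimeContext.basedir, stagedir, separateDirs=False)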
- def updatePathmap(
- self, outdir: str, pathmap: PathMapper, fn: CWLObjectType
- ) -> None:
+ def updatePathmap(self, outdir: str, pathmap: PathMapper, fn: CWLObjectType) -> None:
+ """Update a PathMapper with a CWL File or Directory object."""
if not isinstance(fn, MutableMapping):
- raise WorkflowException(
- "Expected File or Directory object, was %s" % type(fn)
- )
+ raise WorkflowException("Expected File or Directory object, was %s" % type(fn))
basename = cast(str, fn["basename"])
if "location" in fn:
location = cast(str, fn["location"])
@@ -501,9 +502,7 @@ def updatePathmap(
for sf in cast(List[CWLObjectType], fn.get("secondaryFiles", [])):
self.updatePathmap(outdir, pathmap, sf)
for ls in cast(List[CWLObjectType], fn.get("listing", [])):
- self.updatePathmap(
- os.path.join(outdir, cast(str, fn["basename"])), pathmap, ls
- )
+ self.updatePathmap(os.path.join(outdir, cast(str, fn["basename"])), pathmap, ls)
def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
initialWorkdir, _ = self.get_requirement("InitialWorkDirRequirement")
@@ -529,9 +528,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if not isinstance(ls_evaluated, MutableSequence):
fail = ls_evaluated
else:
- ls_evaluated2 = cast(
- MutableSequence[Union[None, CWLOutputType]], ls_evaluated
- )
+ ls_evaluated2 = cast(MutableSequence[Union[None, CWLOutputType]], ls_evaluated)
for entry in ls_evaluated2:
if entry == None: # noqa
if classic_dirent:
@@ -585,11 +582,11 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
message += ". "
else:
message += "; null; or arrays of File or Directory objects. "
- message += f"Got '{fail}' among the results from "
- message += f"'{initialWorkdir['listing'].strip()}'." + fail_suffix
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(message)
+ message += f"Got {fail!r} among the results from "
+ message += f"{initialWorkdir['listing'].strip()!r}." + fail_suffix
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ message
+ )
ls = cast(List[CWLObjectType], ls_evaluated)
else:
# "listing" is an array of either expressions or Dirent so
@@ -608,9 +605,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if isinstance(entry, MutableSequence):
if classic_listing:
- raise SourceLine(
- t, "entry", WorkflowException, debug
- ).makeError(
+ raise SourceLine(t, "entry", WorkflowException, debug).makeError(
"'entry' expressions are not allowed to evaluate "
"to an array of Files or Directories until CWL "
"v1.2. Consider using 'cwl-upgrader' to upgrade "
@@ -622,9 +617,10 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
filelist = True
for e in entry:
- if not isinstance(e, MutableMapping) or e.get(
- "class"
- ) not in ("File", "Directory"):
+ if not isinstance(e, MutableMapping) or e.get("class") not in (
+ "File",
+ "Directory",
+ ):
filelist = False
break
@@ -652,9 +648,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
et["entry"] = entry
else:
if classic_dirent:
- raise SourceLine(
- t, "entry", WorkflowException, debug
- ).makeError(
+ raise SourceLine(t, "entry", WorkflowException, debug).makeError(
"'entry' expression resulted in "
"something other than number, object or "
"array besides a single File or Dirent object. "
@@ -663,7 +657,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
"If that is the desired result then please "
"consider using 'cwl-upgrader' to upgrade "
"your document to CWL version 1.2. "
- f"Result of '{entry_field}' was '{entry}'."
+ f"Result of {entry_field!r} was {entry!r}."
)
et["entry"] = json_dumps(entry, sort_keys=True)
@@ -676,7 +670,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
t, "entryname", WorkflowException, debug
).makeError(
"'entryname' expression must result a string. "
- f"Got '{en}' from '{entryname_field}'"
+ f"Got {en!r} from {entryname_field!r}"
)
et["entryname"] = en
else:
@@ -698,11 +692,8 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
for i, t2 in enumerate(ls):
if not isinstance(t2, Mapping):
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
- "Entry at index %s of listing is not a record, was %s"
- % (i, type(t2))
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ f"Entry at index {i} of listing is not a record, was {type(t2)}"
)
if "entry" not in t2:
@@ -711,9 +702,9 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
# Dirent
if isinstance(t2["entry"], str):
if not t2["entryname"]:
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError("Entry at index %s of listing missing entryname" % (i))
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ "Entry at index %s of listing missing entryname" % (i)
+ )
ls[i] = {
"class": "File",
"basename": t2["entryname"],
@@ -723,17 +714,14 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
continue
if not isinstance(t2["entry"], Mapping):
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
- "Entry at index %s of listing is not a record, was %s"
- % (i, type(t2["entry"]))
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ "Entry at index {} of listing is not a record, was {}".format(
+ i, type(t2["entry"])
+ )
)
if t2["entry"].get("class") not in ("File", "Directory"):
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
"Entry at index %s of listing is not a File or Directory object, was %s"
% (i, t2)
)
@@ -750,9 +738,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
for i, t3 in enumerate(ls):
if t3.get("class") not in ("File", "Directory"):
# Check that every item is a File or Directory object now
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
f"Entry at index {i} of listing is not a Dirent, File or "
f"Directory object, was {t2}."
)
@@ -761,21 +747,17 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
basename = os.path.normpath(cast(str, t3["basename"]))
t3["basename"] = basename
if basename.startswith("../"):
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
- f"Name '{basename}' at index {i} of listing is invalid, "
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ f"Name {basename!r} at index {i} of listing is invalid, "
"cannot start with '../'"
)
if basename.startswith("/"):
# only if DockerRequirement in requirements
- if cwl_version and ORDERED_VERSIONS.index(
- cwl_version
- ) < ORDERED_VERSIONS.index("v1.2.0-dev4"):
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
- f"Name '{basename}' at index {i} of listing is invalid, "
+ if cwl_version and ORDERED_VERSIONS.index(cwl_version) < ORDERED_VERSIONS.index(
+ "v1.2.0-dev4"
+ ):
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ f"Name {basename!r} at index {i} of listing is invalid, "
"paths starting with '/' are only permitted in CWL 1.2 "
"and later. Consider changing the absolute path to a relative "
"path, or upgrade the CWL description to CWL v1.2 using "
@@ -784,10 +766,8 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
req, is_req = self.get_requirement("DockerRequirement")
if is_req is not True:
- raise SourceLine(
- initialWorkdir, "listing", WorkflowException, debug
- ).makeError(
- f"Name '{basename}' at index {i} of listing is invalid, "
+ raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError(
+ f"Name {basename!r} at index {i} of listing is invalid, "
"name can only start with '/' when DockerRequirement "
"is in 'requirements'."
)
@@ -797,9 +777,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
for entry in ls:
if "basename" in entry:
basename = cast(str, entry["basename"])
- entry["dirname"] = os.path.join(
- builder.outdir, os.path.dirname(basename)
- )
+ entry["dirname"] = os.path.join(builder.outdir, os.path.dirname(basename))
entry["basename"] = os.path.basename(basename)
normalizeFilesDirs(entry)
self.updatePathmap(
@@ -831,13 +809,10 @@ def job(
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> Generator[Union[JobBase, CallbackJob], None, None]:
-
workReuse, _ = self.get_requirement("WorkReuse")
enableReuse = workReuse.get("enableReuse", True) if workReuse else True
- jobname = uniquename(
- runtimeContext.name or shortname(self.tool.get("id", "job"))
- )
+ jobname = uniquename(runtimeContext.name or shortname(self.tool.get("id", "job")))
if runtimeContext.cachedir and enableReuse:
cachecontext = runtimeContext.copy()
cachecontext.outdir = "/out"
@@ -850,9 +825,7 @@ def job(
cachebuilder.stagedir,
separateDirs=False,
)
- _check_adjust = partial(
- check_adjust, self.path_check_mode.value, cachebuilder
- )
+ _check_adjust = partial(check_adjust, self.path_check_mode.value, cachebuilder)
_checksum = partial(
compute_checksums,
runtimeContext.make_fs_access(runtimeContext.basedir),
@@ -862,22 +835,13 @@ def job(
("File", "Directory"),
_check_adjust,
)
- visit_class(
- [cachebuilder.files, cachebuilder.bindings], ("File"), _checksum
- )
+ visit_class([cachebuilder.files, cachebuilder.bindings], ("File"), _checksum)
- cmdline = flatten(
- list(map(cachebuilder.generate_arg, cachebuilder.bindings))
- )
+ cmdline = flatten(list(map(cachebuilder.generate_arg, cachebuilder.bindings)))
docker_req, _ = self.get_requirement("DockerRequirement")
if docker_req is not None and runtimeContext.use_container:
- dockerimg = docker_req.get("dockerImageId") or docker_req.get(
- "dockerPull"
- )
- elif (
- runtimeContext.default_container is not None
- and runtimeContext.use_container
- ):
+ dockerimg = docker_req.get("dockerImageId") or docker_req.get("dockerPull")
+ elif runtimeContext.default_container is not None and runtimeContext.use_container:
dockerimg = runtimeContext.default_container
else:
dockerimg = None
@@ -938,9 +902,7 @@ def remove_prefix(s: str, prefix: str) -> str:
keydictstr = json_dumps(keydict, separators=(",", ":"), sort_keys=True)
cachekey = hashlib.md5(keydictstr.encode("utf-8")).hexdigest() # nosec
- _logger.debug(
- "[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey
- )
+ _logger.debug("[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey)
jobcache = os.path.join(runtimeContext.cachedir, cachekey)
@@ -960,9 +922,7 @@ def remove_prefix(s: str, prefix: str) -> str:
if os.path.isdir(jobcache) and jobstatus == "success":
if docker_req and runtimeContext.use_container:
- cachebuilder.outdir = (
- runtimeContext.docker_outdir or random_outdir()
- )
+ cachebuilder.outdir = runtimeContext.docker_outdir or random_outdir()
else:
cachebuilder.outdir = jobcache
@@ -972,9 +932,7 @@ def remove_prefix(s: str, prefix: str) -> str:
jobcachelock.close()
return
else:
- _logger.info(
- "[job %s] Output of job will be cached in %s", jobname, jobcache
- )
+ _logger.info("[job %s] Output of job will be cached in %s", jobname, jobcache)
# turn shared lock into an exclusive lock since we'll
# be writing the cache directory
@@ -1027,22 +985,16 @@ def update_status_output_callback(
"[job %s] initializing from %s%s",
j.name,
self.tool.get("id", ""),
- " as part of %s" % runtimeContext.part_of
- if runtimeContext.part_of
- else "",
+ " as part of %s" % runtimeContext.part_of if runtimeContext.part_of else "",
)
_logger.debug("[job %s] %s", j.name, json_dumps(builder.job, indent=4))
- builder.pathmapper = self.make_path_mapper(
- reffiles, builder.stagedir, runtimeContext, True
- )
+ builder.pathmapper = self.make_path_mapper(reffiles, builder.stagedir, runtimeContext, True)
builder.requirements = j.requirements
_check_adjust = partial(check_adjust, self.path_check_mode.value, builder)
- visit_class(
- [builder.files, builder.bindings], ("File", "Directory"), _check_adjust
- )
+ visit_class([builder.files, builder.bindings], ("File", "Directory"), _check_adjust)
self._initialworkdir(j, builder)
@@ -1051,10 +1003,7 @@ def update_status_output_callback(
"[job %s] path mappings is %s",
j.name,
json_dumps(
- {
- p: builder.pathmapper.mapper(p)
- for p in builder.pathmapper.files()
- },
+ {p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()},
indent=4,
),
)
@@ -1064,8 +1013,8 @@ def update_status_output_callback(
stdin_eval = builder.do_eval(self.tool["stdin"])
if not (isinstance(stdin_eval, str) or stdin_eval is None):
raise ValidationException(
- f"'stdin' expression must return a string or null. Got '{stdin_eval}' "
- f"for '{self.tool['stdin']}'."
+ f"'stdin' expression must return a string or null. Got {stdin_eval!r} "
+ f"for {self.tool['stdin']!r}."
)
j.stdin = stdin_eval
if j.stdin:
@@ -1076,8 +1025,8 @@ def update_status_output_callback(
stderr_eval = builder.do_eval(self.tool["stderr"])
if not isinstance(stderr_eval, str):
raise ValidationException(
- f"'stderr' expression must return a string. Got '{stderr_eval}' "
- f"for '{self.tool['stderr']}'."
+ f"'stderr' expression must return a string. Got {stderr_eval!r} "
+ f"for {self.tool['stderr']!r}."
)
j.stderr = stderr_eval
if j.stderr:
@@ -1091,8 +1040,8 @@ def update_status_output_callback(
stdout_eval = builder.do_eval(self.tool["stdout"])
if not isinstance(stdout_eval, str):
raise ValidationException(
- f"'stdout' expression must return a string. Got '{stdout_eval}' "
- f"for '{self.tool['stdout']}'."
+ f"'stdout' expression must return a string. Got {stdout_eval!r} "
+ f"for {self.tool['stdout']!r}."
)
j.stdout = stdout_eval
if j.stdout:
@@ -1160,13 +1109,13 @@ def register_reader(f: CWLObjectType) -> None:
if timelimit_eval and not isinstance(timelimit_eval, int):
raise WorkflowException(
"'timelimit' expression must evaluate to a long/int. Got "
- f"'{timelimit_eval}' for expression '{limit_field}'."
+ f"{timelimit_eval!r} for expression {limit_field!r}."
)
else:
timelimit_eval = limit_field
if not isinstance(timelimit_eval, int) or timelimit_eval < 0:
raise WorkflowException(
- f"timelimit must be an integer >= 0, got: {timelimit_eval}"
+ f"timelimit must be an integer >= 0, got: {timelimit_eval!r}"
)
j.timelimit = timelimit_eval
@@ -1179,13 +1128,13 @@ def register_reader(f: CWLObjectType) -> None:
if not isinstance(networkaccess_eval, bool):
raise WorkflowException(
"'networkAccess' expression must evaluate to a bool. "
- f"Got '{networkaccess_eval}' for expression '{networkaccess_field}'."
+ f"Got {networkaccess_eval!r} for expression {networkaccess_field!r}."
)
else:
networkaccess_eval = networkaccess_field
if not isinstance(networkaccess_eval, bool):
raise WorkflowException(
- "networkAccess must be a boolean, got: {networkaccess_eval}."
+ "networkAccess must be a boolean, got: {networkaccess_eval!r}."
)
j.networkaccess = networkaccess_eval
@@ -1198,11 +1147,9 @@ def register_reader(f: CWLObjectType) -> None:
if "${" in env_value_field or "$(" in env_value_field:
env_value_eval = builder.do_eval(env_value_field)
if not isinstance(env_value_eval, str):
- raise SourceLine(
- evr["envDef"], eindex, WorkflowException, debug
- ).makeError(
+ raise SourceLine(evr["envDef"], eindex, WorkflowException, debug).makeError(
"'envValue expression must evaluate to a str. "
- f"Got '{env_value_eval}' for expression '{env_value_field}'."
+ f"Got {env_value_eval!r} for expression {env_value_field!r}."
)
env_value = env_value_eval
else:
@@ -1244,11 +1191,9 @@ def register_reader(f: CWLObjectType) -> None:
if isinstance(np, str):
np_eval = builder.do_eval(np)
if not isinstance(np_eval, int):
- raise SourceLine(
- mpi, "processes", WorkflowException, debug
- ).makeError(
+ raise SourceLine(mpi, "processes", WorkflowException, debug).makeError(
f"{MPIRequirementName} needs 'processes' expression to "
- f"evaluate to an int, got '{np_eval}' for expression '{np}'."
+ f"evaluate to an int, got {np_eval!r} for expression {np!r}."
)
np = np_eval
j.mpi_procs = np
@@ -1283,7 +1228,6 @@ def collect_output_ports(
)
else:
for i, port in enumerate(ports):
-
with SourceLine(
ports,
i,
@@ -1312,9 +1256,7 @@ def collect_output_ports(
if compute_checksum:
adjustFileObjs(ret, partial(compute_checksums, fs_access))
- expected_schema = cast(
- Schema, self.names.get_name("outputs_record_schema", None)
- )
+ expected_schema = cast(Schema, self.names.get_name("outputs_record_schema", None))
validate_ex(
expected_schema,
ret,
@@ -1327,10 +1269,7 @@ def collect_output_ports(
return ret if ret is not None else {}
except ValidationException as e:
raise WorkflowException(
- "Error validating output record. "
- + str(e)
- + "\n in "
- + json_dumps(ret, indent=4)
+ "Error validating output record. " + str(e) + "\n in " + json_dumps(ret, indent=4)
) from e
finally:
if builder.mutation_manager and readers:
@@ -1375,7 +1314,7 @@ def collect_output(
raise WorkflowException(
"Resolved glob patterns must be strings "
f"or list of strings, not "
- f"'{gb}' from '{binding['glob']}'"
+ f"{gb!r} from {binding['glob']!r}"
)
globpatterns.extend(aslist(gb))
@@ -1385,9 +1324,7 @@ def collect_output(
elif gb == ".":
gb = outdir
elif gb.startswith("/"):
- raise WorkflowException(
- "glob patterns must not start with '/'"
- )
+ raise WorkflowException("glob patterns must not start with '/'")
try:
prefix = fs_access.glob(outdir)
sorted_glob_result = sorted(
@@ -1400,38 +1337,26 @@ def collect_output(
"location": g,
"path": fs_access.join(
builder.outdir,
- urllib.parse.unquote(
- g[len(prefix[0]) + 1 :]
- ),
+ urllib.parse.unquote(g[len(prefix[0]) + 1 :]),
),
"basename": decoded_basename,
- "nameroot": os.path.splitext(decoded_basename)[
- 0
- ],
- "nameext": os.path.splitext(decoded_basename)[
- 1
- ],
- "class": "File"
- if fs_access.isfile(g)
- else "Directory",
+ "nameroot": os.path.splitext(decoded_basename)[0],
+ "nameext": os.path.splitext(decoded_basename)[1],
+ "class": "File" if fs_access.isfile(g) else "Directory",
}
for g, decoded_basename in zip(
sorted_glob_result,
map(
- lambda x: os.path.basename(
- urllib.parse.unquote(x)
- ),
+ lambda x: os.path.basename(urllib.parse.unquote(x)),
sorted_glob_result,
),
)
]
)
- except (OSError) as e:
+ except OSError as e:
_logger.warning(str(e))
except Exception:
- _logger.error(
- "Unexpected error from fs_access", exc_info=True
- )
+ _logger.error("Unexpected error from fs_access", exc_info=True)
raise
for files in cast(List[Dict[str, Optional[CWLOutputType]]], r):
@@ -1443,16 +1368,12 @@ def collect_output(
get_listing(fs_access, files, (ll == "deep_listing"))
else:
if binding.get("loadContents"):
- with fs_access.open(
- cast(str, rfile["location"]), "rb"
- ) as f:
- files["contents"] = content_limit_respected_read_bytes(
- f
- ).decode("utf-8")
+ with fs_access.open(cast(str, rfile["location"]), "rb") as f:
+ files["contents"] = content_limit_respected_read_bytes(f).decode(
+ "utf-8"
+ )
if compute_checksum:
- with fs_access.open(
- cast(str, rfile["location"]), "rb"
- ) as f:
+ with fs_access.open(cast(str, rfile["location"]), "rb") as f:
checksum = hashlib.sha1() # nosec
contents = f.read(1024 * 1024)
while contents != b"":
@@ -1473,9 +1394,7 @@ def collect_output(
if "outputEval" in binding:
with SourceLine(binding, "outputEval", WorkflowException, debug):
- result = builder.do_eval(
- cast(CWLOutputType, binding["outputEval"]), context=r
- )
+ result = builder.do_eval(cast(CWLOutputType, binding["outputEval"]), context=r)
else:
result = cast(CWLOutputType, r)
@@ -1483,7 +1402,7 @@ def collect_output(
with SourceLine(binding, "glob", WorkflowException, debug):
if not result and not optional:
raise WorkflowException(
- f"Did not find output file with glob pattern: '{globpatterns}'."
+ f"Did not find output file with glob pattern: {globpatterns!r}."
)
elif not result and optional:
pass
@@ -1500,9 +1419,7 @@ def collect_output(
for primary in aslist(result):
if isinstance(primary, MutableMapping):
primary.setdefault("secondaryFiles", [])
- pathprefix = primary["path"][
- 0 : primary["path"].rindex(os.sep) + 1
- ]
+ pathprefix = primary["path"][0 : primary["path"].rindex(os.sep) + 1]
for sf in aslist(schema["secondaryFiles"]):
if "required" in sf:
with SourceLine(
@@ -1522,34 +1439,35 @@ def collect_output(
"Expressions in the field "
"'required' must evaluate to a "
"Boolean (true or false) or None. "
- f"Got '{sf_required_eval}' for "
- f"'{sf['required']}'."
+ f"Got {sf_required_eval!r} for "
+ f"{sf['required']!r}."
)
sf_required: bool = sf_required_eval or False
else:
sf_required = False
if "$(" in sf["pattern"] or "${" in sf["pattern"]:
- sfpath = builder.do_eval(
- sf["pattern"], context=primary
- )
+ sfpath = builder.do_eval(sf["pattern"], context=primary)
else:
- sfpath = substitute(
- primary["basename"], sf["pattern"]
- )
+ sfpath = substitute(primary["basename"], sf["pattern"])
for sfitem in aslist(sfpath):
if not sfitem:
continue
if isinstance(sfitem, str):
sfitem = {"path": pathprefix + sfitem}
+ original_sfitem = copy.deepcopy(sfitem)
if (
- not fs_access.exists(sfitem["path"])
+ not fs_access.exists(
+ cast(
+ str, cast(CWLObjectType, revmap(sfitem))["location"]
+ )
+ )
and sf_required
):
raise WorkflowException(
"Missing required secondary file '%s'"
- % (sfitem["path"])
+ % (original_sfitem["path"])
)
if "path" in sfitem and "location" not in sfitem:
revmap(sfitem)
@@ -1568,13 +1486,13 @@ def collect_output(
if not isinstance(format_eval, str):
message = (
f"'format' expression must evaluate to a string. "
- f"Got '{format_eval}' from '{format_field}'."
+ f"Got {format_eval!r} from {format_field!r}."
)
if isinstance(result, list):
- message += f" 'self' had the value of the index {index} result: '{primary}'."
- raise SourceLine(
- schema, "format", WorkflowException, debug
- ).makeError(message)
+ message += f" 'self' had the value of the index {index} result: {primary!r}."
+ raise SourceLine(schema, "format", WorkflowException, debug).makeError(
+ message
+ )
primary["format"] = format_eval
else:
for primary in aslist(result):
diff --git a/cwltool/context.py b/cwltool/context.py
index b348e4c9d..28050b8ea 100644
--- a/cwltool/context.py
+++ b/cwltool/context.py
@@ -6,44 +6,43 @@
import threading
from typing import (
IO,
+ TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
+ Literal,
Optional,
TextIO,
Tuple,
Union,
)
+from ruamel.yaml.comments import CommentedMap
from schema_salad.avro.schema import Names
from schema_salad.ref_resolver import Loader
from schema_salad.utils import FetcherCallableType
-from typing_extensions import TYPE_CHECKING
-
-# move to a regular typing import when Python 3.3-3.6 is no longer supported
-from ruamel.yaml.comments import CommentedMap
-from .builder import Builder
from .mpi import MpiConfig
-from .mutation import MutationManager
from .pathmapper import PathMapper
-from .secrets import SecretStore
-from .software_requirements import DependenciesConfiguration
from .stdfsaccess import StdFsAccess
from .utils import DEFAULT_TMP_PREFIX, CWLObjectType, HasReqsHints, ResolverType
if TYPE_CHECKING:
from cwl_utils.parser.cwl_v1_2 import LoadingOptions
+ from .builder import Builder
+ from .cwlprov.provenance_profile import ProvenanceProfile
+ from .cwlprov.ro import ResearchObject
+ from .mutation import MutationManager
from .process import Process
- from .provenance import ResearchObject # pylint: disable=unused-import
- from .provenance_profile import ProvenanceProfile
+ from .secrets import SecretStore
+ from .software_requirements import DependenciesConfiguration
class ContextBase:
- """Shared kwargs based initilizer for {Runtime,Loading}Context."""
+ """Shared kwargs based initializer for :py:class:`RuntimeContext` and :py:class:`LoadingContext`."""
def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
"""Initialize."""
@@ -53,9 +52,8 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
setattr(self, k, v)
-def make_tool_notimpl(
- toolpath_object: CommentedMap, loadingContext: "LoadingContext"
-) -> "Process":
+def make_tool_notimpl(toolpath_object: CommentedMap, loadingContext: "LoadingContext") -> "Process":
+ """Fake implementation of the make tool function."""
raise NotImplementedError()
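# Sketch, not part of this diff: construct_tool_object defaults to
# default_make_tool (from cwltool.workflow), and callers can install any
# factory with the same (toolpath_object, loadingContext) signature in place
# of this placeholder.
from cwltool.context import LoadingContext
from cwltool.workflow import default_make_tool

lc = LoadingContext({"construct_tool_object": default_make_tool})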
@@ -79,7 +77,7 @@ def log_handler(
def set_log_dir(outdir: str, log_dir: str, subdir_name: str) -> str:
- """Default handler for setting the log directory."""
+ """Set the log directory."""
if log_dir == "":
return outdir
else:
@@ -89,158 +87,158 @@ def set_log_dir(outdir: str, log_dir: str, subdir_name: str) -> str:
class LoadingContext(ContextBase):
def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
"""Initialize the LoadingContext from the kwargs."""
- self.debug = False # type: bool
- self.metadata = {} # type: CWLObjectType
- self.requirements = None # type: Optional[List[CWLObjectType]]
- self.hints = None # type: Optional[List[CWLObjectType]]
- self.overrides_list = [] # type: List[CWLObjectType]
- self.loader = None # type: Optional[Loader]
- self.avsc_names = None # type: Optional[Names]
- self.disable_js_validation = False # type: bool
+ self.debug: bool = False
+ self.metadata: CWLObjectType = {}
+ self.requirements: Optional[List[CWLObjectType]] = None
+ self.hints: Optional[List[CWLObjectType]] = None
+ self.overrides_list: List[CWLObjectType] = []
+ self.loader: Optional[Loader] = None
+ self.avsc_names: Optional[Names] = None
+ self.disable_js_validation: bool = False
self.js_hint_options_file: Optional[str] = None
- self.do_validate = True # type: bool
- self.enable_dev = False # type: bool
- self.strict = True # type: bool
- self.resolver = None # type: Optional[ResolverType]
- self.fetcher_constructor = None # type: Optional[FetcherCallableType]
+ self.do_validate: bool = True
+ self.enable_dev: bool = False
+ self.strict: bool = True
+ self.resolver: Optional[ResolverType] = None
+ self.fetcher_constructor: Optional[FetcherCallableType] = None
self.construct_tool_object = default_make_tool
- self.research_obj = None # type: Optional[ResearchObject]
- self.orcid = "" # type: str
- self.cwl_full_name = "" # type: str
- self.host_provenance = False # type: bool
- self.user_provenance = False # type: bool
- self.prov_obj = None # type: Optional[ProvenanceProfile]
- self.do_update = None # type: Optional[bool]
- self.jobdefaults = None # type: Optional[CommentedMap]
- self.doc_cache = True # type: bool
- self.relax_path_checks = False # type: bool
- self.singularity = False # type: bool
- self.podman = False # type: bool
+ self.research_obj: Optional[ResearchObject] = None
+ self.orcid: str = ""
+ self.cwl_full_name: str = ""
+ self.host_provenance: bool = False
+ self.user_provenance: bool = False
+ self.prov_obj: Optional["ProvenanceProfile"] = None
+ self.do_update: Optional[bool] = None
+ self.jobdefaults: Optional[CommentedMap] = None
+ self.doc_cache: bool = True
+ self.relax_path_checks: bool = False
+ self.singularity: bool = False
+ self.podman: bool = False
self.eval_timeout: float = 60
self.codegen_idx: Dict[str, Tuple[Any, "LoadingOptions"]] = {}
self.fast_parser = False
self.skip_resolve_all = False
+ self.skip_schemas = False
super().__init__(kwargs)
- def copy(self):
- # type: () -> LoadingContext
+ def copy(self) -> "LoadingContext":
+ """Return a copy of this :py:class:`LoadingContext`."""
return copy.copy(self)
class RuntimeContext(ContextBase):
+ outdir: Optional[str] = None
+ tmpdir: str = ""
+ tmpdir_prefix: str = DEFAULT_TMP_PREFIX
+ tmp_outdir_prefix: str = ""
+ stagedir: str = ""
+
def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
"""Initialize the RuntimeContext from the kwargs."""
- select_resources_callable = Callable[ # pylint: disable=unused-variable
+ select_resources_callable = Callable[
[Dict[str, Union[int, float]], RuntimeContext],
Dict[str, Union[int, float]],
]
- self.user_space_docker_cmd = "" # type: Optional[str]
- self.secret_store = None # type: Optional[SecretStore]
- self.no_read_only = False # type: bool
- self.custom_net = None # type: Optional[str]
- self.no_match_user = False # type: bool
- self.preserve_environment = None # type: Optional[Iterable[str]]
- self.preserve_entire_environment = False # type: bool
- self.use_container = True # type: bool
- self.force_docker_pull = False # type: bool
-
- self.tmp_outdir_prefix = "" # type: str
- self.tmpdir_prefix = DEFAULT_TMP_PREFIX # type: str
- self.tmpdir = "" # type: str
- self.rm_tmpdir = True # type: bool
- self.pull_image = True # type: bool
- self.rm_container = True # type: bool
- self.move_outputs = "move" # type: str
- self.log_dir = "" # type: str
+ self.user_space_docker_cmd: Optional[str] = None
+ self.secret_store: Optional["SecretStore"] = None
+ self.no_read_only: bool = False
+ self.custom_net: Optional[str] = None
+ self.no_match_user: bool = False
+ self.preserve_environment: Optional[Iterable[str]] = None
+ self.preserve_entire_environment: bool = False
+ self.use_container: bool = True
+ self.force_docker_pull: bool = False
+
+ self.rm_tmpdir: bool = True
+ self.pull_image: bool = True
+ self.rm_container: bool = True
+ self.move_outputs: Union[Literal["move"], Literal["leave"], Literal["copy"]] = "move"
+ self.log_dir: str = ""
self.set_log_dir = set_log_dir
self.log_dir_handler = log_handler
self.streaming_allowed: bool = False
- self.singularity = False # type: bool
- self.podman = False # type: bool
- self.debug = False # type: bool
- self.compute_checksum = True # type: bool
- self.name = "" # type: str
- self.default_container = "" # type: Optional[str]
- self.find_default_container = (
- None
- ) # type: Optional[Callable[[HasReqsHints], Optional[str]]]
- self.cachedir = None # type: Optional[str]
- self.outdir = None # type: Optional[str]
- self.stagedir = "" # type: str
- self.part_of = "" # type: str
- self.basedir = "" # type: str
- self.toplevel = False # type: bool
- self.mutation_manager = None # type: Optional[MutationManager]
+ self.singularity: bool = False
+ self.podman: bool = False
+ self.debug: bool = False
+ self.compute_checksum: bool = True
+ self.name: str = ""
+ self.default_container: Optional[str] = ""
+ self.find_default_container: Optional[Callable[[HasReqsHints], Optional[str]]] = None
+ self.cachedir: Optional[str] = None
+ self.part_of: str = ""
+ self.basedir: str = ""
+ self.toplevel: bool = False
+ self.mutation_manager: Optional["MutationManager"] = None
self.make_fs_access = StdFsAccess
self.path_mapper = PathMapper
- self.builder = None # type: Optional[Builder]
- self.docker_outdir = "" # type: str
- self.docker_tmpdir = "" # type: str
- self.docker_stagedir = "" # type: str
- self.js_console = False # type: bool
- self.job_script_provider = None # type: Optional[DependenciesConfiguration]
- self.select_resources = None # type: Optional[select_resources_callable]
- self.eval_timeout = 60 # type: float
- self.postScatterEval = (
- None
- ) # type: Optional[Callable[[CWLObjectType], Optional[CWLObjectType]]]
- self.on_error = "stop" # type: str
- self.strict_memory_limit = False # type: bool
- self.strict_cpu_limit = False # type: bool
- self.cidfile_dir = None # type: Optional[str]
- self.cidfile_prefix = None # type: Optional[str]
-
- self.workflow_eval_lock = None # type: Optional[threading.Condition]
- self.research_obj = None # type: Optional[ResearchObject]
- self.orcid = "" # type: str
- self.cwl_full_name = "" # type: str
- self.process_run_id = None # type: Optional[str]
- self.prov_obj = None # type: Optional[ProvenanceProfile]
- self.mpi_config = MpiConfig() # type: MpiConfig
- self.default_stdout = None # type: Optional[Union[IO[bytes], TextIO]]
- self.default_stderr = None # type: Optional[Union[IO[bytes], TextIO]]
+ self.builder: Optional["Builder"] = None
+ self.docker_outdir: str = ""
+ self.docker_tmpdir: str = ""
+ self.docker_stagedir: str = ""
+ self.js_console: bool = False
+ self.job_script_provider: Optional[DependenciesConfiguration] = None
+ self.select_resources: Optional[select_resources_callable] = None
+ self.eval_timeout: float = 60
+ self.postScatterEval: Optional[Callable[[CWLObjectType], Optional[CWLObjectType]]] = None
+ self.on_error: Union[Literal["stop"], Literal["continue"]] = "stop"
+ self.strict_memory_limit: bool = False
+ self.strict_cpu_limit: bool = False
+ self.cidfile_dir: Optional[str] = None
+ self.cidfile_prefix: Optional[str] = None
+
+ self.workflow_eval_lock: Optional[threading.Condition] = None
+ self.research_obj: Optional[ResearchObject] = None
+ self.orcid: str = ""
+ self.cwl_full_name: str = ""
+ self.process_run_id: Optional[str] = None
+ self.prov_obj: Optional[ProvenanceProfile] = None
+ self.mpi_config: MpiConfig = MpiConfig()
+ self.default_stdout: Optional[Union[IO[bytes], TextIO]] = None
+ self.default_stderr: Optional[Union[IO[bytes], TextIO]] = None
+ self.validate_only: bool = False
+ self.validate_stdout: Optional[Union[IO[bytes], TextIO, IO[str]]] = None
super().__init__(kwargs)
if self.tmp_outdir_prefix == "":
self.tmp_outdir_prefix = self.tmpdir_prefix
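+
+ # Construction example (a sketch; assumes ContextBase.__init__ copies
+ # matching keys from the mapping onto attributes that already exist):
+ #
+ #     ctx = RuntimeContext({"podman": True, "tmpdir_prefix": "/tmp/cwl."})
+ #     assert ctx.tmp_outdir_prefix == "/tmp/cwl."  # fallback applied above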
def get_outdir(self) -> str:
- """Return self.outdir or create one with self.tmp_outdir_prefix."""
+ """Return :py:attr:`outdir` or create one with :py:attr:`tmp_outdir_prefix`."""
if self.outdir:
return self.outdir
return self.create_outdir()
def get_tmpdir(self) -> str:
- """Return self.tmpdir or create one with self.tmpdir_prefix."""
+ """Return :py:attr:`tmpdir` or create one with :py:attr:`tmpdir_prefix`."""
if self.tmpdir:
return self.tmpdir
return self.create_tmpdir()
def get_stagedir(self) -> str:
- """Return self.stagedir or create one with self.tmpdir_prefix."""
+ """Return :py:attr:`stagedir` or create one with :py:attr:`tmpdir_prefix`."""
if self.stagedir:
return self.stagedir
tmp_dir, tmp_prefix = os.path.split(self.tmpdir_prefix)
return tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
def create_tmpdir(self) -> str:
- """Create a temporary directory that respects self.tmpdir_prefix."""
+ """Create a temporary directory that respects :py:attr:`tmpdir_prefix`."""
tmp_dir, tmp_prefix = os.path.split(self.tmpdir_prefix)
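+ # e.g. a tmpdir_prefix of "/tmp/cwl." yields mkdtemp(prefix="cwl.", dir="/tmp")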
return tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
def create_outdir(self) -> str:
- """Create a temporary directory that respects self.tmp_outdir_prefix."""
+ """Create a temporary directory that respects :py:attr:`tmp_outdir_prefix`."""
out_dir, out_prefix = os.path.split(self.tmp_outdir_prefix)
return tempfile.mkdtemp(prefix=out_prefix, dir=out_dir)
- def copy(self):
- # type: () -> RuntimeContext
+ def copy(self) -> "RuntimeContext":
+ """Return a copy of this :py:class:`RuntimeContext`."""
return copy.copy(self)
-def getdefault(val, default):
- # type: (Any, Any) -> Any
+def getdefault(val: Any, default: Any) -> Any:
+ """Return the ``val`` using the ``default`` as backup in case the val is ``None``."""
if val is None:
return default
else:
diff --git a/cwltool/cuda.py b/cwltool/cuda.py
index 50bee5599..719bfd867 100644
--- a/cwltool/cuda.py
+++ b/cwltool/cuda.py
@@ -1,3 +1,5 @@
+"""Support utilities for CUDA."""
+
import subprocess # nosec
import xml.dom.minidom # nosec
from typing import Tuple
@@ -7,15 +9,42 @@
def cuda_version_and_device_count() -> Tuple[str, int]:
+ """Determine the CUDA version and number of attached CUDA GPUs."""
try:
out = subprocess.check_output(["nvidia-smi", "-q", "-x"]) # nosec
except Exception as e:
_logger.warning("Error checking CUDA version with nvidia-smi: %s", e)
return ("", 0)
dm = xml.dom.minidom.parseString(out) # nosec
- ag = dm.getElementsByTagName("attached_gpus")[0].firstChild
- cv = dm.getElementsByTagName("cuda_version")[0].firstChild
- return (cv.data, int(ag.data))
+
+ ag = dm.getElementsByTagName("attached_gpus")
+ if len(ag) < 1 or ag[0].firstChild is None:
+ _logger.warning(
+ "Error checking CUDA version with nvidia-smi. Missing 'attached_gpus' or it is empty.: %s",
+ out,
+ )
+ return ("", 0)
+ ag_element = ag[0].firstChild
+
+ cv = dm.getElementsByTagName("cuda_version")
+ if len(cv) < 1 or cv[0].firstChild is None:
+ _logger.warning(
+ "Error checking CUDA version with nvidia-smi. Missing 'cuda_version' or it is empty.: %s",
+ out,
+ )
+ return ("", 0)
+ cv_element = cv[0].firstChild
+
+ if isinstance(cv_element, xml.dom.minidom.Text) and isinstance(
+ ag_element, xml.dom.minidom.Text
+ ):
+ return (cv_element.data, int(ag_element.data))
+ _logger.warning(
+ "Error checking CUDA version with nvidia-smi. "
+ "Either 'attached_gpus' or 'cuda_version' was not a text node: %s",
+ out,
+ )
+ return ("", 0)
def cuda_check(cuda_req: CWLObjectType, requestCount: int) -> int:
@@ -27,14 +56,10 @@ def cuda_check(cuda_req: CWLObjectType, requestCount: int) -> int:
return 0
versionf = float(version)
if versionf < vmin:
- _logger.warning(
- "CUDA version '%s' is less than minimum version '%s'", version, vmin
- )
+ _logger.warning("CUDA version '%s' is less than minimum version '%s'", version, vmin)
return 0
if requestCount > devices:
- _logger.warning(
- "Requested %d GPU devices but only %d available", requestCount, devices
- )
+ _logger.warning("Requested %d GPU devices but only %d available", requestCount, devices)
return 0
return requestCount
except Exception as e:
diff --git a/cwltool/cwlprov/__init__.py b/cwltool/cwlprov/__init__.py
new file mode 100644
index 000000000..b8ff8d14d
--- /dev/null
+++ b/cwltool/cwlprov/__init__.py
@@ -0,0 +1,168 @@
+"""Stores Research Object including provenance."""
+
+import hashlib
+import os
+import pwd
+import re
+import uuid
+from getpass import getuser
+from typing import IO, Any, Callable, Dict, List, Optional, Tuple, TypedDict, Union
+
+
+def _whoami() -> Tuple[str, str]:
+ """Return the current operating system account as (username, fullname)."""
+ username = getuser()
+ try:
+ fullname = pwd.getpwuid(os.getuid())[4].split(",")[0]
+ except (KeyError, IndexError):
+ fullname = username
+
+ return (username, fullname)
+
+
+def _check_mod_11_2(numeric_string: str) -> bool:
+ """
+ Validate numeric_string for its MOD-11-2 checksum.
+
+ Any "-" in the numeric_string are ignored.
+
+ The last digit of numeric_string is assumed to be the checksum, 0-9 or X.
+
+ See ISO/IEC 7064:2003 and
+ https://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
+ """
+ # Strip -
+ nums = numeric_string.replace("-", "")
+ total = 0
+ # skip last (check)digit
+ for num in nums[:-1]:
+ digit = int(num)
+ total = (total + digit) * 2
+ remainder = total % 11
+ result = (12 - remainder) % 11
+ if result == 10:
+ checkdigit = "X"
+ else:
+ checkdigit = str(result)
+ # Compare against last digit or X
+ return nums[-1].upper() == checkdigit
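+
+
+# Worked example (illustrative): for "0000-0002-1825-0097" the running total
+# over the first fifteen digits is 1314, so remainder = 1314 % 11 = 5 and
+# (12 - 5) % 11 = 7, matching the final check digit:
+#
+#     _check_mod_11_2("0000-0002-1825-0097")  # True
+#     _check_mod_11_2("0000-0002-1825-0096")  # False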
+
+
+def _valid_orcid(orcid: Optional[str]) -> str:
+ """
+ Ensure orcid is a valid ORCID identifier.
+
+ The string must be equivalent to one of these forms:
+
+ 0000-0002-1825-0097
+ orcid.org/0000-0002-1825-0097
+ http://orcid.org/0000-0002-1825-0097
+ https://orcid.org/0000-0002-1825-0097
+
+ If the ORCID number or prefix is invalid, a ValueError is raised.
+
+ The returned ORCID string is always in the form of:
+ https://orcid.org/0000-0002-1825-0097
+ """
+ if orcid is None or not orcid:
+ raise ValueError("ORCID cannot be unspecified")
+ # Liberal in what we consume, e.g. ORCID.org/0000-0002-1825-009x
+ orcid = orcid.lower()
+ match = re.match(
+ # Note: concatenated r"" r"" below so we can add comments to pattern
+ # Optional hostname, with or without protocol
+ r"(http://orcid\.org/|https://orcid\.org/|orcid\.org/)?"
+ # alternative pattern, but probably messier
+ # r"^((https?://)?orcid.org/)?"
+ # ORCID number is always 4x4 numerical digits,
+ # but last digit (modulus 11 checksum)
+ # can also be X (but we made it lowercase above).
+ # e.g. 0000-0002-1825-0097
+ # or 0000-0002-1694-233x
+ r"(?P(\d{4}-\d{4}-\d{4}-\d{3}[0-9x]))$",
+ orcid,
+ )
+
+ help_url = (
+ "https://support.orcid.org/knowledgebase/articles/"
+ "116780-structure-of-the-orcid-identifier"
+ )
+ if not match:
+ raise ValueError(f"Invalid ORCID: {orcid}\n{help_url}")
+
+ # Conservative in what we produce:
+ # a) Ensure any checksum digit is uppercase
+ orcid_num = match.group("orcid").upper()
+ # b) ..and correct
+ if not _check_mod_11_2(orcid_num):
+ raise ValueError(f"Invalid ORCID checksum: {orcid_num}\n{help_url}")
+
+ # c) Re-add the official prefix https://orcid.org/
+ return "https://orcid.org/%s" % orcid_num
+
+
+Annotation = TypedDict(
+ "Annotation",
+ {
+ "uri": str,
+ "about": str,
+ "content": Optional[Union[str, List[str]]],
+ "oa:motivatedBy": Dict[str, str],
+ },
+)
+
+
+class Aggregate(TypedDict, total=False):
+ """RO Aggregate class."""
+
+ uri: Optional[str]
+ bundledAs: Optional[Dict[str, Any]]
+ mediatype: Optional[str]
+ conformsTo: Optional[Union[str, List[str]]]
+ createdOn: Optional[str]
+ createdBy: Optional[Dict[str, str]]
+
+
+# Aggregate.bundledAs is actually type Aggregate, but cyclic definitions are not supported
+class AuthoredBy(TypedDict, total=False):
+ """RO AuthoredBy class."""
+
+ orcid: Optional[str]
+ name: Optional[str]
+ uri: Optional[str]
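+
+
+# A minimal (hypothetical) Aggregate entry, shaped like those later emitted
+# into metadata/manifest.json for bagged payload files:
+#
+#     entry: Aggregate = {
+#         "uri": "urn:hash::sha1:f572d396fae9206628714fb2ce00f72e94f2258f",
+#         "bundledAs": {
+#             "uri": "arcp://uuid,<ro-uuid>/data/f5/f572d396fae9206628714fb2ce00f72e94f2258f",
+#             "folder": "/data/f5/",
+#             "filename": "f572d396fae9206628714fb2ce00f72e94f2258f",
+#         },
+#     }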
+
+
+def checksum_copy(
+ src_file: IO[Any],
+ dst_file: Optional[IO[Any]] = None,
+ hasher: Optional[Callable[[], "hashlib._Hash"]] = None,
+ buffersize: int = 1024 * 1024,
+) -> str:
+ """Compute checksums while copying a file."""
+ # TODO: Use hashlib.new(Hasher_str) instead?
+ if hasher:
+ checksum = hasher()
+ else:
+ from .provenance_constants import Hasher
+
+ checksum = Hasher()
+ contents = src_file.read(buffersize)
+ if dst_file and hasattr(dst_file, "name") and hasattr(src_file, "name"):
+ temp_location = os.path.join(os.path.dirname(dst_file.name), str(uuid.uuid4()))
+ try:
+ os.rename(dst_file.name, temp_location)
+ os.link(src_file.name, dst_file.name)
+ dst_file = None
+ os.unlink(temp_location)
+ except OSError:
+ pass
+ if os.path.exists(temp_location):
+ os.rename(temp_location, dst_file.name) # type: ignore
+ while contents != b"":
+ if dst_file is not None:
+ dst_file.write(contents)
+ checksum.update(contents)
+ contents = src_file.read(buffersize)
+ if dst_file is not None:
+ dst_file.flush()
+ return checksum.hexdigest().lower()
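+
+
+# Usage sketch (hypothetical file names): copy a file while computing its
+# checksum with the module default Hasher:
+#
+#     with open("in.dat", "rb") as src, open("out.dat", "wb") as dst:
+#         digest = checksum_copy(src, dst)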
diff --git a/cwltool/provenance_constants.py b/cwltool/cwlprov/provenance_constants.py
similarity index 97%
rename from cwltool/provenance_constants.py
rename to cwltool/cwlprov/provenance_constants.py
index 0e4956c4b..ec047df38 100644
--- a/cwltool/provenance_constants.py
+++ b/cwltool/cwlprov/provenance_constants.py
@@ -38,7 +38,7 @@
# BagIt and YAML always use UTF-8
ENCODING = "UTF-8"
-TEXT_PLAIN = 'text/plain; charset="%s"' % ENCODING
+TEXT_PLAIN = f"text/plain; charset={ENCODING!r}"
# sha1, compatible with the File type's "checksum" field
# e.g. "checksum" = "sha1$47a013e660d408619d894b20806b1d5086aab03b"
diff --git a/cwltool/provenance_profile.py b/cwltool/cwlprov/provenance_profile.py
similarity index 90%
rename from cwltool/provenance_profile.py
rename to cwltool/cwlprov/provenance_profile.py
index 158144a64..c8ceee232 100644
--- a/cwltool/provenance_profile.py
+++ b/cwltool/cwlprov/provenance_profile.py
@@ -7,6 +7,7 @@
from pathlib import PurePath, PurePosixPath
from socket import getfqdn
from typing import (
+ TYPE_CHECKING,
Any,
Dict,
List,
@@ -22,12 +23,14 @@
from prov.identifier import Identifier, QualifiedName
from prov.model import PROV, PROV_LABEL, PROV_TYPE, PROV_VALUE, ProvDocument, ProvEntity
from schema_salad.sourceline import SourceLine
-from typing_extensions import TYPE_CHECKING
-from .errors import WorkflowException
-from .job import CommandLineJob, JobBase
-from .loghandler import _logger
-from .process import Process, shortname
+from ..errors import WorkflowException
+from ..job import CommandLineJob, JobBase
+from ..loghandler import _logger
+from ..process import Process, shortname
+from ..stdfsaccess import StdFsAccess
+from ..utils import CWLObjectType, JobsType, get_listing, posix_path, versionstring
+from ..workflow_job import WorkflowJob
from .provenance_constants import (
ACCOUNT_UUID,
CWLPROV,
@@ -46,17 +49,13 @@
WFDESC,
WFPROV,
)
-from .stdfsaccess import StdFsAccess
-from .utils import CWLObjectType, JobsType, get_listing, posix_path, versionstring
-from .workflow_job import WorkflowJob
+from .writablebagfile import create_job, write_bag_file  # TODO: change this later
if TYPE_CHECKING:
- from .provenance import ResearchObject
+ from .ro import ResearchObject
-def copy_job_order(
- job: Union[Process, JobsType], job_order_object: CWLObjectType
-) -> CWLObjectType:
+def copy_job_order(job: Union[Process, JobsType], job_order_object: CWLObjectType) -> CWLObjectType:
"""Create copy of job object for provenance."""
if not isinstance(job, WorkflowJob):
# direct command line tool execution
@@ -116,10 +115,7 @@ def __init__(
def __str__(self) -> str:
"""Represent this Provenvance profile as a string."""
- return "ProvenanceProfile <{}> in <{}>".format(
- self.workflow_run_uri,
- self.research_object,
- )
+ return f"ProvenanceProfile <{self.workflow_run_uri}> in <{self.research_object}>"
def generate_prov_doc(self) -> Tuple[str, ProvDocument]:
"""Add basic namespaces."""
@@ -142,7 +138,7 @@ def host_provenance(document: ProvDocument) -> None:
},
)
- self.cwltool_version = "cwltool %s" % versionstring().split()[-1]
+ self.cwltool_version = f"cwltool {versionstring().split()[-1]}"
self.document.add_namespace("wfprov", "http://purl.org/wf4ever/wfprov#")
# document.add_namespace('prov', 'http://www.w3.org/ns/prov#')
self.document.add_namespace("wfdesc", "http://purl.org/wf4ever/wfdesc#")
@@ -172,9 +168,7 @@ def host_provenance(document: ProvDocument) -> None:
)
ro_identifier_workflow = self.research_object.base_uri + "workflow/packed.cwl#"
self.wf_ns = self.document.add_namespace("wf", ro_identifier_workflow)
- ro_identifier_input = (
- self.research_object.base_uri + "workflow/primary-job.json#"
- )
+ ro_identifier_input = self.research_object.base_uri + "workflow/primary-job.json#"
self.document.add_namespace("input", ro_identifier_input)
# More info about the account (e.g. username, fullname)
@@ -225,9 +219,7 @@ def host_provenance(document: ProvDocument) -> None:
)
# association between SoftwareAgent and WorkflowRun
main_workflow = "wf:main"
- self.document.wasAssociatedWith(
- self.workflow_run_uri, self.engine_uuid, main_workflow
- )
+ self.document.wasAssociatedWith(self.workflow_run_uri, self.engine_uuid, main_workflow)
self.document.wasStartedBy(
self.workflow_run_uri, None, self.engine_uuid, datetime.datetime.now()
)
@@ -246,7 +238,7 @@ def evaluate(
self.prospective_prov(job)
customised_job = copy_job_order(job, job_order_object)
self.used_artefacts(customised_job, self.workflow_run_uri)
- research_obj.create_job(customised_job)
+ create_job(research_obj, customised_job)
elif hasattr(job, "workflow"):
# record provenance of workflow executions
self.prospective_prov(job)
@@ -296,9 +288,7 @@ def start_process(
self.document.wasAssociatedWith(
process_run_id, self.engine_uuid, str("wf:main/" + process_name)
)
- self.document.wasStartedBy(
- process_run_id, None, self.workflow_run_uri, when, None, None
- )
+ self.document.wasStartedBy(process_run_id, None, self.workflow_run_uri, when, None, None)
return process_run_id
def record_process_end(
@@ -355,9 +345,7 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st
relative_path = self.research_object.add_data_file(fhandle)
# FIXME: This naively relies on add_data_file setting hash as filename
checksum = PurePath(relative_path).name
- entity = self.document.entity(
- "data:" + checksum, {PROV_TYPE: WFPROV["Artifact"]}
- )
+ entity = self.document.entity("data:" + checksum, {PROV_TYPE: WFPROV["Artifact"]})
if "checksum" not in value:
value["checksum"] = f"{SHA1}${checksum}"
@@ -367,9 +355,7 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st
# By here one of them should have worked!
if not entity or not checksum:
- raise ValueError(
- "class:File but missing checksum/location/content: %r" % value
- )
+ raise ValueError("class:File but missing checksum/location/content: %r" % value)
# Track filename and extension, this is generally useful only for
# secondaryFiles. Note that multiple uses of a file might thus record
@@ -383,17 +369,11 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st
)
if "basename" in value:
- file_entity.add_attributes(
- {CWLPROV["basename"]: cast(str, value["basename"])}
- )
+ file_entity.add_attributes({CWLPROV["basename"]: cast(str, value["basename"])})
if "nameroot" in value:
- file_entity.add_attributes(
- {CWLPROV["nameroot"]: cast(str, value["nameroot"])}
- )
+ file_entity.add_attributes({CWLPROV["nameroot"]: cast(str, value["nameroot"])})
if "nameext" in value:
- file_entity.add_attributes(
- {CWLPROV["nameext"]: cast(str, value["nameext"])}
- )
+ file_entity.add_attributes({CWLPROV["nameext"]: cast(str, value["nameext"])})
self.document.specializationOf(file_entity, entity)
# Identify all schema annotations
@@ -410,23 +390,17 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st
file_entity.add_attributes({PROV_TYPE: SCHEMA[additional_type]})
else:
for a_entry in cast(List[str], atype):
- additional_type = a_entry.split(sep="/")[
- -1
- ] # find better method?
+ additional_type = a_entry.split(sep="/")[-1] # find better method?
file_entity.add_attributes({PROV_TYPE: SCHEMA[additional_type]})
else:
- file_entity = self._add_nested_annotations(
- s, schema_annotations[s], file_entity
- )
+ file_entity = self._add_nested_annotations(s, schema_annotations[s], file_entity)
# Transfer format annotations to provenance:
if "format" in value:
file_entity.add_attributes({SCHEMA["encodingFormat"]: value["format"]})
# Check for secondaries
- for sec in cast(
- MutableSequence[CWLObjectType], value.get("secondaryFiles", [])
- ):
+ for sec in cast(MutableSequence[CWLObjectType], value.get("secondaryFiles", [])):
# TODO: Record these in a specializationOf entity with UUID?
if sec["class"] == "File":
(sec_entity, _, _) = self.declare_file(sec)
@@ -469,6 +443,9 @@ def declare_directory(self, value: CWLObjectType) -> ProvEntity:
],
)
+ if "basename" in value:
+ coll.add_attributes({CWLPROV["basename"]: cast(str, value["basename"])})
+
# ORE description of ro:Folder, saved separately
coll_b = dir_bundle.entity(
dir_id,
@@ -554,11 +531,9 @@ def declare_directory(self, value: CWLObjectType) -> ProvEntity:
ore_doc.add_bundle(dir_bundle)
ore_doc = ore_doc.flattened()
ore_doc_path = str(PurePosixPath(METADATA, ore_doc_fn))
- with self.research_object.write_bag_file(ore_doc_path) as provenance_file:
+ with write_bag_file(self.research_object, ore_doc_path) as provenance_file:
ore_doc.serialize(provenance_file, format="rdf", rdf_format="turtle")
- self.research_object.add_annotation(
- dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri
- )
+ self.research_object.add_annotation(dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri)
if is_empty:
# Empty directory
@@ -573,7 +548,7 @@ def declare_string(self, value: str) -> Tuple[ProvEntity, str]:
data_file = self.research_object.add_data_file(byte_s, content_type=TEXT_PLAIN)
checksum = PurePosixPath(data_file).name
# FIXME: Don't naively assume add_data_file uses hash in filename!
- data_id = "data:%s" % PurePosixPath(data_file).stem
+ data_id = f"data:{PurePosixPath(data_file).stem}"
entity = self.document.entity(
data_id, {PROV_TYPE: WFPROV["Artifact"], PROV_VALUE: str(value)}
)
@@ -605,7 +580,7 @@ def declare_artefact(self, value: Any) -> ProvEntity:
byte_s = BytesIO(value)
data_file = self.research_object.add_data_file(byte_s)
# FIXME: Don't naively assume add_data_file uses hash in filename!
- data_id = "data:%s" % PurePosixPath(data_file).stem
+ data_id = f"data:{PurePosixPath(data_file).stem}"
return self.document.entity(
data_id,
{PROV_TYPE: WFPROV["Artifact"], PROV_VALUE: str(value)},
@@ -648,7 +623,7 @@ def declare_artefact(self, value: Any) -> ProvEntity:
# Let's iterate and recurse
coll_attribs: List[Tuple[Union[str, Identifier], Any]] = []
- for (key, val) in value.items():
+ for key, val in value.items():
v_ent = self.declare_artefact(val)
self.document.membership(coll, v_ent)
m_entity = self.document.entity(uuid.uuid4().urn)
@@ -656,9 +631,7 @@ def declare_artefact(self, value: Any) -> ProvEntity:
# https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition
# as prov.py do not easily allow PROV-N extensions
m_entity.add_asserted_type(PROV["KeyEntityPair"])
- m_entity.add_attributes(
- {PROV["pairKey"]: str(key), PROV["pairEntity"]: v_ent}
- )
+ m_entity.add_attributes({PROV["pairKey"]: str(key), PROV["pairEntity"]: v_ent})
coll_attribs.append((PROV["hadDictionaryMember"], m_entity))
coll.add_attributes(coll_attribs)
self.research_object.add_uri(coll.identifier.uri)
@@ -752,7 +725,7 @@ def generate_output_prov(
# FIXME: Probably not "main" in nested workflows
role = self.wf_ns[f"main/{name}/{output}"]
else:
- role = self.wf_ns["main/%s" % output]
+ role = self.wf_ns[f"main/{output}"]
if not process_run_id:
process_run_id = self.workflow_run_uri
@@ -800,9 +773,7 @@ def prospective_prov(self, job: JobsType) -> None:
)
# TODO: Declare roles/parameters as well
- def activity_has_provenance(
- self, activity: str, prov_ids: Sequence[Identifier]
- ) -> None:
+ def activity_has_provenance(self, activity: str, prov_ids: Sequence[Identifier]) -> None:
"""Add http://www.w3.org/TR/prov-aq/ relations to nested PROV files."""
# NOTE: The below will only work if the corresponding metadata/provenance arcp URI
# is a pre-registered namespace in the PROV Document
@@ -838,19 +809,17 @@ def finalize_prov_profile(self, name: Optional[str]) -> List[QualifiedName]:
prov_ids = []
# https://www.w3.org/TR/prov-xml/
- with self.research_object.write_bag_file(basename + ".xml") as provenance_file:
+ with write_bag_file(self.research_object, basename + ".xml") as provenance_file:
self.document.serialize(provenance_file, format="xml", indent=4)
prov_ids.append(self.provenance_ns[filename + ".xml"])
# https://www.w3.org/TR/prov-n/
- with self.research_object.write_bag_file(
- basename + ".provn"
- ) as provenance_file:
+ with write_bag_file(self.research_object, basename + ".provn") as provenance_file:
self.document.serialize(provenance_file, format="provn", indent=2)
prov_ids.append(self.provenance_ns[filename + ".provn"])
# https://www.w3.org/Submission/prov-json/
- with self.research_object.write_bag_file(basename + ".json") as provenance_file:
+ with write_bag_file(self.research_object, basename + ".json") as provenance_file:
self.document.serialize(provenance_file, format="json", indent=2)
prov_ids.append(self.provenance_ns[filename + ".json"])
@@ -858,24 +827,20 @@ def finalize_prov_profile(self, name: Optional[str]) -> List[QualifiedName]:
# which can be serialized to ttl/nt/jsonld (and more!)
# https://www.w3.org/TR/turtle/
- with self.research_object.write_bag_file(basename + ".ttl") as provenance_file:
+ with write_bag_file(self.research_object, basename + ".ttl") as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="turtle")
prov_ids.append(self.provenance_ns[filename + ".ttl"])
# https://www.w3.org/TR/n-triples/
- with self.research_object.write_bag_file(basename + ".nt") as provenance_file:
- self.document.serialize(
- provenance_file, format="rdf", rdf_format="ntriples"
- )
+ with write_bag_file(self.research_object, basename + ".nt") as provenance_file:
+ self.document.serialize(provenance_file, format="rdf", rdf_format="ntriples")
prov_ids.append(self.provenance_ns[filename + ".nt"])
# https://www.w3.org/TR/json-ld/
# TODO: Use a nice JSON-LD context
# see also https://eprints.soton.ac.uk/395985/
# 404 Not Found on https://provenance.ecs.soton.ac.uk/prov.jsonld :(
- with self.research_object.write_bag_file(
- basename + ".jsonld"
- ) as provenance_file:
+ with write_bag_file(self.research_object, basename + ".jsonld") as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="json-ld")
prov_ids.append(self.provenance_ns[filename + ".jsonld"])
diff --git a/cwltool/provenance.py b/cwltool/cwlprov/ro.py
similarity index 52%
rename from cwltool/provenance.py
rename to cwltool/cwlprov/ro.py
index 7c7e38cfd..c34e32082 100644
--- a/cwltool/provenance.py
+++ b/cwltool/cwlprov/ro.py
@@ -1,25 +1,16 @@
-"""Stores Research Object including provenance."""
+"""Stores class definition of ResearchObject and WritableBagFile."""
-import copy
import datetime
import hashlib
import os
-import pwd
-import re
import shutil
import tempfile
+import urllib
import uuid
-from array import array
-from collections import OrderedDict
-from getpass import getuser
-from io import FileIO, TextIOWrapper
-from mmap import mmap
from pathlib import Path, PurePosixPath
from typing import (
IO,
Any,
- BinaryIO,
- Callable,
Dict,
List,
MutableMapping,
@@ -33,13 +24,20 @@
import prov.model as provM
from prov.model import PROV, ProvDocument
-from schema_salad.utils import json_dumps
-from typing_extensions import TYPE_CHECKING, TypedDict
-from .loghandler import _logger
+from ..loghandler import _logger
+from ..stdfsaccess import StdFsAccess
+from ..utils import (
+ CWLObjectType,
+ CWLOutputType,
+ create_tmp_dir,
+ local_path,
+ posix_path,
+ versionstring,
+)
+from . import Aggregate, Annotation, AuthoredBy, _valid_orcid, _whoami, checksum_copy
from .provenance_constants import (
ACCOUNT_UUID,
- CWLPROV,
CWLPROV_VERSION,
DATA,
ENCODING,
@@ -58,213 +56,6 @@
WORKFLOW,
Hasher,
)
-from .stdfsaccess import StdFsAccess
-from .utils import (
- CWLObjectType,
- CWLOutputType,
- create_tmp_dir,
- local_path,
- posix_path,
- versionstring,
-)
-
-if TYPE_CHECKING:
- from .command_line_tool import ( # pylint: disable=unused-import
- CommandLineTool,
- ExpressionTool,
- )
- from .workflow import Workflow # pylint: disable=unused-import
-
-
-def _whoami() -> Tuple[str, str]:
- """Return the current operating system account as (username, fullname)."""
- username = getuser()
- try:
- fullname = pwd.getpwuid(os.getuid())[4].split(",")[0]
- except (KeyError, IndexError):
- fullname = username
-
- return (username, fullname)
-
-
-class WritableBagFile(FileIO):
- """Writes files in research object."""
-
- def __init__(self, research_object: "ResearchObject", rel_path: str) -> None:
- """Initialize an ROBagIt."""
- self.research_object = research_object
- if Path(rel_path).is_absolute():
- raise ValueError("rel_path must be relative: %s" % rel_path)
- self.rel_path = rel_path
- self.hashes = {
- SHA1: hashlib.sha1(), # nosec
- SHA256: hashlib.sha256(),
- SHA512: hashlib.sha512(),
- }
- # Open file in Research Object folder
- path = os.path.abspath(
- os.path.join(research_object.folder, local_path(rel_path))
- )
- if not path.startswith(os.path.abspath(research_object.folder)):
- raise ValueError("Path is outside Research Object: %s" % path)
- _logger.debug("[provenance] Creating WritableBagFile at %s.", path)
- super().__init__(path, mode="w")
-
- def write(self, b: Any) -> int:
- """Write some content to the Bag."""
- real_b = b if isinstance(b, (bytes, mmap, array)) else b.encode("utf-8")
- total = 0
- length = len(real_b)
- while total < length:
- ret = super().write(real_b)
- if ret:
- total += ret
- for val in self.hashes.values():
- # print("Updating hasher %s ", val)
- val.update(real_b)
- return total
-
- def close(self) -> None:
- # FIXME: Convert below block to a ResearchObject method?
- if self.rel_path.startswith("data/"):
- self.research_object.bagged_size[self.rel_path] = self.tell()
- else:
- self.research_object.tagfiles.add(self.rel_path)
-
- super().close()
- # { "sha1": "f572d396fae9206628714fb2ce00f72e94f2258f" }
- checksums = {}
- for name in self.hashes:
- checksums[name] = self.hashes[name].hexdigest().lower()
- self.research_object.add_to_manifest(self.rel_path, checksums)
-
- # To simplify our hash calculation we won't support
- # seeking, reading or truncating, as we can't do
- # similar seeks in the current hash.
- # TODO: Support these? At the expense of invalidating
- # the current hash, then having to recalculate at close()
- def seekable(self) -> bool:
- return False
-
- def readable(self) -> bool:
- return False
-
- def truncate(self, size: Optional[int] = None) -> int:
- # FIXME: This breaks contract IOBase,
- # as it means we would have to recalculate the hash
- if size is not None:
- raise OSError("WritableBagFile can't truncate")
- return self.tell()
-
-
-def _check_mod_11_2(numeric_string: str) -> bool:
- """
- Validate numeric_string for its MOD-11-2 checksum.
-
- Any "-" in the numeric_string are ignored.
-
- The last digit of numeric_string is assumed to be the checksum, 0-9 or X.
-
- See ISO/IEC 7064:2003 and
- https://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
- """
- # Strip -
- nums = numeric_string.replace("-", "")
- total = 0
- # skip last (check)digit
- for num in nums[:-1]:
- digit = int(num)
- total = (total + digit) * 2
- remainder = total % 11
- result = (12 - remainder) % 11
- if result == 10:
- checkdigit = "X"
- else:
- checkdigit = str(result)
- # Compare against last digit or X
- return nums[-1].upper() == checkdigit
-
-
-def _valid_orcid(orcid: Optional[str]) -> str:
- """
- Ensure orcid is a valid ORCID identifier.
-
- The string must be equivalent to one of these forms:
-
- 0000-0002-1825-0097
- orcid.org/0000-0002-1825-0097
- http://orcid.org/0000-0002-1825-0097
- https://orcid.org/0000-0002-1825-0097
-
- If the ORCID number or prefix is invalid, a ValueError is raised.
-
- The returned ORCID string is always in the form of:
- https://orcid.org/0000-0002-1825-0097
- """
- if orcid is None or not orcid:
- raise ValueError("ORCID cannot be unspecified")
- # Liberal in what we consume, e.g. ORCID.org/0000-0002-1825-009x
- orcid = orcid.lower()
- match = re.match(
- # Note: concatenated r"" r"" below so we can add comments to pattern
- # Optional hostname, with or without protocol
- r"(http://orcid\.org/|https://orcid\.org/|orcid\.org/)?"
- # alternative pattern, but probably messier
- # r"^((https?://)?orcid.org/)?"
- # ORCID number is always 4x4 numerical digits,
- # but last digit (modulus 11 checksum)
- # can also be X (but we made it lowercase above).
- # e.g. 0000-0002-1825-0097
- # or 0000-0002-1694-233x
- r"(?P(\d{4}-\d{4}-\d{4}-\d{3}[0-9x]))$",
- orcid,
- )
-
- help_url = (
- "https://support.orcid.org/knowledgebase/articles/"
- "116780-structure-of-the-orcid-identifier"
- )
- if not match:
- raise ValueError(f"Invalid ORCID: {orcid}\n{help_url}")
-
- # Conservative in what we produce:
- # a) Ensure any checksum digit is uppercase
- orcid_num = match.group("orcid").upper()
- # b) ..and correct
- if not _check_mod_11_2(orcid_num):
- raise ValueError(f"Invalid ORCID checksum: {orcid_num}\n{help_url}")
-
- # c) Re-add the official prefix https://orcid.org/
- return "https://orcid.org/%s" % orcid_num
-
-
-Annotation = TypedDict(
- "Annotation",
- {
- "uri": str,
- "about": str,
- "content": Optional[Union[str, List[str]]],
- "oa:motivatedBy": Dict[str, str],
- },
-)
-Aggregate = TypedDict(
- "Aggregate",
- {
- "uri": Optional[str],
- "bundledAs": Optional[Dict[str, Any]],
- "mediatype": Optional[str],
- "conformsTo": Optional[Union[str, List[str]]],
- "createdOn": Optional[str],
- "createdBy": Optional[Dict[str, str]],
- },
- total=False,
-)
-# Aggregate.bundledAs is actually type Aggregate, but cyclic definitions are not supported
-AuthoredBy = TypedDict(
- "AuthoredBy",
- {"orcid": Optional[str], "name": Optional[str], "uri": Optional[str]},
- total=False,
-)
class ResearchObject:
@@ -284,21 +75,20 @@ def __init__(
self.folder = create_tmp_dir(temp_prefix_ro)
self.closed = False
# map of filename "data/de/alsdklkas": 12398123 bytes
- self.bagged_size = {} # type: Dict[str, int]
- self.tagfiles = set() # type: Set[str]
- self._file_provenance = {} # type: Dict[str, Aggregate]
- self._external_aggregates = [] # type: List[Aggregate]
- self.annotations = [] # type: List[Annotation]
- self._content_types = {} # type: Dict[str,str]
+ self.bagged_size: Dict[str, int] = {}
+ self.tagfiles: Set[str] = set()
+ self._file_provenance: Dict[str, Aggregate] = {}
+ self._external_aggregates: List[Aggregate] = []
+ self.annotations: List[Annotation] = []
+ self._content_types: Dict[str, str] = {}
self.fsaccess = fsaccess
# These should be replaced by generate_prov_doc when workflow/run IDs are known:
- self.engine_uuid = "urn:uuid:%s" % uuid.uuid4()
+ self.engine_uuid = f"urn:uuid:{uuid.uuid4()}"
self.ro_uuid = uuid.uuid4()
- self.base_uri = "arcp://uuid,%s/" % self.ro_uuid
- self.cwltool_version = "cwltool %s" % versionstring().split()[-1]
- ##
- self.relativised_input_object = {} # type: CWLObjectType
+ self.base_uri = f"arcp://uuid,{self.ro_uuid}/"
+ self.cwltool_version = f"cwltool {versionstring().split()[-1]}"
self.has_manifest = False
+ self.relativised_input_object: CWLObjectType = {}
self._initialize()
_logger.debug("[provenance] Temporary research object: %s", self.folder)
@@ -335,29 +125,7 @@ def _initialize_bagit(self) -> None:
with open(bagit, "w", encoding=ENCODING, newline="\n") as bag_it_file:
# TODO: \n or \r\n ?
bag_it_file.write("BagIt-Version: 0.97\n")
- bag_it_file.write("Tag-File-Character-Encoding: %s\n" % ENCODING)
-
- def open_log_file_for_activity(
- self, uuid_uri: str
- ) -> Union[TextIOWrapper, WritableBagFile]:
- self.self_check()
- # Ensure valid UUID for safe filenames
- activity_uuid = uuid.UUID(uuid_uri)
- if activity_uuid.urn == self.engine_uuid:
- # It's the engine aka cwltool!
- name = "engine"
- else:
- name = "activity"
- p = os.path.join(LOGS, f"{name}.{activity_uuid}.txt")
- _logger.debug(f"[provenance] Opening log file for {name}: {p}")
- self.add_annotation(activity_uuid.urn, [p], CWLPROV["log"].uri)
- return self.write_bag_file(p)
-
- def _finalize(self) -> None:
- self._write_ro_manifest()
- self._write_bag_info()
- if not self.has_manifest:
- (Path(self.folder) / "manifest-sha1.txt").touch()
+ bag_it_file.write(f"Tag-File-Character-Encoding: {ENCODING}\n")
def user_provenance(self, document: ProvDocument) -> None:
"""Add the user provenance."""
@@ -398,24 +166,7 @@ def user_provenance(self, document: ProvDocument) -> None:
# get their name wrong!)
document.actedOnBehalfOf(account, user)
- def write_bag_file(
- self, path: str, encoding: Optional[str] = ENCODING
- ) -> Union[TextIOWrapper, WritableBagFile]:
- """Write the bag file into our research object."""
- self.self_check()
- # For some reason below throws BlockingIOError
- # fp = BufferedWriter(WritableBagFile(self, path))
- bag_file = WritableBagFile(self, path)
- if encoding is not None:
- # encoding: match Tag-File-Character-Encoding: UTF-8
- return TextIOWrapper(
- cast(BinaryIO, bag_file), encoding=encoding, newline="\n"
- )
- return bag_file
-
- def add_tagfile(
- self, path: str, timestamp: Optional[datetime.datetime] = None
- ) -> None:
+ def add_tagfile(self, path: str, timestamp: Optional[datetime.datetime] = None) -> None:
"""Add tag files to our research object."""
self.self_check()
checksums = {}
@@ -457,7 +208,7 @@ def guess_mediatype(
rel_path: str,
) -> Tuple[Optional[str], Optional[Union[str, List[str]]]]:
"""Return the mediatypes."""
- media_types = {
+ media_types: Dict[Union[str, None], str] = {
# Adapted from
# https://w3id.org/bundle/2014-11-05/#media-types
"txt": TEXT_PLAIN,
@@ -470,13 +221,13 @@ def guess_mediatype(
"cwl": 'text/x+yaml; charset="UTF-8"',
"provn": 'text/provenance-notation; charset="UTF-8"',
"nt": "application/n-triples",
- } # type: Dict[str, str]
- conforms_to = {
+ }
+ conforms_to: Dict[Union[str, None], str] = {
"provn": "http://www.w3.org/TR/2013/REC-prov-n-20130430/",
"cwl": "https://w3id.org/cwl/",
- } # type: Dict[str, str]
+ }
- prov_conforms_to = {
+ prov_conforms_to: Dict[str, str] = {
"provn": "http://www.w3.org/TR/2013/REC-prov-n-20130430/",
"rdf": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
"ttl": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
@@ -484,27 +235,19 @@ def guess_mediatype(
"jsonld": "http://www.w3.org/TR/2013/REC-prov-o-20130430/",
"xml": "http://www.w3.org/TR/2013/NOTE-prov-xml-20130430/",
"json": "http://www.w3.org/Submission/2013/SUBM-prov-json-20130424/",
- } # type: Dict[str, str]
+ }
- extension = rel_path.rsplit(".", 1)[-1].lower() # type: Optional[str]
+ extension: Optional[str] = rel_path.rsplit(".", 1)[-1].lower()
if extension == rel_path:
# No ".", no extension
extension = None
- mediatype = None # type: Optional[str]
- conformsTo = None # type: Optional[Union[str, List[str]]]
- if extension in media_types:
- mediatype = media_types[extension]
-
- if extension in conforms_to:
- # TODO: Open CWL file to read its declared "cwlVersion", e.g.
- # cwlVersion = "v1.0"
- conformsTo = conforms_to[extension]
+ mediatype: Optional[str] = media_types.get(extension, None)
+ conformsTo: Optional[Union[str, List[str]]] = conforms_to.get(extension, None)
+ # TODO: Open CWL file to read its declared "cwlVersion", e.g.
+ # cwlVersion = "v1.0"
- if (
- rel_path.startswith(posix_path(PROVENANCE))
- and extension in prov_conforms_to
- ):
+ if rel_path.startswith(posix_path(PROVENANCE)) and extension in prov_conforms_to:
if ".cwlprov" in rel_path:
# Our own!
conformsTo = [
@@ -517,9 +260,8 @@ def guess_mediatype(
conformsTo = prov_conforms_to[extension]
return (mediatype, conformsTo)
- aggregates = [] # type: List[Aggregate]
+ aggregates: List[Aggregate] = []
for path in self.bagged_size.keys():
-
temp_path = PurePosixPath(path)
folder = temp_path.parent
filename = temp_path.name
@@ -530,17 +272,17 @@ def guess_mediatype(
# TODO: Change to nih:sha-256; hashes
# https://tools.ietf.org/html/rfc6920#section-7
- aggregate_dict = {
+ aggregate_dict: Aggregate = {
"uri": "urn:hash::sha1:" + filename,
"bundledAs": {
# The arcp URI is suitable ORE proxy; local to this Research Object.
# (as long as we don't also aggregate it by relative path!)
"uri": self.base_uri + path,
# relate it to the data/ path
- "folder": "/%s/" % folder,
+ "folder": f"/{folder}/",
"filename": filename,
},
- } # type: Aggregate
+ }
if path in self._file_provenance:
# Made by workflow run, merge captured provenance
bundledAs = aggregate_dict["bundledAs"]
@@ -560,9 +302,7 @@ def guess_mediatype(
for path in self.tagfiles:
if not (
- path.startswith(METADATA)
- or path.startswith(WORKFLOW)
- or path.startswith(SNAPSHOT)
+ path.startswith(METADATA) or path.startswith(WORKFLOW) or path.startswith(SNAPSHOT)
):
# probably a bagit file
continue
@@ -575,11 +315,11 @@ def guess_mediatype(
# we need to relativize them for our current directory for
# as we are saved in metadata/manifest.json
mediatype, conformsTo = guess_mediatype(path)
- rel_aggregates = {
+ rel_aggregates: Aggregate = {
"uri": str(Path(os.pardir) / path),
"mediatype": mediatype,
"conformsTo": conformsTo,
- } # type: Aggregate
+ }
if path in self._file_provenance:
# Propagate file provenance (e.g. timestamp)
@@ -594,11 +334,9 @@ def guess_mediatype(
aggregates.extend(self._external_aggregates)
return aggregates
- def add_uri(
- self, uri: str, timestamp: Optional[datetime.datetime] = None
- ) -> Aggregate:
+ def add_uri(self, uri: str, timestamp: Optional[datetime.datetime] = None) -> Aggregate:
self.self_check()
- aggr = {"uri": uri} # type: Aggregate
+ aggr: Aggregate = {"uri": uri}
aggr["createdOn"], aggr["createdBy"] = self._self_made(timestamp=timestamp)
self._external_aggregates.append(aggr)
return aggr
@@ -611,17 +349,17 @@ def add_annotation(
curr = self.base_uri + METADATA + "/"
content = [c.replace(curr, "").replace(self.base_uri, "../") for c in content]
uri = uuid.uuid4().urn
- ann = {
+ ann: Annotation = {
"uri": uri,
"about": about,
"content": content,
"oa:motivatedBy": {"@id": motivated_by},
- } # type: Annotation
+ }
self.annotations.append(ann)
return uri
def _ro_annotations(self) -> List[Annotation]:
- annotations = [] # type: List[Annotation]
+ annotations: List[Annotation] = []
annotations.append(
{
"uri": uuid.uuid4().urn,
@@ -675,7 +413,7 @@ def _ro_annotations(self) -> List[Annotation]:
return annotations
def _authored_by(self) -> Optional[AuthoredBy]:
- authored_by = {} # type: AuthoredBy
+ authored_by: AuthoredBy = {}
if self.orcid:
authored_by["orcid"] = self.orcid
if self.full_name:
@@ -687,66 +425,12 @@ def _authored_by(self) -> Optional[AuthoredBy]:
return authored_by
return None
- def _write_ro_manifest(self) -> None:
-
- # Does not have to be this order, but it's nice to be consistent
- filename = "manifest.json"
- createdOn, createdBy = self._self_made()
- manifest = OrderedDict(
- {
- "@context": [
- {"@base": f"{self.base_uri}{posix_path(METADATA)}/"},
- "https://w3id.org/bundle/context",
- ],
- "id": "/",
- "conformsTo": CWLPROV_VERSION,
- "manifest": filename,
- "createdOn": createdOn,
- "createdBy": createdBy,
- "authoredBy": self._authored_by(),
- "aggregates": self._ro_aggregates(),
- "annotations": self._ro_annotations(),
- }
- )
-
- json_manifest = json_dumps(manifest, indent=4, ensure_ascii=False)
- rel_path = str(PurePosixPath(METADATA) / filename)
- json_manifest += "\n"
- with self.write_bag_file(rel_path) as manifest_file:
- manifest_file.write(json_manifest)
-
- def _write_bag_info(self) -> None:
-
- with self.write_bag_file("bag-info.txt") as info_file:
- info_file.write("Bag-Software-Agent: %s\n" % self.cwltool_version)
- # FIXME: require sha-512 of payload to comply with profile?
- # FIXME: Update profile
- info_file.write(
- "BagIt-Profile-Identifier: https://w3id.org/ro/bagit/profile\n"
- )
- info_file.write("Bagging-Date: %s\n" % datetime.date.today().isoformat())
- info_file.write(
- "External-Description: Research Object of CWL workflow run\n"
- )
- if self.full_name:
- info_file.write("Contact-Name: %s\n" % self.full_name)
-
- # NOTE: We can't use the urn:uuid:{UUID} of the workflow run (a prov:Activity)
- # as identifier for the RO/bagit (a prov:Entity). However the arcp base URI is good.
- info_file.write("External-Identifier: %s\n" % self.base_uri)
-
- # Calculate size of data/ (assuming no external fetch.txt files)
- total_size = sum(self.bagged_size.values())
- num_files = len(self.bagged_size)
- info_file.write("Payload-Oxum: %d.%d\n" % (total_size, num_files))
- _logger.debug("[provenance] Generated bagit metadata: %s", self.folder)
-
def generate_snapshot(self, prov_dep: CWLObjectType) -> None:
"""Copy all of the CWL files to the snapshot/ directory."""
self.self_check()
for key, value in prov_dep.items():
if key == "location" and cast(str, value).split("/")[-1]:
- location = cast(str, value)
+ location = urllib.parse.unquote(cast(str, value))
filename = location.split("/")[-1]
path = os.path.join(self.folder, SNAPSHOT, filename)
filepath = ""
@@ -762,9 +446,7 @@ def generate_snapshot(self, prov_dep: CWLObjectType) -> None:
shutil.copytree(filepath, path)
else:
shutil.copy(filepath, path)
- timestamp = datetime.datetime.fromtimestamp(
- os.path.getmtime(filepath)
- )
+ timestamp = datetime.datetime.fromtimestamp(os.path.getmtime(filepath))
self.add_tagfile(path, timestamp)
except PermissionError:
pass # FIXME: avoids duplicate snapshotting; need better solution
@@ -775,15 +457,6 @@ def generate_snapshot(self, prov_dep: CWLObjectType) -> None:
else:
pass
- def packed_workflow(self, packed: str) -> None:
- """Pack CWL description to generate re-runnable CWL object in RO."""
- self.self_check()
- rel_path = str(PurePosixPath(WORKFLOW) / "packed.cwl")
- # Write as binary
- with self.write_bag_file(rel_path, encoding=None) as write_pack:
- write_pack.write(packed)
- _logger.debug("[provenance] Added packed workflow: %s", rel_path)
-
def has_data_file(self, sha1hash: str) -> bool:
"""Confirm the presence of the given file in the RO."""
folder = os.path.join(self.folder, DATA, sha1hash[0:2])
@@ -799,9 +472,7 @@ def add_data_file(
"""Copy inputs to data/ folder."""
self.self_check()
tmp_dir, tmp_prefix = os.path.split(self.temp_prefix)
- with tempfile.NamedTemporaryFile(
- prefix=tmp_prefix, dir=tmp_dir, delete=False
- ) as tmp:
+ with tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp:
checksum = checksum_copy(from_fp, tmp)
# Calculate hash-based file path
@@ -820,9 +491,7 @@ def add_data_file(
if Hasher == hashlib.sha1:
self._add_to_bagit(rel_path, sha1=checksum)
else:
- _logger.warning(
- "[provenance] Unknown hash method %s for bagit manifest", Hasher
- )
+ _logger.warning("[provenance] Unknown hash method %s for bagit manifest", Hasher)
# Inefficient, bagit support need to checksum again
self._add_to_bagit(rel_path)
_logger.debug("[provenance] Added data file %s", path)
@@ -851,7 +520,7 @@ def add_to_manifest(self, rel_path: str, checksums: Dict[str, str]) -> None:
"""Add files to the research object manifest."""
self.self_check()
if PurePosixPath(rel_path).is_absolute():
- raise ValueError("rel_path must be relative: %s" % rel_path)
+ raise ValueError(f"rel_path must be relative: {rel_path}")
if os.path.commonprefix(["data/", rel_path]) == "data/":
# payload file, go to manifest
@@ -862,21 +531,19 @@ def add_to_manifest(self, rel_path: str, checksums: Dict[str, str]) -> None:
manifest = "tagmanifest"
# Add checksums to corresponding manifest files
- for (method, hash_value) in checksums.items():
+ for method, hash_value in checksums.items():
# File not in manifest because we bailed out on
# existence in bagged_size above
manifestpath = os.path.join(self.folder, f"{manifest}-{method.lower()}.txt")
# encoding: match Tag-File-Character-Encoding: UTF-8
- with open(
- manifestpath, "a", encoding=ENCODING, newline="\n"
- ) as checksum_file:
+ with open(manifestpath, "a", encoding=ENCODING, newline="\n") as checksum_file:
line = f"{hash_value} {rel_path}\n"
_logger.debug("[provenance] Added to %s: %s", manifestpath, line)
checksum_file.write(line)
def _add_to_bagit(self, rel_path: str, **checksums: str) -> None:
if PurePosixPath(rel_path).is_absolute():
- raise ValueError("rel_path must be relative: %s" % rel_path)
+ raise ValueError(f"rel_path must be relative: {rel_path}")
lpath = os.path.join(self.folder, local_path(rel_path))
if not os.path.exists(lpath):
raise OSError(f"File {rel_path} does not exist within RO: {lpath}")
@@ -895,41 +562,6 @@ def _add_to_bagit(self, rel_path: str, **checksums: str) -> None:
self.add_to_manifest(rel_path, checksums)
- def create_job(
- self, builder_job: CWLObjectType, is_output: bool = False
- ) -> CWLObjectType:
- # TODO customise the file
- """Generate the new job object with RO specific relative paths."""
- copied = copy.deepcopy(builder_job)
- relativised_input_objecttemp = {} # type: CWLObjectType
- self._relativise_files(copied)
-
- def jdefault(o: Any) -> Dict[Any, Any]:
- return dict(o)
-
- if is_output:
- rel_path = PurePosixPath(WORKFLOW) / "primary-output.json"
- else:
- rel_path = PurePosixPath(WORKFLOW) / "primary-job.json"
- j = json_dumps(copied, indent=4, ensure_ascii=False, default=jdefault)
- with self.write_bag_file(str(rel_path)) as file_path:
- file_path.write(j + "\n")
- _logger.debug("[provenance] Generated customised job file: %s", rel_path)
- # Generate dictionary with keys as workflow level input IDs and values
- # as
- # 1) for files the relativised location containing hash
- # 2) for other attributes, the actual value.
- for key, value in copied.items():
- if isinstance(value, MutableMapping):
- if value.get("class") in ("File", "Directory"):
- relativised_input_objecttemp[key] = value
- else:
- relativised_input_objecttemp[key] = value
- self.relativised_input_object.update(
- {k: v for k, v in relativised_input_objecttemp.items() if v}
- )
- return self.relativised_input_object
-
def _relativise_files(
self,
structure: Union[CWLObjectType, CWLOutputType, MutableSequence[CWLObjectType]],
@@ -940,14 +572,13 @@ def _relativise_files(
if isinstance(structure, MutableMapping):
if structure.get("class") == "File":
- relative_path = None # type: Optional[Union[str, PurePosixPath]]
+ relative_path: Optional[Union[str, PurePosixPath]] = None
if "checksum" in structure:
raw_checksum = cast(str, structure["checksum"])
alg, checksum = raw_checksum.split("$")
if alg != SHA1:
raise TypeError(
- "Only SHA1 CWL checksums are currently supported: "
- "{}".format(structure)
+ f"Only SHA1 CWL checksums are currently supported: {structure}"
)
if self.has_data_file(checksum):
prefix = checksum[0:2]
@@ -957,9 +588,7 @@ def _relativise_files(
# Register in RO; but why was this not picked
# up by used_artefacts?
_logger.info("[provenance] Adding to RO %s", structure["location"])
- with self.fsaccess.open(
- cast(str, structure["location"]), "rb"
- ) as fp:
+ with self.fsaccess.open(cast(str, structure["location"]), "rb") as fp:
relative_path = self.add_data_file(fp)
checksum = PurePosixPath(relative_path).name
structure["checksum"] = f"{SHA1}${checksum}"
@@ -984,65 +613,3 @@ def _relativise_files(
for obj in structure:
# Recurse and rewrite any nested File objects
self._relativise_files(cast(CWLOutputType, obj))
-
- def close(self, save_to: Optional[str] = None) -> None:
- """Close the Research Object, optionally saving to specified folder.
-
- Closing will remove any temporary files used by this research object.
- After calling this method, this ResearchObject instance can no longer
- be used, except for no-op calls to .close().
-
- The 'saveTo' folder should not exist - if it does, it will be deleted.
-
- It is safe to call this function multiple times without the
- 'saveTo' argument, e.g. within a try..finally block to
- ensure the temporary files of this Research Object are removed.
- """
- if save_to is None:
- if not self.closed:
- _logger.debug("[provenance] Deleting temporary %s", self.folder)
- shutil.rmtree(self.folder, ignore_errors=True)
- else:
- save_to = os.path.abspath(save_to)
- _logger.info("[provenance] Finalizing Research Object")
- self._finalize() # write manifest etc.
- # TODO: Write as archive (.zip or .tar) based on extension?
-
- if os.path.isdir(save_to):
- _logger.info("[provenance] Deleting existing %s", save_to)
- shutil.rmtree(save_to)
- shutil.move(self.folder, save_to)
- _logger.info("[provenance] Research Object saved to %s", save_to)
- self.folder = save_to
- self.closed = True
-
-
-def checksum_copy(
- src_file: IO[Any],
- dst_file: Optional[IO[Any]] = None,
- hasher=Hasher, # type: Callable[[], hashlib._Hash]
- buffersize: int = 1024 * 1024,
-) -> str:
- """Compute checksums while copying a file."""
- # TODO: Use hashlib.new(Hasher_str) instead?
- checksum = hasher()
- contents = src_file.read(buffersize)
- if dst_file and hasattr(dst_file, "name") and hasattr(src_file, "name"):
- temp_location = os.path.join(os.path.dirname(dst_file.name), str(uuid.uuid4()))
- try:
- os.rename(dst_file.name, temp_location)
- os.link(src_file.name, dst_file.name)
- dst_file = None
- os.unlink(temp_location)
- except OSError:
- pass
- if os.path.exists(temp_location):
- os.rename(temp_location, dst_file.name) # type: ignore
- while contents != b"":
- if dst_file is not None:
- dst_file.write(contents)
- checksum.update(contents)
- contents = src_file.read(buffersize)
- if dst_file is not None:
- dst_file.flush()
- return checksum.hexdigest().lower()
diff --git a/cwltool/cwlprov/writablebagfile.py b/cwltool/cwlprov/writablebagfile.py
new file mode 100644
index 000000000..d5ff3c731
--- /dev/null
+++ b/cwltool/cwlprov/writablebagfile.py
@@ -0,0 +1,273 @@
+"""Stores class definition of ResearchObject and WritableBagFile."""
+
+import copy
+import datetime
+import hashlib
+import os
+import shutil
+import uuid
+from array import array
+from collections import OrderedDict
+from io import FileIO, TextIOWrapper
+from mmap import mmap
+from pathlib import Path, PurePosixPath
+from typing import Any, BinaryIO, Dict, MutableMapping, Optional, Union, cast
+
+from schema_salad.utils import json_dumps
+
+from ..loghandler import _logger
+from ..utils import CWLObjectType, local_path, posix_path
+from .provenance_constants import (
+ CWLPROV,
+ CWLPROV_VERSION,
+ ENCODING,
+ LOGS,
+ METADATA,
+ SHA1,
+ SHA256,
+ SHA512,
+ WORKFLOW,
+)
+from .ro import ResearchObject
+
+
+class WritableBagFile(FileIO):
+ """Writes files in research object."""
+
+ def __init__(self, research_object: "ResearchObject", rel_path: str) -> None:
+ """Initialize an ROBagIt."""
+ self.research_object = research_object
+ if Path(rel_path).is_absolute():
+ raise ValueError("rel_path must be relative: %s" % rel_path)
+ self.rel_path = rel_path
+ self.hashes = {
+ SHA1: hashlib.sha1(), # nosec
+ SHA256: hashlib.sha256(),
+ SHA512: hashlib.sha512(),
+ }
+ # Open file in Research Object folder
+ path = os.path.abspath(os.path.join(research_object.folder, local_path(rel_path)))
+ if not path.startswith(os.path.abspath(research_object.folder)):
+ raise ValueError("Path is outside Research Object: %s" % path)
+ _logger.debug("[provenance] Creating WritableBagFile at %s.", path)
+ super().__init__(path, mode="w")
+
+ def write(self, b: Any) -> int:
+ """Write some content to the Bag."""
+ real_b = b if isinstance(b, (bytes, mmap, array)) else b.encode("utf-8")
+ total = 0
+ length = len(real_b)
+ while total < length:
+ ret = super().write(real_b)
+ if ret:
+ total += ret
+ for val in self.hashes.values():
+ val.update(real_b)
+ return total
+
+ def close(self) -> None:
+ """
+ Flush and close this stream.
+
+ Finalize checksums and manifests.
+ """
+ # FIXME: Convert below block to a ResearchObject method?
+ if self.rel_path.startswith("data/"):
+ self.research_object.bagged_size[self.rel_path] = self.tell()
+ else:
+ self.research_object.tagfiles.add(self.rel_path)
+
+ super().close()
+ # { "sha1": "f572d396fae9206628714fb2ce00f72e94f2258f" }
+ checksums = {}
+ for name, val in self.hashes.items():
+ checksums[name] = val.hexdigest().lower()
+ self.research_object.add_to_manifest(self.rel_path, checksums)
+
+ # To simplify our hash calculation we won't support
+ # seeking, reading or truncating, as we can't do
+ # similar seeks in the current hash.
+ # TODO: Support these? At the expense of invalidating
+ # the current hash, then having to recalculate at close()
+ def seekable(self) -> bool:
+ """Return False, seeking is not supported."""
+ return False
+
+ def readable(self) -> bool:
+ """Return False, reading is not supported."""
+ return False
+
+ def truncate(self, size: Optional[int] = None) -> int:
+ """Resize the stream, only if we haven't started writing."""
+ # FIXME: This breaks contract IOBase,
+ # as it means we would have to recalculate the hash
+ if size is not None:
+ raise OSError("WritableBagFile can't truncate")
+ return self.tell()
+
+
+def write_bag_file(
+ research_object: "ResearchObject", path: str, encoding: Optional[str] = ENCODING
+) -> Union[TextIOWrapper, WritableBagFile]:
+ """Write the bag file into our research object."""
+ research_object.self_check()
+ # For some reason below throws BlockingIOError
+ # fp = BufferedWriter(WritableBagFile(self, path))
+ bag_file = WritableBagFile(research_object, path)
+ if encoding is not None:
+ # encoding: match Tag-File-Character-Encoding: UTF-8
+ return TextIOWrapper(cast(BinaryIO, bag_file), encoding=encoding, newline="\n")
+ return bag_file
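+
+# For example (sketch): write_bag_file(ro, "bag-info.txt") returns a UTF-8
+# TextIOWrapper using "\n" newlines, while passing encoding=None (as
+# packed_workflow does below) returns the raw binary WritableBagFile.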
+
+
+def open_log_file_for_activity(
+ research_object: "ResearchObject", uuid_uri: str
+) -> Union[TextIOWrapper, WritableBagFile]:
+ """Begin the per-activity log."""
+ research_object.self_check()
+ # Ensure valid UUID for safe filenames
+ activity_uuid = uuid.UUID(uuid_uri)
+ if activity_uuid.urn == research_object.engine_uuid:
+ # It's the engine aka cwltool!
+ name = "engine"
+ else:
+ name = "activity"
+ p = os.path.join(LOGS, f"{name}.{activity_uuid}.txt")
+ _logger.debug(f"[provenance] Opening log file for {name}: {p}")
+ research_object.add_annotation(activity_uuid.urn, [p], CWLPROV["log"].uri)
+ return write_bag_file(research_object, p)
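+
+# The per-activity log ends up as "engine.<uuid>.txt" (for cwltool itself) or
+# "activity.<uuid>.txt" (for a step) under the LOGS folder, and is linked to
+# the activity through a CWLPROV "log" annotation.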
+
+
+def _write_ro_manifest(research_object: "ResearchObject") -> None:
+ # Does not have to be this order, but it's nice to be consistent
+ filename = "manifest.json"
+ createdOn, createdBy = research_object._self_made()
+ manifest = OrderedDict(
+ {
+ "@context": [
+ {"@base": f"{research_object.base_uri}{posix_path(METADATA)}/"},
+ "https://w3id.org/bundle/context",
+ ],
+ "id": "/",
+ "conformsTo": CWLPROV_VERSION,
+ "manifest": filename,
+ "createdOn": createdOn,
+ "createdBy": createdBy,
+ "authoredBy": research_object._authored_by(),
+ "aggregates": research_object._ro_aggregates(),
+ "annotations": research_object._ro_annotations(),
+ }
+ )
+
+ json_manifest = json_dumps(manifest, indent=4, ensure_ascii=False)
+ rel_path = str(PurePosixPath(METADATA) / filename)
+ json_manifest += "\n"
+ with write_bag_file(research_object, rel_path) as manifest_file:
+ manifest_file.write(json_manifest)
+
+
+def _write_bag_info(research_object: "ResearchObject") -> None:
+ with write_bag_file(research_object, "bag-info.txt") as info_file:
+ info_file.write("Bag-Software-Agent: %s\n" % research_object.cwltool_version)
+ # FIXME: require sha-512 of payload to comply with profile?
+ # FIXME: Update profile
+ info_file.write("BagIt-Profile-Identifier: https://w3id.org/ro/bagit/profile\n")
+ info_file.write("Bagging-Date: %s\n" % datetime.date.today().isoformat())
+ info_file.write("External-Description: Research Object of CWL workflow run\n")
+ if research_object.full_name:
+ info_file.write("Contact-Name: %s\n" % research_object.full_name)
+
+ # NOTE: We can't use the urn:uuid:{UUID} of the workflow run (a prov:Activity)
+ # as identifier for the RO/bagit (a prov:Entity). However the arcp base URI is good.
+ info_file.write("External-Identifier: %s\n" % research_object.base_uri)
+
+ # Calculate size of data/ (assuming no external fetch.txt files)
+ total_size = sum(research_object.bagged_size.values())
+ num_files = len(research_object.bagged_size)
+ info_file.write("Payload-Oxum: %d.%d\n" % (total_size, num_files))
+ _logger.debug("[provenance] Generated bagit metadata: %s", research_object.folder)
+
+
+def _finalize(research_object: "ResearchObject") -> None:
+ _write_ro_manifest(research_object)
+ _write_bag_info(research_object)
+ if not research_object.has_manifest:
+ (Path(research_object.folder) / "manifest-sha1.txt").touch()
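+        # (BagIt requires at least one payload manifest file, so create an
+        # empty manifest-sha1.txt when no payload files were bagged.)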
+
+
+def close_ro(research_object: "ResearchObject", save_to: Optional[str] = None) -> None:
+ """Close the Research Object, optionally saving to specified folder.
+
+ Closing will remove any temporary files used by this research object.
+ After calling this method, this ResearchObject instance can no longer
+ be used, except for no-op calls to .close().
+
+ The 'saveTo' folder should not exist - if it does, it will be deleted.
+
+ It is safe to call this function multiple times without the
+ 'saveTo' argument, e.g. within a try..finally block to
+ ensure the temporary files of this Research Object are removed.
+ """
+ if save_to is None:
+ if not research_object.closed:
+ _logger.debug("[provenance] Deleting temporary %s", research_object.folder)
+ shutil.rmtree(research_object.folder, ignore_errors=True)
+ else:
+ save_to = os.path.abspath(save_to)
+ _logger.info("[provenance] Finalizing Research Object")
+ _finalize(research_object) # write manifest etc.
+ # TODO: Write as archive (.zip or .tar) based on extension?
+
+ if os.path.isdir(save_to):
+ _logger.info("[provenance] Deleting existing %s", save_to)
+ shutil.rmtree(save_to)
+ shutil.move(research_object.folder, save_to)
+ _logger.info("[provenance] Research Object saved to %s", save_to)
+ research_object.folder = save_to
+ research_object.closed = True
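+
+# Typical lifecycle (sketch; "ro" is a hypothetical ResearchObject): either
+# finalize and keep the RO with close_ro(ro, save_to="/some/folder"), or
+# call close_ro(ro) - safe to repeat, e.g. in a finally block - to discard
+# the temporary files.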
+
+
+def packed_workflow(research_object: "ResearchObject", packed: str) -> None:
+ """Pack CWL description to generate re-runnable CWL object in RO."""
+ research_object.self_check()
+ rel_path = str(PurePosixPath(WORKFLOW) / "packed.cwl")
+ # Write as binary
+ with write_bag_file(research_object, rel_path, encoding=None) as write_pack:
+ write_pack.write(packed)
+ _logger.debug("[provenance] Added packed workflow: %s", rel_path)
+
+
+def create_job(
+ research_object: "ResearchObject", builder_job: CWLObjectType, is_output: bool = False
+) -> CWLObjectType:
+    """Generate the new job object with RO-specific relative paths."""
+    # TODO: customise the file
+ copied = copy.deepcopy(builder_job)
+ relativised_input_objecttemp: CWLObjectType = {}
+ research_object._relativise_files(copied)
+
+ def jdefault(o: Any) -> Dict[Any, Any]:
+ return dict(o)
+
+ if is_output:
+ rel_path = PurePosixPath(WORKFLOW) / "primary-output.json"
+ else:
+ rel_path = PurePosixPath(WORKFLOW) / "primary-job.json"
+ j = json_dumps(copied, indent=4, ensure_ascii=False, default=jdefault)
+ with write_bag_file(research_object, str(rel_path)) as file_path:
+ file_path.write(j + "\n")
+ _logger.debug("[provenance] Generated customised job file: %s", rel_path)
+    # Generate a dictionary mapping workflow-level input IDs to:
+    # 1) for files, the relativised location (which contains the hash)
+    # 2) for other attributes, the actual value
+ for key, value in copied.items():
+ if isinstance(value, MutableMapping):
+ if value.get("class") in ("File", "Directory"):
+ relativised_input_objecttemp[key] = value
+ else:
+ relativised_input_objecttemp[key] = value
+ research_object.relativised_input_object.update(
+ {k: v for k, v in relativised_input_objecttemp.items() if v}
+ )
+ return research_object.relativised_input_object
diff --git a/cwltool/cwlrdf.py b/cwltool/cwlrdf.py
index 08f392632..7dcf85cbc 100644
--- a/cwltool/cwlrdf.py
+++ b/cwltool/cwlrdf.py
@@ -4,11 +4,10 @@
from rdflib import Graph
from rdflib.query import ResultRow
+from ruamel.yaml.comments import CommentedMap
from schema_salad.jsonld_context import makerdf
from schema_salad.utils import ContextType
-from ruamel.yaml.comments import CommentedMap
-
from .cwlviewer import CWLViewer
from .process import Process
@@ -52,8 +51,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
for step, run, _ in qres:
stdout.write(
- '"%s" [label="%s"]\n'
- % (lastpart(step), f"{lastpart(step)} ({lastpart(run)})")
+ '"{}" [label="{}"]\n'.format(lastpart(step), f"{lastpart(step)} ({lastpart(run)})")
)
qres = cast(
@@ -70,12 +68,8 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
for step, inp, source in qres:
stdout.write('"%s" [shape=box]\n' % (lastpart(inp)))
- stdout.write(
- '"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(inp), "")
- )
- stdout.write(
- '"{}" -> "{}" [label="{}"]\n'.format(lastpart(inp), lastpart(step), "")
- )
+ stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(inp), ""))
+ stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(inp), lastpart(step), ""))
qres = cast(
Iterator[ResultRow],
@@ -90,9 +84,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
for step, out in qres:
stdout.write('"%s" [shape=box]\n' % (lastpart(out)))
- stdout.write(
- '"{}" -> "{}" [label="{}"]\n'.format(lastpart(step), lastpart(out), "")
- )
+ stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(step), lastpart(out), ""))
qres = cast(
Iterator[ResultRow],
@@ -107,9 +99,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
for out, source in qres:
stdout.write('"%s" [shape=octagon]\n' % (lastpart(out)))
- stdout.write(
- '"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(out), "")
- )
+ stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(out), ""))
qres = cast(
Iterator[ResultRow],
@@ -127,7 +117,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
- dotname = {} # type: Dict[str,str]
+ dotname: Dict[str, str] = {}
clusternode = {}
stdout.write("compound=true\n")
@@ -161,7 +151,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
),
) # ResultRow because the query is of type SELECT
- currentwf = None # type: Optional[str]
+ currentwf: Optional[str] = None
for wf, step, _run, runtype in qres:
if step not in dotname:
dotname[step] = lastpart(step)
@@ -172,7 +162,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
if wf in subworkflows:
if wf not in dotname:
dotname[wf] = "cluster_" + lastpart(wf)
- stdout.write(f'subgraph "{dotname[wf]}" {{ label="{lastpart(wf)}"\n')
+ stdout.write(f'subgraph "{dotname[wf]}" {{ label="{lastpart(wf)}"\n') # noqa: B907
currentwf = wf
clusternode[wf] = step
else:
@@ -180,8 +170,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
if str(runtype) != "https://w3id.org/cwl/cwl#Workflow":
stdout.write(
- '"%s" [label="%s"]\n'
- % (dotname[step], urllib.parse.urldefrag(str(step))[1])
+ f'"{dotname[step]}" [label="{urllib.parse.urldefrag(str(step))[1]}"]\n' # noqa: B907
)
if currentwf is not None:
@@ -211,7 +200,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
if sinkrun in clusternode:
attr += ' lhead="%s"' % dotname[sinkrun]
sink = clusternode[sinkrun]
- stdout.write(f'"{dotname[src]}" -> "{dotname[sink]}" [{attr}]\n')
+ stdout.write(f'"{dotname[src]}" -> "{dotname[sink]}" [{attr}]\n') # noqa: B907
def printdot(
@@ -219,5 +208,5 @@ def printdot(
ctx: ContextType,
stdout: IO[str],
) -> None:
- cwl_viewer = CWLViewer(printrdf(wf, ctx, "n3")) # type: CWLViewer
+ cwl_viewer: CWLViewer = CWLViewer(printrdf(wf, ctx, "n3"))
stdout.write(cwl_viewer.dot().replace(f"{wf.metadata['id']}#", ""))
diff --git a/cwltool/cwlviewer.py b/cwltool/cwlviewer.py
index d259f278c..47a404a25 100644
--- a/cwltool/cwlviewer.py
+++ b/cwltool/cwlviewer.py
@@ -52,9 +52,7 @@ def _set_inner_edges(self) -> None:
else "lightgoldenrodyellow"
)
source_style = (
- "dashed"
- if inner_edge_row["source_step_class"].endswith("Operation")
- else "filled"
+ "dashed" if inner_edge_row["source_step_class"].endswith("Operation") else "filled"
)
n = pydot.Node(
"",
@@ -77,9 +75,7 @@ def _set_inner_edges(self) -> None:
else "lightgoldenrodyellow"
)
target_style = (
- "dashed"
- if inner_edge_row["target_step_class"].endswith("Operation")
- else "filled"
+ "dashed" if inner_edge_row["target_step_class"].endswith("Operation") else "filled"
)
n = pydot.Node(
"",
@@ -123,9 +119,7 @@ def _set_input_edges(self) -> None:
)
n.set_name(str(input_row["input"]))
inputs_subgraph.add_node(n)
- self._dot_graph.add_edge(
- pydot.Edge(str(input_row["input"]), str(input_row["step"]))
- )
+ self._dot_graph.add_edge(pydot.Edge(str(input_row["input"]), str(input_row["step"])))
def _set_output_edges(self) -> None:
with open(_get_output_edges_query_path) as f:
@@ -153,9 +147,7 @@ def _set_output_edges(self) -> None:
)
n.set_name(str(output_edge_row["output"]))
outputs_graph.add_node(n)
- self._dot_graph.add_edge(
- pydot.Edge(output_edge_row["step"], output_edge_row["output"])
- )
+ self._dot_graph.add_edge(pydot.Edge(output_edge_row["step"], output_edge_row["output"]))
def _get_root_graph_uri(self) -> rdflib.term.Identifier:
with open(_get_root_query_path) as f:
diff --git a/cwltool/docker.py b/cwltool/docker.py
index af3c49d88..5c4fa8ea8 100644
--- a/cwltool/docker.py
+++ b/cwltool/docker.py
@@ -2,9 +2,9 @@
import csv
import datetime
+import json
import math
import os
-import re
import shutil
import subprocess # nosec
import sys
@@ -23,9 +23,9 @@
from .pathmapper import MapperEnt, PathMapper
from .utils import CWLObjectType, create_tmp_dir, ensure_writable
-_IMAGES = set() # type: Set[str]
+_IMAGES: Set[str] = set()
_IMAGES_LOCK = threading.Lock()
-__docker_machine_mounts = None # type: Optional[List[str]]
+__docker_machine_mounts: Optional[List[str]] = None
__docker_machine_mounts_lock = threading.Lock()
@@ -47,7 +47,7 @@ def _get_docker_machine_mounts() -> List[str]:
"-t",
"vboxsf",
],
- universal_newlines=True,
+ text=True,
).splitlines()
]
return __docker_machine_mounts
@@ -77,13 +77,13 @@ def _check_docker_machine_path(path: Optional[str]) -> None:
class DockerCommandLineJob(ContainerCommandLineJob):
- """Runs a CommandLineJob in a software container using the Docker engine."""
+ """Runs a :py:class:`~cwltool.job.CommandLineJob` in a software container using the Docker engine."""
def __init__(
self,
builder: Builder,
joborder: CWLObjectType,
- make_path_mapper: Callable[..., PathMapper],
+ make_path_mapper: Callable[[List[CWLObjectType], str, RuntimeContext, bool], PathMapper],
requirements: List[CWLObjectType],
hints: List[CWLObjectType],
name: str,
@@ -102,55 +102,32 @@ def get_image(
"""
Retrieve the relevant Docker container image.
- Returns True upon success
+ :returns: True upon success
"""
found = False
- if (
- "dockerImageId" not in docker_requirement
- and "dockerPull" in docker_requirement
- ):
+ if "dockerImageId" not in docker_requirement and "dockerPull" in docker_requirement:
docker_requirement["dockerImageId"] = docker_requirement["dockerPull"]
with _IMAGES_LOCK:
if docker_requirement["dockerImageId"] in _IMAGES:
return True
- for line in (
- subprocess.check_output( # nosec
- [self.docker_exec, "images", "--no-trunc", "--all"]
- )
- .decode("utf-8")
- .splitlines()
- ):
+ if (docker_image_id := docker_requirement.get("dockerImageId")) is not None:
try:
- match = re.match(r"^([^ ]+)\s+([^ ]+)\s+([^ ]+)", line)
- split = docker_requirement["dockerImageId"].split(":")
- if len(split) == 1:
- split.append("latest")
- elif len(split) == 2:
- # if split[1] doesn't match valid tag names, it is a part of repository
- if not re.match(r"[\w][\w.-]{0,127}", split[1]):
- split[0] = split[0] + ":" + split[1]
- split[1] = "latest"
- elif len(split) == 3:
- if re.match(r"[\w][\w.-]{0,127}", split[2]):
- split[0] = split[0] + ":" + split[1]
- split[1] = split[2]
- del split[2]
-
- # check for repository:tag match or image id match
- if match and (
- (split[0] == match.group(1) and split[1] == match.group(2))
- or docker_requirement["dockerImageId"] == match.group(3)
- ):
- found = True
- break
- except ValueError:
+                manifest = json.loads(
+                    subprocess.check_output(  # nosec
+                        [self.docker_exec, "inspect", docker_image_id]
+                    ).decode("utf-8")
+                )
+ found = manifest is not None
+ except (OSError, subprocess.CalledProcessError, UnicodeError):
pass
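+                # A missing image makes "docker inspect" exit non-zero
+                # (CalledProcessError), so "found" stays False and the image
+                # is pulled below when pull_image is set.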
if (force_pull or not found) and pull_image:
- cmd = [] # type: List[str]
+ cmd: List[str] = []
if "dockerPull" in docker_requirement:
cmd = [self.docker_exec, "pull", str(docker_requirement["dockerPull"])]
_logger.info(str(cmd))
@@ -178,18 +155,14 @@ def get_image(
docker_requirement["dockerLoad"],
)
with open(docker_requirement["dockerLoad"], "rb") as dload:
- loadproc = subprocess.Popen( # nosec
- cmd, stdin=dload, stdout=sys.stderr
- )
+ loadproc = subprocess.Popen(cmd, stdin=dload, stdout=sys.stderr) # nosec
else:
loadproc = subprocess.Popen( # nosec
cmd, stdin=subprocess.PIPE, stdout=sys.stderr
)
assert loadproc.stdin is not None # nosec
- _logger.info(
- "Sending GET request to %s", docker_requirement["dockerLoad"]
- )
- req = requests.get(docker_requirement["dockerLoad"], stream=True)
+ _logger.info("Sending GET request to %s", docker_requirement["dockerLoad"])
+ req = requests.get(docker_requirement["dockerLoad"], stream=True, timeout=60)
size = 0
for chunk in req.iter_content(1024 * 1024):
size += len(chunk)
@@ -229,16 +202,12 @@ def get_from_requirements(
if not shutil.which(self.docker_exec):
raise WorkflowException(f"{self.docker_exec} executable is not available")
- if self.get_image(
- cast(Dict[str, str], r), pull_image, force_pull, tmp_outdir_prefix
- ):
+ if self.get_image(cast(Dict[str, str], r), pull_image, force_pull, tmp_outdir_prefix):
return cast(Optional[str], r["dockerImageId"])
raise WorkflowException("Docker image %s not found" % r["dockerImageId"])
@staticmethod
- def append_volume(
- runtime: List[str], source: str, target: str, writable: bool = False
- ) -> None:
+ def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None:
"""Add binding arguments to the runtime list."""
options = [
"type=bind",
@@ -307,9 +276,7 @@ def add_writable_directory_volume(
os.makedirs(host_outdir_tgt)
else:
if self.inplace_update:
- self.append_volume(
- runtime, volume.resolved, volume.target, writable=True
- )
+ self.append_volume(runtime, volume.resolved, volume.target, writable=True)
else:
if not host_outdir_tgt:
tmpdir = create_tmp_dir(tmpdir_prefix)
@@ -374,7 +341,6 @@ def create_runtime(
runtime.append("--workdir=%s" % (self.builder.outdir))
if not user_space_docker_cmd:
-
if not runtimeContext.no_read_only:
runtime.append("--read-only=true")
@@ -390,9 +356,7 @@ def create_runtime(
euid, egid = docker_vm_id()
euid, egid = euid or os.geteuid(), egid or os.getgid()
- if runtimeContext.no_match_user is False and (
- euid is not None and egid is not None
- ):
+ if runtimeContext.no_match_user is False and (euid is not None and egid is not None):
runtime.append("--user=%d:%d" % (euid, egid))
if runtimeContext.rm_container:
@@ -401,7 +365,7 @@ def create_runtime(
if self.builder.resources.get("cudaDeviceCount"):
runtime.append("--gpus=" + str(self.builder.resources["cudaDeviceCount"]))
- cidfile_path = None # type: Optional[str]
+ cidfile_path: Optional[str] = None
# add parameters to docker to write a container ID file
if runtimeContext.user_space_docker_cmd is None:
if runtimeContext.cidfile_dir:
@@ -464,13 +428,13 @@ def create_runtime(
class PodmanCommandLineJob(DockerCommandLineJob):
- """Runs a CommandLineJob in a software container using the podman engine."""
+ """Runs a :py:class:`~cwltool.job.CommandLineJob` in a software container using the podman engine."""
def __init__(
self,
builder: Builder,
joborder: CWLObjectType,
- make_path_mapper: Callable[..., PathMapper],
+ make_path_mapper: Callable[[List[CWLObjectType], str, RuntimeContext, bool], PathMapper],
requirements: List[CWLObjectType],
hints: List[CWLObjectType],
name: str,
diff --git a/cwltool/docker_id.py b/cwltool/docker_id.py
index c38949d91..bb436b2cb 100644
--- a/cwltool/docker_id.py
+++ b/cwltool/docker_id.py
@@ -23,16 +23,14 @@ def docker_vm_id() -> Tuple[Optional[int], Optional[int]]:
def check_output_and_strip(cmd: List[str]) -> Optional[str]:
"""
- Pass a command list to subprocess.check_output.
+ Pass a command list to :py:func:`subprocess.check_output`.
Returning None if an expected exception is raised
:param cmd: The command to execute
- :return: Stripped string output of the command, or None if error
+ :return: Stripped string output of the command, or ``None`` if error
"""
try:
- result = subprocess.check_output( # nosec
- cmd, stderr=subprocess.STDOUT, universal_newlines=True
- )
+ result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, text=True) # nosec
return result.strip()
except (OSError, subprocess.CalledProcessError, TypeError, AttributeError):
# OSError is raised if command doesn't exist
@@ -45,7 +43,7 @@ def docker_machine_name() -> Optional[str]:
"""
Get the machine name of the active docker-machine machine.
- :return: Name of the active machine or None if error
+ :return: Name of the active machine or ``None`` if error
"""
return check_output_and_strip(["docker-machine", "active"])
@@ -65,7 +63,7 @@ def boot2docker_running() -> bool:
"""
Check if boot2docker CLI reports that boot2docker vm is running.
- :return: True if vm is running, False otherwise
+ :return: ``True`` if vm is running, ``False`` otherwise
"""
return cmd_output_matches(["boot2docker", "status"], "running")
@@ -74,7 +72,7 @@ def docker_machine_running() -> bool:
"""
Ask docker-machine for the active machine and checks if its VM is running.
- :return: True if vm is running, False otherwise
+ :return: ``True`` if vm is running, ``False`` otherwise
"""
machine_name = docker_machine_name()
if not machine_name:
diff --git a/cwltool/errors.py b/cwltool/errors.py
index b74bee75e..a39fb3bc9 100644
--- a/cwltool/errors.py
+++ b/cwltool/errors.py
@@ -11,4 +11,4 @@ class ArgumentException(Exception):
class GraphTargetMissingException(WorkflowException):
- """When a $graph is encountered and there is no target and no main/#main."""
+ """When a ``$graph`` is encountered and there is no target and no ``main``/``#main``."""
diff --git a/cwltool/executors.py b/cwltool/executors.py
index 16cf5f591..02b4a40d8 100644
--- a/cwltool/executors.py
+++ b/cwltool/executors.py
@@ -20,17 +20,19 @@
)
import psutil
+from mypy_extensions import mypyc_attr
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine
from .command_line_tool import CallbackJob, ExpressionJob
from .context import RuntimeContext, getdefault
+from .cuda import cuda_version_and_device_count
+from .cwlprov.provenance_profile import ProvenanceProfile
from .errors import WorkflowException
from .job import JobBase
from .loghandler import _logger
from .mutation import MutationManager
from .process import Process, cleanIntermediate, relocateOutputs
-from .provenance_profile import ProvenanceProfile
from .task_queue import TaskQueue
from .update import ORIGINAL_CWLVERSION
from .utils import CWLObjectType, JobsType
@@ -40,14 +42,15 @@
TMPDIR_LOCK = Lock()
+@mypyc_attr(allow_interpreted_subclasses=True)
class JobExecutor(metaclass=ABCMeta):
"""Abstract base job executor."""
def __init__(self) -> None:
"""Initialize."""
- self.final_output = [] # type: MutableSequence[Optional[CWLObjectType]]
- self.final_status = [] # type: List[str]
- self.output_dirs = set() # type: Set[str]
+ self.final_output: MutableSequence[Optional[CWLObjectType]] = []
+ self.final_status: List[str] = []
+ self.output_dirs: Set[str] = set()
def __call__(
self,
@@ -56,12 +59,9 @@ def __call__(
runtime_context: RuntimeContext,
logger: logging.Logger = _logger,
) -> Tuple[Optional[CWLObjectType], str]:
-
return self.execute(process, job_order_object, runtime_context, logger)
- def output_callback(
- self, out: Optional[CWLObjectType], process_status: str
- ) -> None:
+ def output_callback(self, out: Optional[CWLObjectType], process_status: str) -> None:
"""Collect the final status and outputs."""
self.final_status.append(process_status)
self.final_output.append(out)
@@ -84,14 +84,18 @@ def execute(
logger: logging.Logger = _logger,
) -> Tuple[Union[Optional[CWLObjectType]], str]:
"""Execute the process."""
+
+ self.final_output = []
+ self.final_status = []
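+        # (Reset per-run state so the same executor instance can be reused
+        # for multiple execute() calls without accumulating stale results.)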
+
if not runtime_context.basedir:
raise WorkflowException("Must provide 'basedir' in runtimeContext")
def check_for_abstract_op(tool: CWLObjectType) -> None:
if tool["class"] == "Operation":
- raise SourceLine(
- tool, "class", WorkflowException, runtime_context.debug
- ).makeError("Workflow has unrunnable abstract Operation")
+ raise SourceLine(tool, "class", WorkflowException, runtime_context.debug).makeError(
+ "Workflow has unrunnable abstract Operation"
+ )
process.visit(check_for_abstract_op)
@@ -107,7 +111,7 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
runtime_context.toplevel = True
runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
- job_reqs = None # type: Optional[List[CWLObjectType]]
+ job_reqs: Optional[List[CWLObjectType]] = None
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
@@ -119,10 +123,8 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
List[CWLObjectType],
job_order_object["https://w3id.org/cwl/cwl#requirements"],
)
- elif (
- "cwl:defaults" in process.metadata
- and "https://w3id.org/cwl/cwl#requirements"
- in cast(CWLObjectType, process.metadata["cwl:defaults"])
+ elif "cwl:defaults" in process.metadata and "https://w3id.org/cwl/cwl#requirements" in cast(
+ CWLObjectType, process.metadata["cwl:defaults"]
):
if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
@@ -141,12 +143,10 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
process.requirements.append(req)
self.run_jobs(process, job_order_object, logger, runtime_context)
+ if runtime_context.validate_only is True:
+ return (None, "ValidationSuccess")
- if (
- self.final_output
- and self.final_output[0] is not None
- and finaloutdir is not None
- ):
+ if self.final_output and self.final_output[0] is not None and finaloutdir is not None:
self.final_output[0] = relocateOutputs(
self.final_output[0],
finaloutdir,
@@ -159,7 +159,7 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
if runtime_context.rm_tmpdir:
if not runtime_context.cachedir:
- output_dirs = self.output_dirs # type: Iterable[str]
+ output_dirs: Iterable[str] = self.output_dirs
else:
output_dirs = filter(
lambda x: not x.startswith(runtime_context.cachedir), # type: ignore
@@ -168,19 +168,14 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
cleanIntermediate(output_dirs)
if self.final_output and self.final_status:
-
if (
runtime_context.research_obj is not None
- and isinstance(
- process, (JobBase, Process, WorkflowJobStep, WorkflowJob)
- )
+ and isinstance(process, (JobBase, Process, WorkflowJobStep, WorkflowJob))
and process.parent_wf
):
- process_run_id = None # type: Optional[str]
+ process_run_id: Optional[str] = None
name = "primary"
- process.parent_wf.generate_output_prov(
- self.final_output[0], process_run_id, name
- )
+ process.parent_wf.generate_output_prov(self.final_output[0], process_run_id, name)
process.parent_wf.document.wasEndedBy(
process.parent_wf.workflow_run_uri,
None,
@@ -192,6 +187,7 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
return (None, "permanentFail")
+@mypyc_attr(allow_interpreted_subclasses=True)
class SingleJobExecutor(JobExecutor):
"""Default single-threaded CWL reference executor."""
@@ -202,14 +198,10 @@ def run_jobs(
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
-
- process_run_id = None # type: Optional[str]
+ process_run_id: Optional[str] = None
# define provenance profile for single commandline tool
- if (
- not isinstance(process, Workflow)
- and runtime_context.research_obj is not None
- ):
+ if not isinstance(process, Workflow) and runtime_context.research_obj is not None:
process.provenance_object = ProvenanceProfile(
runtime_context.research_obj,
full_name=runtime_context.cwl_full_name,
@@ -248,6 +240,16 @@ def run_jobs(
process_run_id = prov_obj.record_process_start(process, job)
runtime_context = runtime_context.copy()
runtime_context.process_run_id = process_run_id
+ if runtime_context.validate_only is True:
+ if isinstance(job, WorkflowJob):
+ name = job.tool.lc.filename
+ else:
+ name = getattr(job, "name", str(job))
+ print(
+ f"{name} is valid CWL. No errors detected in the inputs.",
+ file=runtime_context.validate_stdout,
+ )
+ return
job.run(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
@@ -274,14 +276,16 @@ class MultithreadedJobExecutor(JobExecutor):
def __init__(self) -> None:
"""Initialize."""
super().__init__()
- self.exceptions = [] # type: List[WorkflowException]
- self.pending_jobs = [] # type: List[JobsType]
+ self.exceptions: List[WorkflowException] = []
+ self.pending_jobs: List[JobsType] = []
self.pending_jobs_lock = threading.Lock()
self.max_ram = int(psutil.virtual_memory().available / 2**20)
self.max_cores = float(psutil.cpu_count())
+ self.max_cuda = cuda_version_and_device_count()[1]
self.allocated_ram = float(0)
self.allocated_cores = float(0)
+ self.allocated_cuda: int = 0
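+        # Bookkeeping: run_job() increments these counters before queueing a
+        # job and _runner() decrements them when the job finishes.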
def select_resources(
self, request: Dict[str, Union[int, float]], runtime_context: RuntimeContext
@@ -289,12 +293,15 @@ def select_resources(
"""Naïve check for available cpu cores and memory."""
result: Dict[str, Union[int, float]] = {}
maxrsc = {"cores": self.max_cores, "ram": self.max_ram}
- for rsc in ("cores", "ram"):
+ resources_types = {"cores", "ram"}
+ if "cudaDeviceCountMin" in request or "cudaDeviceCountMax" in request:
+ maxrsc["cudaDeviceCount"] = self.max_cuda
+ resources_types.add("cudaDeviceCount")
+ for rsc in resources_types:
rsc_min = request[rsc + "Min"]
if rsc_min > maxrsc[rsc]:
raise WorkflowException(
- f"Requested at least {rsc_min} {rsc} but only "
- f"{maxrsc[rsc]} available"
+ f"Requested at least {rsc_min} {rsc} but only " f"{maxrsc[rsc]} available"
)
rsc_max = request[rsc + "Max"]
if rsc_max < maxrsc[rsc]:
@@ -305,13 +312,14 @@ def select_resources(
result["tmpdirSize"] = math.ceil(request["tmpdirMin"])
result["outdirSize"] = math.ceil(request["outdirMin"])
- if "cudaDeviceCount" in request:
- result["cudaDeviceCount"] = request["cudaDeviceCount"]
-
return result
- def _runner(self, job, runtime_context, TMPDIR_LOCK):
- # type: (Union[JobBase, WorkflowJob, CallbackJob, ExpressionJob], RuntimeContext, threading.Lock) -> None
+ def _runner(
+ self,
+ job: Union[JobBase, WorkflowJob, CallbackJob, ExpressionJob],
+ runtime_context: RuntimeContext,
+ TMPDIR_LOCK: threading.Lock,
+ ) -> None:
"""Job running thread."""
try:
_logger.debug(
@@ -334,6 +342,10 @@ def _runner(self, job, runtime_context, TMPDIR_LOCK):
self.allocated_ram -= ram
cores = job.builder.resources["cores"]
self.allocated_cores -= cores
+ cudaDevices: int = cast(
+ int, job.builder.resources.get("cudaDeviceCount", 0)
+ )
+ self.allocated_cuda -= cudaDevices
runtime_context.workflow_eval_lock.notify_all()
def run_job(
@@ -357,16 +369,21 @@ def run_job(
if isinstance(job, JobBase):
ram = job.builder.resources["ram"]
cores = job.builder.resources["cores"]
- if ram > self.max_ram or cores > self.max_cores:
+ cudaDevices = cast(int, job.builder.resources.get("cudaDeviceCount", 0))
+ if ram > self.max_ram or cores > self.max_cores or cudaDevices > self.max_cuda:
_logger.error(
'Job "%s" cannot be run, requests more resources (%s) '
- "than available on this host (max ram %d, max cores %d",
+ "than available on this host (already allocated ram is %d, "
+ "allocated cores is %d, allocated CUDA is %d, "
+ "max ram %d, max cores %d, max CUDA %d).",
job.name,
job.builder.resources,
self.allocated_ram,
self.allocated_cores,
+ self.allocated_cuda,
self.max_ram,
self.max_cores,
+ self.max_cuda,
)
self.pending_jobs.remove(job)
return
@@ -374,17 +391,21 @@ def run_job(
if (
self.allocated_ram + ram > self.max_ram
or self.allocated_cores + cores > self.max_cores
+ or self.allocated_cuda + cudaDevices > self.max_cuda
):
_logger.debug(
'Job "%s" cannot run yet, resources (%s) are not '
"available (already allocated ram is %d, allocated cores is %d, "
- "max ram %d, max cores %d",
+ "allocated CUDA devices is %d, "
+ "max ram %d, max cores %d, max CUDA %d).",
job.name,
job.builder.resources,
self.allocated_ram,
self.allocated_cores,
+ self.allocated_cuda,
self.max_ram,
self.max_cores,
+ self.max_cuda,
)
n += 1
continue
@@ -394,14 +415,15 @@ def run_job(
self.allocated_ram += ram
cores = job.builder.resources["cores"]
self.allocated_cores += cores
+            cuda = cast(int, job.builder.resources.get("cudaDeviceCount", 0))
+ self.allocated_cuda += cuda
self.taskqueue.add(
functools.partial(self._runner, job, runtime_context, TMPDIR_LOCK),
runtime_context.workflow_eval_lock,
)
self.pending_jobs.remove(job)
- def wait_for_next_completion(self, runtime_context):
- # type: (RuntimeContext) -> None
+ def wait_for_next_completion(self, runtime_context: RuntimeContext) -> None:
"""Wait for jobs to finish."""
if runtime_context.workflow_eval_lock is not None:
runtime_context.workflow_eval_lock.wait(timeout=3)
@@ -415,20 +437,12 @@ def run_jobs(
logger: logging.Logger,
runtime_context: RuntimeContext,
) -> None:
-
- self.taskqueue = TaskQueue(
- threading.Lock(), psutil.cpu_count()
- ) # type: TaskQueue
+ self.taskqueue: TaskQueue = TaskQueue(threading.Lock(), psutil.cpu_count())
try:
-
- jobiter = process.job(
- job_order_object, self.output_callback, runtime_context
- )
+ jobiter = process.job(job_order_object, self.output_callback, runtime_context)
if runtime_context.workflow_eval_lock is None:
- raise WorkflowException(
- "runtimeContext.workflow_eval_lock must not be None"
- )
+ raise WorkflowException("runtimeContext.workflow_eval_lock must not be None")
runtime_context.workflow_eval_lock.acquire()
for job in jobiter:
diff --git a/cwltool/extensions-v1.1.yml b/cwltool/extensions-v1.1.yml
index 603c40f05..b901402e9 100644
--- a/cwltool/extensions-v1.1.yml
+++ b/cwltool/extensions-v1.1.yml
@@ -1,6 +1,7 @@
$base: http://commonwl.org/cwltool#
$namespaces:
cwl: "https://w3id.org/cwl/cwl#"
+ cwltool: "http://commonwl.org/cwltool#"
$graph:
- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml
diff --git a/cwltool/extensions-v1.2.yml b/cwltool/extensions-v1.2.yml
index d7fef1160..b2ecf5614 100644
--- a/cwltool/extensions-v1.2.yml
+++ b/cwltool/extensions-v1.2.yml
@@ -1,6 +1,7 @@
$base: http://commonwl.org/cwltool#
$namespaces:
cwl: "https://w3id.org/cwl/cwl#"
+ cwltool: "http://commonwl.org/cwltool#"
$graph:
- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml
diff --git a/cwltool/factory.py b/cwltool/factory.py
index 439177e2d..85d7344e6 100644
--- a/cwltool/factory.py
+++ b/cwltool/factory.py
@@ -18,7 +18,7 @@ def __init__(self, out: Optional[CWLObjectType], status: str) -> None:
class Callable:
- """Result of Factory.make()."""
+ """Result of ::py:func:`Factory.make`."""
def __init__(self, t: Process, factory: "Factory") -> None:
"""Initialize."""
diff --git a/cwltool/job.py b/cwltool/job.py
index 8b206e185..d1950154b 100644
--- a/cwltool/job.py
+++ b/cwltool/job.py
@@ -18,6 +18,7 @@
from threading import Timer
from typing import (
IO,
+ TYPE_CHECKING,
Callable,
Dict,
Iterable,
@@ -38,7 +39,6 @@
from prov.model import PROV
from schema_salad.sourceline import SourceLine
from schema_salad.utils import json_dump, json_dumps
-from typing_extensions import TYPE_CHECKING
from . import env_to_stdout, run_job
from .builder import Builder
@@ -63,7 +63,9 @@
)
if TYPE_CHECKING:
- from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
+ from .cwlprov.provenance_profile import (
+ ProvenanceProfile, # pylint: disable=unused-import
+ )
needs_shell_quoting_re = re.compile(r"""(^$|[\s|&;()<>\'"$@])""")
FORCE_SHELLED_POPEN = os.getenv("CWLTOOL_FORCE_SHELL_POPEN", "0") == "1"
@@ -91,9 +93,7 @@ def relink_initialworkdir(
# directory, so therefore ineligable for being an output file.
# Thus, none of our business
continue
- host_outdir_tgt = os.path.join(
- host_outdir, vol.target[len(container_outdir) + 1 :]
- )
+ host_outdir_tgt = os.path.join(host_outdir, vol.target[len(container_outdir) + 1 :])
if os.path.islink(host_outdir_tgt) or os.path.isfile(host_outdir_tgt):
try:
os.remove(host_outdir_tgt)
@@ -120,7 +120,7 @@ def __init__(
self,
builder: Builder,
joborder: CWLObjectType,
- make_path_mapper: Callable[..., PathMapper],
+ make_path_mapper: Callable[[List[CWLObjectType], str, RuntimeContext, bool], PathMapper],
requirements: List[CWLObjectType],
hints: List[CWLObjectType],
name: str,
@@ -129,41 +129,41 @@ def __init__(
super().__init__()
self.builder = builder
self.joborder = joborder
- self.stdin = None # type: Optional[str]
- self.stderr = None # type: Optional[str]
- self.stdout = None # type: Optional[str]
- self.successCodes = [] # type: Iterable[int]
- self.temporaryFailCodes = [] # type: Iterable[int]
- self.permanentFailCodes = [] # type: Iterable[int]
+ self.stdin: Optional[str] = None
+ self.stderr: Optional[str] = None
+ self.stdout: Optional[str] = None
+ self.successCodes: Iterable[int] = []
+ self.temporaryFailCodes: Iterable[int] = []
+ self.permanentFailCodes: Iterable[int] = []
self.requirements = requirements
self.hints = hints
self.name = name
- self.command_line = [] # type: List[str]
+ self.command_line: List[str] = []
self.pathmapper = PathMapper([], "", "")
self.make_path_mapper = make_path_mapper
- self.generatemapper = None # type: Optional[PathMapper]
+ self.generatemapper: Optional[PathMapper] = None
# set in CommandLineTool.job(i)
self.collect_outputs = cast(CollectOutputsType, None)
- self.output_callback = None # type: Optional[OutputCallbackType]
+ self.output_callback: Optional[OutputCallbackType] = None
self.outdir = ""
self.tmpdir = ""
- self.environment = {} # type: MutableMapping[str, str]
- self.generatefiles = {
+ self.environment: MutableMapping[str, str] = {}
+ self.generatefiles: DirectoryType = {
"class": "Directory",
"listing": [],
"basename": "",
- } # type: DirectoryType
- self.stagedir = None # type: Optional[str]
+ }
+ self.stagedir: Optional[str] = None
self.inplace_update = False
- self.prov_obj = None # type: Optional[ProvenanceProfile]
- self.parent_wf = None # type: Optional[ProvenanceProfile]
- self.timelimit = None # type: Optional[int]
- self.networkaccess = False # type: bool
- self.mpi_procs = None # type: Optional[int]
+ self.prov_obj: Optional[ProvenanceProfile] = None
+ self.parent_wf: Optional[ProvenanceProfile] = None
+ self.timelimit: Optional[int] = None
+ self.networkaccess: bool = False
+ self.mpi_procs: Optional[int] = None
- def __repr__(self): # type: () -> str
+ def __repr__(self) -> str:
"""Represent this Job object."""
return "CommandLineJob(%s)" % self.name
@@ -176,14 +176,9 @@ def run(
pass
def _setup(self, runtimeContext: RuntimeContext) -> None:
-
- cuda_req, _ = self.builder.get_requirement(
- "http://commonwl.org/cwltool#CUDARequirement"
- )
+ cuda_req, _ = self.builder.get_requirement("http://commonwl.org/cwltool#CUDARequirement")
if cuda_req:
- count = cuda_check(
- cuda_req, math.ceil(self.builder.resources["cudaDeviceCount"])
- )
+ count = cuda_check(cuda_req, math.ceil(self.builder.resources["cudaDeviceCount"]))
if count == 0:
raise WorkflowException("Could not satisfy CUDARequirement")
@@ -195,15 +190,13 @@ def is_streamable(file: str) -> bool:
return False
for inp in self.joborder.values():
if isinstance(inp, dict) and inp.get("location", None) == file:
- return inp.get("streamable", False)
+ return cast(bool, inp.get("streamable", False))
return False
for knownfile in self.pathmapper.files():
p = self.pathmapper.mapper(knownfile)
if p.type == "File" and not os.path.isfile(p[0]) and p.staged:
- if not (
- is_streamable(knownfile) and stat.S_ISFIFO(os.stat(p[0]).st_mode)
- ):
+ if not (is_streamable(knownfile) and stat.S_ISFIFO(os.stat(p[0]).st_mode)):
raise WorkflowException(
"Input file %s (at %s) not found or is not a regular "
"file." % (knownfile, self.pathmapper.mapper(knownfile)[0])
@@ -223,10 +216,7 @@ def is_streamable(file: str) -> bool:
"[job %s] initial work dir %s",
self.name,
json_dumps(
- {
- p: self.generatemapper.mapper(p)
- for p in self.generatemapper.files()
- },
+ {p: self.generatemapper.mapper(p) for p in self.generatemapper.files()},
indent=4,
),
)
@@ -239,7 +229,7 @@ def _execute(
runtime: List[str],
env: MutableMapping[str, str],
runtimeContext: RuntimeContext,
- monitor_function=None, # type: Optional[Callable[[subprocess.Popen[str]], None]]
+ monitor_function: Optional[Callable[["subprocess.Popen[str]"], None]] = None,
) -> None:
"""Execute the tool, either directly or via script.
@@ -284,12 +274,8 @@ def _execute(
]
),
" < %s" % self.stdin if self.stdin else "",
- " > %s" % os.path.join(self.base_path_logs, self.stdout)
- if self.stdout
- else "",
- " 2> %s" % os.path.join(self.base_path_logs, self.stderr)
- if self.stderr
- else "",
+ " > %s" % os.path.join(self.base_path_logs, self.stdout) if self.stdout else "",
+ " 2> %s" % os.path.join(self.base_path_logs, self.stderr) if self.stderr else "",
)
if self.joborder is not None and runtimeContext.research_obj is not None:
job_order = self.joborder
@@ -307,7 +293,7 @@ def _execute(
"or prov_obj is missing from runtimeContext: "
"{}".format(runtimeContext)
)
- outputs = {} # type: CWLObjectType
+ outputs: CWLObjectType = {}
try:
stdin_path = None
if self.stdin is not None:
@@ -341,7 +327,7 @@ def stderr_stdout_log_path(
runtimeContext.secret_store.retrieve(cast(CWLOutputType, env)),
)
- job_script_contents = None # type: Optional[str]
+ job_script_contents: Optional[str] = None
builder: Optional[Builder] = getattr(self, "builder", None)
if builder is not None:
job_script_contents = builder.build_job_script(commands)
@@ -392,8 +378,7 @@ def stderr_stdout_log_path(
)
else:
raise ValueError(
- "'listing' in self.generatefiles but no "
- "generatemapper was setup."
+ "'listing' in self.generatefiles but no " "generatemapper was setup."
)
runtimeContext.log_dir_handler(
self.outdir, self.base_path_logs, stdout_path, stderr_path
@@ -433,9 +418,7 @@ def stderr_stdout_log_path(
_logger.info("[job %s] completed %s", self.name, processStatus)
if _logger.isEnabledFor(logging.DEBUG):
- _logger.debug(
- "[job %s] outputs %s", self.name, json_dumps(outputs, indent=4)
- )
+ _logger.debug("[job %s] outputs %s", self.name, json_dumps(outputs, indent=4))
if self.generatemapper is not None and runtimeContext.secret_store is not None:
# Delete any runtime-generated files containing secrets.
@@ -452,15 +435,13 @@ def stderr_stdout_log_path(
os.remove(host_outdir_tgt)
if runtimeContext.workflow_eval_lock is None:
- raise WorkflowException(
- "runtimeContext.workflow_eval_lock must not be None"
- )
+ raise WorkflowException("runtimeContext.workflow_eval_lock must not be None")
if self.output_callback:
with runtimeContext.workflow_eval_lock:
self.output_callback(outputs, processStatus)
- if self.stagedir is not None and os.path.exists(self.stagedir):
+ if runtimeContext.rm_tmpdir and self.stagedir is not None and os.path.exists(self.stagedir):
_logger.debug(
"[job %s] Removing input staging directory %s",
self.name,
@@ -469,9 +450,7 @@ def stderr_stdout_log_path(
shutil.rmtree(self.stagedir, True)
if runtimeContext.rm_tmpdir:
- _logger.debug(
- "[job %s] Removing temporary directory %s", self.name, self.tmpdir
- )
+ _logger.debug("[job %s] Removing temporary directory %s", self.name, self.tmpdir)
shutil.rmtree(self.tmpdir, True)
@abstractmethod
@@ -507,15 +486,13 @@ def prepare_environment(
self._preserve_environment_on_containers_warning()
env.update(os.environ)
elif runtimeContext.preserve_environment:
- self._preserve_environment_on_containers_warning(
- runtimeContext.preserve_environment
- )
+ self._preserve_environment_on_containers_warning(runtimeContext.preserve_environment)
for key in runtimeContext.preserve_environment:
try:
env[key] = os.environ[key]
except KeyError:
_logger.warning(
- f"Attempting to preserve environment variable '{key}' which is not present"
+ f"Attempting to preserve environment variable {key!r} which is not present"
)
# Set required env vars
@@ -527,24 +504,33 @@ def prepare_environment(
# Set on ourselves
self.environment = env
- def process_monitor(self, sproc): # type: (subprocess.Popen[str]) -> None
+ def process_monitor(self, sproc: "subprocess.Popen[str]") -> None:
+ """Watch a process, logging its max memory usage."""
monitor = psutil.Process(sproc.pid)
# Value must be list rather than integer to utilise pass-by-reference in python
- memory_usage = [None] # type: MutableSequence[Optional[int]]
+ memory_usage: MutableSequence[Optional[int]] = [None]
+
+ mem_tm: "Optional[Timer]" = None
def get_tree_mem_usage(memory_usage: MutableSequence[Optional[int]]) -> None:
- children = monitor.children()
+ nonlocal mem_tm
try:
- rss = monitor.memory_info().rss
- while len(children):
- rss += sum(process.memory_info().rss for process in children)
- children = list(
- itertools.chain(*(process.children() for process in children))
- )
- if memory_usage[0] is None or rss > memory_usage[0]:
- memory_usage[0] = rss
+ with monitor.oneshot():
+ children = monitor.children()
+ rss = monitor.memory_info().rss
+ while len(children):
+ rss += sum(process.memory_info().rss for process in children)
+ children = list(
+ itertools.chain(*(process.children() for process in children))
+ )
+ if memory_usage[0] is None or rss > memory_usage[0]:
+ memory_usage[0] = rss
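+            # Re-arm the one-shot Timer so memory sampling repeats every
+            # second until the monitored process disappears.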
+ mem_tm = Timer(interval=1, function=get_tree_mem_usage, args=(memory_usage,))
+ mem_tm.daemon = True
+ mem_tm.start()
except psutil.NoSuchProcess:
- mem_tm.cancel()
+ if mem_tm is not None:
+ mem_tm.cancel()
mem_tm = Timer(interval=1, function=get_tree_mem_usage, args=(memory_usage,))
mem_tm.daemon = True
@@ -558,9 +544,7 @@ def get_tree_mem_usage(memory_usage: MutableSequence[Optional[int]]) -> None:
round(memory_usage[0] / (2**20)),
)
else:
- _logger.debug(
- "Could not collect memory usage, job ended before monitoring began."
- )
+ _logger.debug("Could not collect memory usage, job ended before monitoring began.")
class CommandLineJob(JobBase):
@@ -569,7 +553,6 @@ def run(
runtimeContext: RuntimeContext,
tmpdir_lock: Optional[threading.Lock] = None,
) -> None:
-
if tmpdir_lock:
with tmpdir_lock:
if not os.path.exists(self.tmpdir):
@@ -609,8 +592,9 @@ def _required_env(self) -> Dict[str, str]:
env["HOME"] = self.outdir
env["TMPDIR"] = self.tmpdir
env["PATH"] = os.environ["PATH"]
- if "SYSTEMROOT" in os.environ:
- env["SYSTEMROOT"] = os.environ["SYSTEMROOT"]
+ for extra in ("SYSTEMROOT", "QEMU_LD_PREFIX"):
+ if extra in os.environ:
+ env[extra] = os.environ[extra]
return env
@@ -642,9 +626,7 @@ def create_runtime(
@staticmethod
@abstractmethod
- def append_volume(
- runtime: List[str], source: str, target: str, writable: bool = False
- ) -> None:
+ def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None:
"""Add binding arguments to the runtime list."""
@abstractmethod
@@ -683,7 +665,9 @@ def _preserve_environment_on_containers_warning(
flags = "--preserve-environment={" + ", ".join(varnames) + "}"
_logger.warning(
- f"You have specified `{flags}` while running a container which will override variables set in the container. This may break the container, be non-portable, and/or affect reproducibility."
+ f"You have specified {flags!r} while running a container which will "
+ "override variables set in the container. This may break the "
+ "container, be non-portable, and/or affect reproducibility."
)
def create_file_and_add_volume(
@@ -728,11 +712,9 @@ def add_volumes(
"""Append volume mappings to the runtime option list."""
container_outdir = self.builder.outdir
for key, vol in (itm for itm in pathmapper.items() if itm[1].staged):
- host_outdir_tgt = None # type: Optional[str]
+ host_outdir_tgt: Optional[str] = None
if vol.target.startswith(container_outdir + "/"):
- host_outdir_tgt = os.path.join(
- self.outdir, vol.target[len(container_outdir) + 1 :]
- )
+ host_outdir_tgt = os.path.join(self.outdir, vol.target[len(container_outdir) + 1 :])
if not host_outdir_tgt and not any_path_okay:
raise WorkflowException(
"No mandatory DockerRequirement, yet path is outside "
@@ -742,13 +724,9 @@ def add_volumes(
if vol.type in ("File", "Directory"):
self.add_file_or_directory_volume(runtime, vol, host_outdir_tgt)
elif vol.type == "WritableFile":
- self.add_writable_file_volume(
- runtime, vol, host_outdir_tgt, tmpdir_prefix
- )
+ self.add_writable_file_volume(runtime, vol, host_outdir_tgt, tmpdir_prefix)
elif vol.type == "WritableDirectory":
- self.add_writable_directory_volume(
- runtime, vol, host_outdir_tgt, tmpdir_prefix
- )
+ self.add_writable_directory_volume(runtime, vol, host_outdir_tgt, tmpdir_prefix)
elif vol.type in ["CreateFile", "CreateWritableFile"]:
new_path = self.create_file_and_add_volume(
runtime, vol, host_outdir_tgt, secret_store, tmpdir_prefix
@@ -780,18 +758,6 @@ def run(
img_id = str(docker_req["dockerImageId"])
elif "dockerPull" in docker_req:
img_id = str(docker_req["dockerPull"])
- cmd = [user_space_docker_cmd, "pull", img_id]
- _logger.info(str(cmd))
- try:
- subprocess.check_call(cmd, stdout=sys.stderr) # nosec
- except OSError:
- raise SourceLine(
- docker_req, None, WorkflowException, debug
- ).makeError(
- f"Either Docker container {img_id} is not available with "
- f"user space docker implementation {user_space_docker_cmd} "
- f" or {user_space_docker_cmd} is missing or broken."
- )
else:
raise SourceLine(docker_req, None, WorkflowException, debug).makeError(
"Docker image must be specified as 'dockerImageId' or "
@@ -815,11 +781,7 @@ def run(
if default_container:
img_id = str(default_container)
- if (
- docker_req is not None
- and img_id is None
- and runtimeContext.use_container
- ):
+ if docker_req is not None and img_id is None and runtimeContext.use_container:
raise Exception("Docker image not available")
if (
@@ -848,9 +810,7 @@ def run(
_logger.debug("%s error", container, exc_info=True)
if docker_is_req:
raise UnsupportedRequirement(
- "{} is required to run this tool: {}".format(
- container, str(err)
- )
+ f"{container} is required to run this tool: {str(err)}"
) from err
else:
raise WorkflowException(
@@ -858,7 +818,7 @@ def run(
"--no-container to disable {0}, or install "
"a user space Docker replacement like uDocker with "
"--user-space-docker-cmd.: {1}".format(container, err)
- )
+ ) from err
self._setup(runtimeContext)
@@ -886,7 +846,7 @@ def docker_monitor(
tmpdir_prefix: str,
cleanup_cidfile: bool,
docker_exe: str,
- process, # type: subprocess.Popen[str]
+ process: "subprocess.Popen[str]",
) -> None:
"""Record memory usage of the running Docker container."""
# Todo: consider switching to `docker create` / `docker start`
@@ -894,22 +854,24 @@ def docker_monitor(
# to stdout, but the container is frozen, thus allowing us to start the
# monitoring process without dealing with the cidfile or too-fast
# container execution
- cid = None # type: Optional[str]
+ cid: Optional[str] = None
while cid is None:
time.sleep(1)
+            # Guard against a race: the job may already have finished by the
+            # time we get here, so refresh returncode before checking it.
+ if process.returncode is None:
+ process.poll()
if process.returncode is not None:
if cleanup_cidfile:
try:
os.remove(cidfile)
except OSError as exc:
- _logger.warning(
- "Ignored error cleaning up %s cidfile: %s", docker_exe, exc
- )
+ _logger.warning("Ignored error cleaning up %s cidfile: %s", docker_exe, exc)
return
try:
with open(cidfile) as cidhandle:
cid = cidhandle.readline().strip()
- except (OSError):
+ except OSError:
cid = None
max_mem = psutil.virtual_memory().total
tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
@@ -931,23 +893,19 @@ def docker_monitor(
except OSError as exc:
_logger.warning("Ignored error with %s stats: %s", docker_exe, exc)
return
- max_mem_percent = 0 # type: float
- mem_percent = 0 # type: float
+ max_mem_percent: float = 0.0
+ mem_percent: float = 0.0
with open(stats_file_name) as stats:
while True:
line = stats.readline()
if not line:
break
try:
- mem_percent = float(
- re.sub(CONTROL_CODE_RE, "", line).replace("%", "")
- )
+ mem_percent = float(re.sub(CONTROL_CODE_RE, "", line).replace("%", ""))
if mem_percent > max_mem_percent:
max_mem_percent = mem_percent
except ValueError as exc:
- _logger.debug(
- "%s stats parsing error in line %s: %s", docker_exe, line, exc
- )
+ _logger.debug("%s stats parsing error in line %s: %s", docker_exe, line, exc)
_logger.info(
"[job %s] Max memory used: %iMiB",
self.name,
@@ -968,14 +926,12 @@ def _job_popen(
job_script_contents: Optional[str] = None,
timelimit: Optional[int] = None,
name: Optional[str] = None,
- monitor_function=None, # type: Optional[Callable[[subprocess.Popen[str]], None]]
- default_stdout=None, # type: Optional[Union[IO[bytes], TextIO]]
- default_stderr=None, # type: Optional[Union[IO[bytes], TextIO]]
+ monitor_function: Optional[Callable[["subprocess.Popen[str]"], None]] = None,
+ default_stdout: Optional[Union[IO[bytes], TextIO]] = None,
+ default_stderr: Optional[Union[IO[bytes], TextIO]] = None,
) -> int:
-
if job_script_contents is None and not FORCE_SHELLED_POPEN:
-
- stdin = subprocess.PIPE # type: Union[IO[bytes], int]
+ stdin: Union[IO[bytes], int] = subprocess.PIPE
if stdin_path is not None:
stdin = open(stdin_path, "rb")
@@ -1056,9 +1012,7 @@ def terminate(): # type: () -> None
job_dir = make_job_dir()
try:
- with open(
- os.path.join(job_dir, "job.json"), mode="w", encoding="utf-8"
- ) as job_file:
+ with open(os.path.join(job_dir, "job.json"), mode="w", encoding="utf-8") as job_file:
json_dump(job_description, job_file, ensure_ascii=False)
job_script = os.path.join(job_dir, "run_job.bash")
with open(job_script, "w") as _:
@@ -1085,6 +1039,26 @@ def terminate(): # type: () -> None
if sproc.stdin is not None:
sproc.stdin.close()
+ tm = None
+ if timelimit is not None and timelimit > 0:
+
+ def terminate(): # type: () -> None
+ try:
+ _logger.warning(
+ "[job %s] exceeded time limit of %d seconds and will be terminated",
+ name,
+ timelimit,
+ )
+ sproc.terminate()
+ except OSError:
+ pass
+
+ tm = Timer(timelimit, terminate)
+ tm.daemon = True
+ tm.start()
+ if monitor_function:
+ monitor_function(sproc)
+
rcode = sproc.wait()
return rcode
diff --git a/cwltool/load_tool.py b/cwltool/load_tool.py
index 22a5a56c0..d6352f918 100644
--- a/cwltool/load_tool.py
+++ b/cwltool/load_tool.py
@@ -21,7 +21,9 @@
)
from cwl_utils.parser import cwl_v1_2, cwl_v1_2_utils
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.exceptions import ValidationException
+from schema_salad.fetcher import Fetcher
from schema_salad.ref_resolver import Loader, file_uri
from schema_salad.schema import validate_doc
from schema_salad.sourceline import SourceLine, cmap
@@ -33,8 +35,6 @@
json_dumps,
)
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
-
from . import CWL_CONTENT_TYPES, process, update
from .context import LoadingContext
from .errors import GraphTargetMissingException
@@ -43,16 +43,25 @@
from .update import ALLUPDATES
from .utils import CWLObjectType, ResolverType, visit_class
-jobloaderctx = {
+docloaderctx: ContextType = {
"cwl": "https://w3id.org/cwl/cwl#",
"cwltool": "http://commonwl.org/cwltool#",
"path": {"@type": "@id"},
"location": {"@type": "@id"},
"id": "@id",
-} # type: ContextType
+}
+
+jobloader_id_name = "__id"
+jobloaderctx: ContextType = {
+ "cwl": "https://w3id.org/cwl/cwl#",
+ "cwltool": "http://commonwl.org/cwltool#",
+ "path": {"@type": "@id"},
+ "location": {"@type": "@id"},
+ jobloader_id_name: "@id",
+}
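+# ("__id" rather than plain "id": presumably so a job-order input that
+# happens to be named "id" is not mistaken for the document identifier.)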
-overrides_ctx = {
+overrides_ctx: ContextType = {
"overrideTarget": {"@type": "@id"},
"cwltool": "http://commonwl.org/cwltool#",
"http://commonwl.org/cwltool#overrides": {
@@ -63,7 +72,7 @@
"@id": "https://w3id.org/cwl/cwl#requirements",
"mapSubject": "class",
},
-} # type: ContextType
+}
def default_loader(
@@ -72,7 +81,7 @@ def default_loader(
doc_cache: bool = True,
) -> Loader:
return Loader(
- jobloaderctx,
+ docloaderctx,
fetcher_constructor=fetcher_constructor,
allow_attachments=lambda r: enable_dev,
doc_cache=doc_cache,
@@ -85,7 +94,6 @@ def resolve_tool_uri(
fetcher_constructor: Optional[FetcherCallableType] = None,
document_loader: Optional[Loader] = None,
) -> Tuple[str, str]:
-
uri = None # type: Optional[str]
split = urllib.parse.urlsplit(argsworkflow)
# In case of Windows path, urlsplit misjudge Drive letters as scheme, here we are skipping that
@@ -94,9 +102,7 @@ def resolve_tool_uri(
elif os.path.exists(os.path.abspath(argsworkflow)):
uri = file_uri(str(os.path.abspath(argsworkflow)))
elif resolver is not None:
- uri = resolver(
- document_loader or default_loader(fetcher_constructor), argsworkflow
- )
+ uri = resolver(document_loader or default_loader(fetcher_constructor), argsworkflow)
if uri is None:
raise ValidationException("Not found: '%s'" % argsworkflow)
@@ -137,23 +143,15 @@ def fetch_document(
)
return loadingContext, workflowobj, uri
if isinstance(argsworkflow, MutableMapping):
- uri = (
- cast(str, argsworkflow["id"])
- if argsworkflow.get("id")
- else "_:" + str(uuid.uuid4())
- )
- workflowobj = cast(
- CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri)
- )
+ uri = cast(str, argsworkflow["id"]) if argsworkflow.get("id") else "_:" + str(uuid.uuid4())
+ workflowobj = cast(CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri))
loadingContext.loader.idx[uri] = workflowobj
return loadingContext, workflowobj, uri
raise ValidationException("Must be URI or object: '%s'" % argsworkflow)
def _convert_stdstreams_to_files(
- workflowobj: Union[
- CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str
- ]
+ workflowobj: Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str]
) -> None:
if isinstance(workflowobj, MutableMapping):
if workflowobj.get("class") == "CommandLineTool":
@@ -166,13 +164,9 @@ def _convert_stdstreams_to_files(
outputs = workflowobj.get("outputs", [])
if not isinstance(outputs, CommentedSeq):
raise ValidationException('"outputs" section is not ' "valid.")
- for out in cast(
- MutableSequence[CWLObjectType], workflowobj.get("outputs", [])
- ):
+ for out in cast(MutableSequence[CWLObjectType], workflowobj.get("outputs", [])):
if not isinstance(out, CommentedMap):
- raise ValidationException(
- f"Output '{out}' is not a valid OutputParameter."
- )
+ raise ValidationException(f"Output {out!r} is not a valid OutputParameter.")
for streamtype in ["stdout", "stderr"]:
if out.get("type") == streamtype:
if "outputBinding" in out:
@@ -185,27 +179,21 @@ def _convert_stdstreams_to_files(
else:
filename = str(
hashlib.sha1( # nosec
- json_dumps(workflowobj, sort_keys=True).encode(
- "utf-8"
- )
+ json_dumps(workflowobj, sort_keys=True).encode("utf-8")
).hexdigest()
)
workflowobj[streamtype] = filename
out["type"] = "File"
out["outputBinding"] = cmap({"glob": filename})
- for inp in cast(
- MutableSequence[CWLObjectType], workflowobj.get("inputs", [])
- ):
+ for inp in cast(MutableSequence[CWLObjectType], workflowobj.get("inputs", [])):
if inp.get("type") == "stdin":
if "inputBinding" in inp:
raise ValidationException(
- "Not allowed to specify inputBinding when"
- " using stdin shortcut."
+                    "Not allowed to specify inputBinding when using stdin shortcut."
)
if "stdin" in workflowobj:
raise ValidationException(
- "Not allowed to specify stdin path when"
- " using stdin type shortcut."
+                    "Not allowed to specify stdin path when using stdin type shortcut."
)
else:
workflowobj["stdin"] = (
@@ -283,7 +271,6 @@ def _fast_parser_convert_stdstreams_to_files(
def _fast_parser_expand_hint_class(
hints: Optional[Any], loadingOptions: cwl_v1_2.LoadingOptions
) -> None:
-
if isinstance(hints, MutableSequence):
for h in hints:
if isinstance(h, MutableMapping) and "class" in h:
@@ -318,8 +305,9 @@ def fast_parser(
fileuri: Optional[str],
uri: str,
loadingContext: LoadingContext,
+ fetcher: Fetcher,
) -> Tuple[Union[CommentedMap, CommentedSeq], CommentedMap]:
- lopt = cwl_v1_2.LoadingOptions(idx=loadingContext.codegen_idx, fileuri=fileuri)
+ lopt = cwl_v1_2.LoadingOptions(idx=loadingContext.codegen_idx, fileuri=fileuri, fetcher=fetcher)
if uri not in loadingContext.codegen_idx:
cwl_v1_2.load_document_with_metadata(
@@ -359,18 +347,26 @@ def fast_parser(
# Need to match the document loader's index with the fast parser index
# Get the base URI (no fragments) for documents that use $graph
nofrag = urllib.parse.urldefrag(uri)[0]
- objects, loadopt = loadingContext.codegen_idx[nofrag]
- fileobj = cmap(
- cast(
- Union[int, float, str, Dict[str, Any], List[Any], None],
- cwl_v1_2.save(objects, relative_uris=False),
+
+ flag = "fastparser-idx-from:" + nofrag
+ if not loadingContext.loader.idx.get(flag):
+ objects, loadopt = loadingContext.codegen_idx[nofrag]
+ fileobj = cmap(
+ cast(
+ Union[int, float, str, Dict[str, Any], List[Any], None],
+ cwl_v1_2.save(objects, relative_uris=False),
+ )
)
- )
- visit_class(
- fileobj,
- ("CommandLineTool", "Workflow", "ExpressionTool"),
- partial(update_index, loadingContext.loader),
- )
+ visit_class(
+ fileobj,
+ ("CommandLineTool", "Workflow", "ExpressionTool"),
+ partial(update_index, loadingContext.loader),
+ )
+ loadingContext.loader.idx[flag] = flag
+ for u in lopt.imports:
+ loadingContext.loader.idx["import:" + u] = "import:" + u
+ for u in lopt.includes:
+ loadingContext.loader.idx["include:" + u] = "include:" + u
return cast(
Union[CommentedMap, CommentedSeq],
@@ -383,7 +379,6 @@ def resolve_and_validate_document(
workflowobj: Union[CommentedMap, CommentedSeq],
uri: str,
preprocess_only: bool = False,
- skip_schemas: Optional[bool] = None,
) -> Tuple[LoadingContext, str]:
"""Validate a CWL document."""
if not loadingContext.loader:
@@ -393,11 +388,7 @@ def resolve_and_validate_document(
loadingContext = loadingContext.copy()
if not isinstance(workflowobj, MutableMapping):
- raise ValueError(
- "workflowjobj must be a dict, got '{}': {}".format(
- type(workflowobj), workflowobj
- )
- )
+        raise ValueError(f"workflowobj must be a dict, got {type(workflowobj)!r}: {workflowobj}")
jobobj = None
if "cwl:tool" in workflowobj:
@@ -431,12 +422,8 @@ def resolve_and_validate_document(
)
if not isinstance(cwlVersion, str):
- with SourceLine(
- workflowobj, "cwlVersion", ValidationException, loadingContext.debug
- ):
- raise ValidationException(
- f"'cwlVersion' must be a string, got {type(cwlVersion)}"
- )
+ with SourceLine(workflowobj, "cwlVersion", ValidationException, loadingContext.debug):
+ raise ValidationException(f"'cwlVersion' must be a string, got {type(cwlVersion)}")
# strip out version
cwlVersion = re.sub(r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "", cwlVersion)
if cwlVersion not in list(ALLUPDATES):
@@ -453,17 +440,11 @@ def resolve_and_validate_document(
"\n{}".format("\n".join(versions))
)
- if (
- isinstance(jobobj, CommentedMap)
- and "http://commonwl.org/cwltool#overrides" in jobobj
- ):
+ if isinstance(jobobj, CommentedMap) and "http://commonwl.org/cwltool#overrides" in jobobj:
loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri))
del jobobj["http://commonwl.org/cwltool#overrides"]
- if (
- isinstance(jobobj, CommentedMap)
- and "https://w3id.org/cwl/cwl#requirements" in jobobj
- ):
+ if isinstance(jobobj, CommentedMap) and "https://w3id.org/cwl/cwl#requirements" in jobobj:
if cwlVersion not in ("v1.1.0-dev1", "v1.1"):
raise ValidationException(
"`cwl:requirements` in the input object is not part of CWL "
@@ -490,7 +471,7 @@ def resolve_and_validate_document(
idx=loader.idx,
cache=sch_document_loader.cache,
fetcher_constructor=loadingContext.fetcher_constructor,
- skip_schemas=skip_schemas,
+ skip_schemas=loadingContext.skip_schemas,
doc_cache=loadingContext.doc_cache,
)
@@ -519,7 +500,9 @@ def resolve_and_validate_document(
#
processobj, metadata = document_loader.resolve_ref(uri)
elif loadingContext.fast_parser:
- processobj, metadata = fast_parser(workflowobj, fileuri, uri, loadingContext)
+ processobj, metadata = fast_parser(
+ workflowobj, fileuri, uri, loadingContext, document_loader.fetcher
+ )
else:
document_loader.resolve_all(workflowobj, fileuri)
processobj, metadata = document_loader.resolve_ref(uri)
@@ -539,9 +522,7 @@ def resolve_and_validate_document(
metadata = copy.copy(metadata)
if not isinstance(metadata, CommentedMap):
- raise ValidationException(
- "metadata must be a CommentedMap, was %s" % type(metadata)
- )
+ raise ValidationException("metadata must be a CommentedMap, was %s" % type(metadata))
if isinstance(processobj, CommentedMap):
uri = processobj["id"]
@@ -589,12 +570,10 @@ def make_tool(
resolveduri: Union[float, str, CommentedMap, CommentedSeq, None]
metadata: CWLObjectType
- if (
- loadingContext.fast_parser
- and isinstance(uri, str)
- and not loadingContext.skip_resolve_all
- ):
- resolveduri, metadata = fast_parser(None, None, uri, loadingContext)
+ if loadingContext.fast_parser and isinstance(uri, str) and not loadingContext.skip_resolve_all:
+ resolveduri, metadata = fast_parser(
+ None, None, uri, loadingContext, loadingContext.loader.fetcher
+ )
else:
resolveduri, metadata = loadingContext.loader.resolve_ref(uri)
@@ -608,9 +587,7 @@ def make_tool(
raise GraphTargetMissingException(
"Tool file contains graph of multiple objects, must specify "
"one of #%s"
- % ", #".join(
- urllib.parse.urldefrag(i["id"])[1] for i in resolveduri if "id" in i
- )
+ % ", #".join(urllib.parse.urldefrag(i["id"])[1] for i in resolveduri if "id" in i)
)
elif isinstance(resolveduri, MutableMapping):
processobj = resolveduri
@@ -632,11 +609,12 @@ def load_tool(
argsworkflow: Union[str, CWLObjectType],
loadingContext: Optional[LoadingContext] = None,
) -> Process:
-
loadingContext, workflowobj, uri = fetch_document(argsworkflow, loadingContext)
loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
+ loadingContext,
+ workflowobj,
+ uri,
)
return make_tool(uri, loadingContext)
@@ -666,7 +644,6 @@ def recursive_resolve_and_validate_document(
workflowobj: Union[CommentedMap, CommentedSeq],
uri: str,
preprocess_only: bool = False,
- skip_schemas: Optional[bool] = None,
) -> Tuple[LoadingContext, str, Process]:
"""Validate a CWL document, checking that a tool object can be built."""
loadingContext, uri = resolve_and_validate_document(
@@ -674,7 +651,6 @@ def recursive_resolve_and_validate_document(
workflowobj,
uri,
preprocess_only=preprocess_only,
- skip_schemas=skip_schemas,
)
tool = make_tool(uri, loadingContext)
return loadingContext, uri, tool
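
The context split above also deserves a note: docloaderctx keeps mapping "id" to "@id" for CWL documents, while the job-order loader now uses the internal key "__id" (jobloader_id_name), so a workflow input that happens to be called "id" is no longer swallowed as a JSON-LD identifier. A minimal sketch, assuming schema-salad's Loader accepts a context dict as its first argument (as default_loader does above); the job_order value is invented:

    from schema_salad.ref_resolver import Loader

    jobloaderctx = {
        "cwl": "https://w3id.org/cwl/cwl#",
        "path": {"@type": "@id"},
        "location": {"@type": "@id"},
        "__id": "@id",  # internal identifier key used by cwltool, not user data
    }
    loader = Loader(jobloaderctx)
    job_order = {"id": "sample-42"}  # plain input value; previously treated as the document ID
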
diff --git a/cwltool/loghandler.py b/cwltool/loghandler.py
index 6d8c633c4..0d613c820 100644
--- a/cwltool/loghandler.py
+++ b/cwltool/loghandler.py
@@ -21,6 +21,10 @@ def configure_logging(
rdflib_logger = logging.getLogger("rdflib.term")
rdflib_logger.addHandler(stderr_handler)
rdflib_logger.setLevel(logging.ERROR)
+ deps_logger = logging.getLogger("galaxy.tool_util.deps")
+ deps_logger.addHandler(stderr_handler)
+ ss_logger = logging.getLogger("salad")
+ ss_logger.addHandler(stderr_handler)
if quiet:
# Silence STDERR, not an eventual provenance log file
stderr_handler.setLevel(logging.WARN)
@@ -29,10 +33,9 @@ def configure_logging(
base_logger.setLevel(logging.DEBUG)
stderr_handler.setLevel(logging.DEBUG)
rdflib_logger.setLevel(logging.DEBUG)
+ deps_logger.setLevel(logging.DEBUG)
fmtclass = coloredlogs.ColoredFormatter if enable_color else logging.Formatter
formatter = fmtclass("%(levelname)s %(message)s")
if timestamps:
- formatter = fmtclass(
- "[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S"
- )
+ formatter = fmtclass("[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
stderr_handler.setFormatter(formatter)
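
The effect of the new handler wiring, reduced to a standard-library sketch (handler and logger names as in the hunk above):

    import logging

    stderr_handler = logging.StreamHandler()  # stands in for cwltool's stderr handler
    for name in ("rdflib.term", "galaxy.tool_util.deps", "salad"):
        logging.getLogger(name).addHandler(stderr_handler)
    # under --debug, the dependency-resolver logger is opened up as well
    logging.getLogger("galaxy.tool_util.deps").setLevel(logging.DEBUG)
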
diff --git a/cwltool/main.py b/cwltool/main.py
index eb8daf9d4..0950059e6 100755
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -14,8 +14,7 @@
import time
import urllib
import warnings
-from codecs import StreamWriter, getwriter
-from collections.abc import MutableMapping, MutableSequence
+from codecs import getwriter
from typing import (
IO,
Any,
@@ -27,7 +26,6 @@
MutableSequence,
Optional,
Sized,
- TextIO,
Tuple,
Union,
cast,
@@ -35,7 +33,10 @@
import argcomplete
import coloredlogs
-import pkg_resources # part of setuptools
+import ruamel.yaml
+from importlib_resources import files
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
+from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
@@ -47,13 +48,17 @@
yaml_no_ts,
)
-import ruamel.yaml
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
-from ruamel.yaml.main import YAML
-
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
+from .cwlprov.ro import ResearchObject # , WritableBagFile
+from .cwlprov.writablebagfile import ( # change this later
+ WritableBagFile,
+ close_ro,
+ create_job,
+ open_log_file_for_activity,
+ packed_workflow,
+)
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
@@ -65,6 +70,7 @@
from .load_tool import (
default_loader,
fetch_document,
+ jobloader_id_name,
jobloaderctx,
load_overrides,
make_tool,
@@ -87,7 +93,6 @@
use_standard_schema,
)
from .procgenerator import ProcessGenerator
-from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
@@ -185,9 +190,7 @@ def generate_example_input(
"float": 0.1,
"double": 0.1,
"string": "a_string",
- "File": ruamel.yaml.comments.CommentedMap(
- [("class", "File"), ("path", "a/file/path")]
- ),
+ "File": ruamel.yaml.comments.CommentedMap([("class", "File"), ("path", "a/file/path")]),
"Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
@@ -254,17 +257,17 @@ def generate_example_input(
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
- comment = 'default value of type "{}".'.format(inptype["type"])
+ comment = f"default value of type {inptype['type']!r}"
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
- comment = 'type "{}".'.format(inptype["type"])
+ comment = f"type {inptype['type']!r}"
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
- comment = f'type "{inptype}"'
+ comment = f"type {inptype!r}"
else:
example = default
- comment = f'default value of type "{inptype}".'
+ comment = f"default value of type {inptype!r}."
return example, comment
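
The {...!r} conversions above swap hand-written quotes for repr()-based formatting; a quick illustration of the difference:

    inptype = "string"
    assert f'type "{inptype}"' == 'type "string"'  # old: quoting hard-coded in the template
    assert f"type {inptype!r}" == "type 'string'"  # new: repr() supplies the quoting
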
@@ -306,16 +309,10 @@ def realize_input_schema(
if isinstance(entry["type"], Mapping):
entry["type"] = cast(
CWLOutputAtomType,
- realize_input_schema(
- [cast(CWLObjectType, entry["type"])], schema_defs
- ),
+ realize_input_schema([cast(CWLObjectType, entry["type"])], schema_defs),
)
if entry["type"] == "array":
- items = (
- entry["items"]
- if not isinstance(entry["items"], str)
- else [entry["items"]]
- )
+ items = entry["items"] if not isinstance(entry["items"], str) else [entry["items"]]
entry["items"] = cast(
CWLOutputAtomType,
realize_input_schema(
@@ -327,9 +324,7 @@ def realize_input_schema(
entry["fields"] = cast(
CWLOutputAtomType,
realize_input_schema(
- cast(
- MutableSequence[Union[str, CWLObjectType]], entry["fields"]
- ),
+ cast(MutableSequence[Union[str, CWLObjectType]], entry["fields"]),
schema_defs,
),
)
@@ -356,7 +351,6 @@ def load_job_order(
overrides_list: List[CWLObjectType],
tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
-
job_order_object = None
job_order_file = None
@@ -368,9 +362,7 @@ def load_job_order(
elif len(args.job_order) == 1 and args.job_order[0] == "-":
yaml = yaml_no_ts()
job_order_object = yaml.load(stdin)
- job_order_object, _ = loader.resolve_all(
- job_order_object, file_uri(os.getcwd()) + "/"
- )
+ job_order_object, _ = loader.resolve_all(job_order_object, file_uri(os.getcwd()) + "/")
else:
job_order_file = None
@@ -378,9 +370,7 @@ def load_job_order(
input_basedir = args.basedir if args.basedir else os.getcwd()
elif job_order_file is not None:
input_basedir = (
- args.basedir
- if args.basedir
- else os.path.abspath(os.path.dirname(job_order_file))
+ args.basedir if args.basedir else os.path.abspath(os.path.dirname(job_order_file))
)
job_order_object, _ = loader.resolve_ref(
job_order_file,
@@ -388,22 +378,15 @@ def load_job_order(
content_types=CWL_CONTENT_TYPES,
)
- if (
- job_order_object is not None
- and "http://commonwl.org/cwltool#overrides" in job_order_object
- ):
+ if job_order_object is not None and "http://commonwl.org/cwltool#overrides" in job_order_object:
ov_uri = file_uri(job_order_file or input_basedir)
- overrides_list.extend(
- resolve_overrides(job_order_object, ov_uri, tool_file_uri)
- )
+ overrides_list.extend(resolve_overrides(job_order_object, ov_uri, tool_file_uri))
del job_order_object["http://commonwl.org/cwltool#overrides"]
if job_order_object is None:
input_basedir = args.basedir if args.basedir else os.getcwd()
- if job_order_object is not None and not isinstance(
- job_order_object, MutableMapping
- ):
+ if job_order_object is not None and not isinstance(job_order_object, MutableMapping):
_logger.error(
"CWL input object at %s is not formatted correctly, it should be a "
"JSON/YAML dictionary, not %s.\n"
@@ -449,9 +432,7 @@ def init_job_order(
cmd_line = vars(toolparser.parse_args(args.job_order))
for record_name in records:
record = {}
- record_items = {
- k: v for k, v in cmd_line.items() if k.startswith(record_name)
- }
+ record_items = {k: v for k, v in cmd_line.items() if k.startswith(record_name)}
for key, value in record_items.items():
record[key[len(record_name) + 1 :]] = value
del cmd_line[key]
@@ -463,12 +444,10 @@ def init_job_order(
loader.resolve_ref(cmd_line["job_order"])[0],
)
except Exception:
- _logger.exception(
- "Failed to resolv job_order: %s", cmd_line["job_order"]
- )
+            _logger.exception("Failed to resolve job_order: %s", cmd_line["job_order"])
exit(1)
else:
- job_order_object = {"id": args.workflow}
+ job_order_object = {jobloader_id_name: args.workflow}
del cmd_line["job_order"]
@@ -528,7 +507,7 @@ def expand_formats(p: CWLObjectType) -> None:
process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
)
basedir: Optional[str] = None
- uri = cast(str, job_order_object["id"])
+ uri = cast(str, job_order_object[jobloader_id_name])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
@@ -551,8 +530,8 @@ def expand_formats(p: CWLObjectType) -> None:
if "cwl:tool" in job_order_object:
del job_order_object["cwl:tool"]
- if "id" in job_order_object:
- del job_order_object["id"]
+ if jobloader_id_name in job_order_object:
+ del job_order_object[jobloader_id_name]
return job_order_object
@@ -633,9 +612,7 @@ def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]
nestdirs=nestdirs,
)
if sfs is not None:
- deps["secondaryFiles"] = cast(
- MutableSequence[CWLOutputAtomType], mergedirs(sfs)
- )
+ deps["secondaryFiles"] = cast(MutableSequence[CWLOutputAtomType], mergedirs(sfs))
return deps
@@ -669,12 +646,9 @@ def setup_schema(
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
- with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
- ext10 = res.read().decode("utf-8")
- with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
- ext11 = res.read().decode("utf-8")
- with pkg_resources.resource_stream(__name__, "extensions-v1.2.yml") as res:
- ext12 = res.read().decode("utf-8")
+ ext10 = files("cwltool").joinpath("extensions.yml").read_text("utf-8")
+ ext11 = files("cwltool").joinpath("extensions-v1.1.yml").read_text("utf-8")
+ ext12 = files("cwltool").joinpath("extensions-v1.2.yml").read_text("utf-8")
use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2", "http://commonwl.org/cwltool", ext12)
@@ -697,12 +671,9 @@ def __init__(self) -> None:
"""Use the default formatter with our custom formatstring."""
super().__init__("[%(asctime)sZ] %(message)s")
- def formatTime(
- self, record: logging.LogRecord, datefmt: Optional[str] = None
- ) -> str:
- formatted_time = time.strftime(
- "%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
- )
+ def formatTime(self, record: logging.LogRecord, datefmt: Optional[str] = None) -> str:
+ """Override the default formatTime to include the timezone."""
+ formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created)))
with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
@@ -725,7 +696,7 @@ def setup_provenance(
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
- log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
+ log_file_io = open_log_file_for_activity(ro, ro.engine_uuid)
prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
@@ -756,9 +727,7 @@ def setup_loadingContext(
doc_cache=args.doc_cache,
)
loadingContext.research_obj = runtimeContext.research_obj
- loadingContext.disable_js_validation = args.disable_js_validation or (
- not args.do_validate
- )
+ loadingContext.disable_js_validation = args.disable_js_validation or (not args.do_validate)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool
)
@@ -772,15 +741,11 @@ def setup_loadingContext(
def make_template(tool: Process, target: IO[str]) -> None:
"""Make a template CWL input object for the give Process."""
- def my_represent_none(
- self: Any, data: Any
- ) -> Any: # pylint: disable=unused-argument
+ def my_represent_none(self: Any, data: Any) -> Any: # pylint: disable=unused-argument
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
- ruamel.yaml.representer.RoundTripRepresenter.add_representer(
- type(None), my_represent_none
- )
+ ruamel.yaml.representer.RoundTripRepresenter.add_representer(type(None), my_represent_none)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
@@ -929,8 +894,7 @@ def check_working_directories(
):
sl = (
"/"
- if getattr(runtimeContext, dirprefix).endswith("/")
- or dirprefix == "cachedir"
+ if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir"
else ""
)
setattr(
@@ -958,8 +922,7 @@ def print_targets(
if tool.tool[f]:
_logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
print(
- " "
- + "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
+ " " + "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
file=stdout,
)
if "steps" in tool.tool:
@@ -976,9 +939,7 @@ def print_targets(
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
- print_targets(
- process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/"
- )
+ print_targets(process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/")
def main(
@@ -1004,7 +965,7 @@ def main(
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
- stdout = getwriter("utf-8")(sys.stdout) # type: ignore[assignment,arg-type]
+ stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
stdout = cast(IO[str], stdout)
@@ -1091,9 +1052,7 @@ def main(
if argsl is None:
raise Exception("argsl cannot be None")
try:
- prov_log_stream, prov_log_handler = setup_provenance(
- args, argsl, runtimeContext
- )
+ prov_log_stream, prov_log_handler = setup_provenance(args, argsl, runtimeContext)
except ArgumentException:
return 1
@@ -1105,9 +1064,7 @@ def main(
fetcher_constructor=loadingContext.fetcher_constructor,
)
- try_again_msg = (
- "" if args.debug else ", try again with --debug for more information"
- )
+ try_again_msg = "" if args.debug else ", try again with --debug for more information"
try:
job_order_object, input_basedir, jobloader = load_job_order(
@@ -1120,17 +1077,13 @@ def main(
if args.overrides:
loadingContext.overrides_list.extend(
- load_overrides(
- file_uri(os.path.abspath(args.overrides)), tool_file_uri
- )
+ load_overrides(file_uri(os.path.abspath(args.overrides)), tool_file_uri)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
- printdeps(
- workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
- )
+ printdeps(workflowobj, loadingContext.loader, stdout, args.relative_deps, uri)
return 0
loadingContext, uri = resolve_and_validate_document(
@@ -1138,7 +1091,6 @@ def main(
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
- skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
@@ -1151,9 +1103,7 @@ def main(
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
- runtimeContext.research_obj.packed_workflow(
- print_pack(loadingContext, uri)
- )
+ packed_workflow(runtimeContext.research_obj, print_pack(loadingContext, uri))
if args.print_pre:
json_dump(
@@ -1185,7 +1135,7 @@ def main(
make_template(tool, stdout)
return 0
- if args.validate:
+ if len(args.job_order) == 0 and args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
@@ -1238,10 +1188,8 @@ def main(
)
return 0
- except (ValidationException) as exc:
- _logger.error(
- "Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
- )
+ except ValidationException as exc:
+ _logger.error("Tool definition failed validation:\n%s", str(exc), exc_info=args.debug)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
@@ -1282,12 +1230,8 @@ def main(
runtimeContext.log_dir = args.log_dir
- runtimeContext.secret_store = getdefault(
- runtimeContext.secret_store, SecretStore()
- )
- runtimeContext.make_fs_access = getdefault(
- runtimeContext.make_fs_access, StdFsAccess
- )
+ runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore())
+ runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
if not executor:
if args.parallel:
@@ -1308,13 +1252,9 @@ def main(
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
- tfout, tfstatus = real_executor(
- tool.embedded_tool, tfjob_order, runtimeContext
- )
+ tfout, tfstatus = real_executor(tool.embedded_tool, tfjob_order, runtimeContext)
if not tfout or tfstatus != "success":
- raise WorkflowException(
- "ProcessGenerator failed to generate workflow"
- )
+ raise WorkflowException("ProcessGenerator failed to generate workflow")
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
@@ -1343,12 +1283,8 @@ def main(
del args.workflow
del args.job_order
- conf_file = getattr(
- args, "beta_dependency_resolvers_configuration", None
- ) # str
- use_conda_dependencies = getattr(
- args, "beta_conda_dependencies", None
- ) # str
+ conf_file = getattr(args, "beta_dependency_resolvers_configuration", None) # str
+ use_conda_dependencies = getattr(args, "beta_conda_dependencies", None) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
@@ -1357,15 +1293,20 @@ def main(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
+ container_image_cache_path=args.beta_dependencies_directory,
)
+ runtimeContext.validate_only = args.validate
+ runtimeContext.validate_stdout = stdout
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
+ if runtimeContext.validate_only is True:
+ return 0
if out is not None:
if runtimeContext.research_obj is not None:
- runtimeContext.research_obj.create_job(out, True)
+ create_job(runtimeContext.research_obj, out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
@@ -1401,9 +1342,7 @@ def loc_to_path(obj: CWLObjectType) -> None:
if args.write_summary:
with open(args.write_summary, "w") as output_file:
- json_dump(
- out, output_file, indent=4, ensure_ascii=False, default=str
- )
+ json_dump(out, output_file, indent=4, ensure_ascii=False, default=str)
else:
json_dump(out, stdout, indent=4, ensure_ascii=False, default=str)
if hasattr(stdout, "flush"):
@@ -1415,10 +1354,8 @@ def loc_to_path(obj: CWLObjectType) -> None:
_logger.info("Final process status is %s", status)
return 0
- except (ValidationException) as exc:
- _logger.error(
- "Input object failed validation:\n%s", str(exc), exc_info=args.debug
- )
+ except ValidationException as exc:
+ _logger.error("Input object failed validation:\n%s", str(exc), exc_info=args.debug)
return 1
except UnsupportedRequirement as exc:
_logger.error(
@@ -1455,7 +1392,7 @@ def loc_to_path(obj: CWLObjectType) -> None:
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
- prov_deps(workflowobj, loadingContext.loader, uri)
+ prov_deps(cast(CWLObjectType, processobj), loadingContext.loader, uri)
)
else:
_logger.warning(
@@ -1464,9 +1401,7 @@ def loc_to_path(obj: CWLObjectType) -> None:
)
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
- _logger.debug(
- "[provenance] Closing provenance log file %s", prov_log_handler
- )
+ _logger.debug("[provenance] Closing provenance log file %s", prov_log_handler)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
@@ -1476,7 +1411,7 @@ def loc_to_path(obj: CWLObjectType) -> None:
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
- research_obj.close(args.provenance)
+ close_ro(research_obj, args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
@@ -1486,11 +1421,12 @@ def find_default_container(
builder: HasReqsHints,
default_container: Optional[str] = None,
use_biocontainers: Optional[bool] = None,
+ container_image_cache_path: Optional[str] = None,
) -> Optional[str]:
"""Find a container."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
- use_biocontainers, builder
+ use_biocontainers, builder, container_image_cache_path
)
return default_container
@@ -1507,7 +1443,8 @@ def windows_check() -> None:
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
- "for your CWL document processing needs."
+ "for your CWL document processing needs.",
+ stacklevel=1,
)
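
Taken together, the --validate changes in this file mean that when a job order is supplied, validation no longer stops after document load: the executor is entered with validate_only set, and main() returns before any job runs. A condensed, hypothetical rendering of that control flow (bodies elided; names as in the hunks above):

    def validate_with_job_order(args, tool, job_order, runtimeContext, real_executor) -> int:
        # sketch only: the real logic lives inline in main()
        runtimeContext.validate_only = args.validate
        out, status = real_executor(tool, job_order, runtimeContext)
        if runtimeContext.validate_only is True:
            return 0  # inputs were checked against the tool; nothing was executed
        return 0 if status == "success" else 1
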
diff --git a/cwltool/mpi.py b/cwltool/mpi.py
index a66017796..b35b72ee5 100644
--- a/cwltool/mpi.py
+++ b/cwltool/mpi.py
@@ -59,7 +59,7 @@ def load(cls: Type[MpiConfigT], config_file_name: str) -> MpiConfigT:
return cls(**data)
except TypeError as e:
unknown = set(data.keys()) - set(inspect.signature(cls).parameters)
- raise ValueError(f"Unknown key(s) in MPI configuration: {unknown}")
+ raise ValueError(f"Unknown key(s) in MPI configuration: {unknown}") from e
def pass_through_env_vars(self, env: MutableMapping[str, str]) -> None:
"""Take the configured list of environment variables and pass them to the executed process."""
diff --git a/cwltool/mutation.py b/cwltool/mutation.py
index 65f894e16..077b92cb7 100644
--- a/cwltool/mutation.py
+++ b/cwltool/mutation.py
@@ -20,7 +20,7 @@ class MutationManager:
def __init__(self) -> None:
"""Initialize."""
- self.generations = {} # type: Dict[str, MutationState]
+ self.generations: Dict[str, MutationState] = {}
def register_reader(self, stepname: str, obj: CWLObjectType) -> None:
loc = cast(str, obj["location"])
@@ -73,9 +73,7 @@ def register_mutation(self, stepname: str, obj: CWLObjectType) -> None:
)
)
- self.generations[loc] = MutationState(
- current.generation + 1, current.readers, stepname
- )
+ self.generations[loc] = MutationState(current.generation + 1, current.readers, stepname)
def set_generation(self, obj: CWLObjectType) -> None:
loc = cast(str, obj["location"])
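
This hunk is one instance of a conversion applied throughout the patch: PEP 484 type comments become PEP 526 inline annotations, which mypy and mypyc check without parsing comments. Schematically:

    from typing import Dict

    # before:  self.generations = {}  # type: Dict[str, MutationState]
    # after, as adopted in this patch (MutationState elided here):
    generations: Dict[str, object] = {}
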
diff --git a/cwltool/pack.py b/cwltool/pack.py
index cd26d3483..c9fbc4e04 100644
--- a/cwltool/pack.py
+++ b/cwltool/pack.py
@@ -14,11 +14,10 @@
cast,
)
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.ref_resolver import Loader, SubLoader
from schema_salad.utils import ResolveType
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
-
from .context import LoadingContext
from .load_tool import fetch_document, resolve_and_validate_document
from .process import shortname, uniquename
@@ -118,7 +117,6 @@ def pack(
rewrite_out: Optional[Dict[str, str]] = None,
loader: Optional[Loader] = None,
) -> CWLObjectType:
-
# The workflow document we have in memory right now may have been
# updated to the internal CWL version. We need to reload the
# document to go back to its original version.
@@ -155,16 +153,12 @@ def pack(
document_loader.idx[po["id"]] = CommentedMap(po.items())
document_loader.idx[metadata["id"]] = CommentedMap(metadata.items())
- found_versions = {
- cast(str, loadingContext.metadata["cwlVersion"])
- } # type: Set[str]
+ found_versions = {cast(str, loadingContext.metadata["cwlVersion"])} # type: Set[str]
def loadref(base: Optional[str], lr_uri: str) -> ResolveType:
lr_loadingContext = loadingContext.copy()
lr_loadingContext.metadata = {}
- lr_loadingContext, lr_workflowobj, lr_uri = fetch_document(
- lr_uri, lr_loadingContext
- )
+ lr_loadingContext, lr_workflowobj, lr_uri = fetch_document(lr_uri, lr_loadingContext)
lr_loadingContext, lr_uri = resolve_and_validate_document(
lr_loadingContext, lr_workflowobj, lr_uri
)
@@ -204,9 +198,7 @@ def loadref(base: Optional[str], lr_uri: str) -> ResolveType:
mainpath, _ = urllib.parse.urldefrag(uri)
- def rewrite_id(
- r: str, mainuri: str, rewrite: Dict[str, str], names: Set[str]
- ) -> None:
+ def rewrite_id(r: str, mainuri: str, rewrite: Dict[str, str], names: Set[str]) -> None:
if r == mainuri:
rewrite[r] = "#main"
elif r.startswith(mainuri) and r[len(mainuri)] in ("#", "/"):
@@ -230,9 +222,7 @@ def rewrite_id(
for r in sorted_output_ids:
rewrite_id(r, uri, rewrite_outputs, output_names)
- packed = CommentedMap(
- (("$graph", CommentedSeq()), ("cwlVersion", update_to_version))
- )
+ packed = CommentedMap((("$graph", CommentedSeq()), ("cwlVersion", update_to_version)))
namespaces = metadata.get("$namespaces", None)
schemas: Set[str] = set()
@@ -301,9 +291,7 @@ def rewrite_id(
v + "/",
)
- for r in list(
- rewrite_inputs.keys()
- ): # again, to process the outputSource references
+ for r in list(rewrite_inputs.keys()): # again, to process the outputSource references
v = rewrite_inputs[r]
replace_refs(packed, rewrite_inputs, r + "/" if "#" in r else r + "#", v + "/")
diff --git a/cwltool/pathmapper.py b/cwltool/pathmapper.py
index aeddcf35f..660b5ddb1 100644
--- a/cwltool/pathmapper.py
+++ b/cwltool/pathmapper.py
@@ -7,6 +7,7 @@
from pathlib import Path
from typing import Dict, Iterator, List, Optional, Tuple, cast
+from mypy_extensions import mypyc_attr
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import uri_file_path
from schema_salad.sourceline import SourceLine
@@ -15,11 +16,34 @@
from .stdfsaccess import abspath
from .utils import CWLObjectType, dedup, downloadHttpFile
-MapperEnt = collections.namedtuple(
- "MapperEnt", ["resolved", "target", "type", "staged"]
-)
+MapperEnt = collections.namedtuple("MapperEnt", ["resolved", "target", "type", "staged"])
+""" Mapper entries.
+.. py:attribute:: resolved
+ :type: str
+ The "real" path on the local file system (after resolving relative paths
+    and traversing symlinks).
+
+.. py:attribute:: target
+ :type: str
+
+ The path on the target file system (under stagedir)
+
+.. py:attribute:: type
+ :type: str
+
+ The object type. One of "File", "Directory", "CreateFile", "WritableFile",
+ or "CreateWritableFile".
+
+.. py:attribute:: staged
+ :type: bool
+
+    Whether the File has been staged yet.
+"""
+
+
+@mypyc_attr(allow_interpreted_subclasses=True)
class PathMapper:
"""
Mapping of files from relative path provided in the file to a tuple.
@@ -28,16 +52,16 @@ class PathMapper:
The tao of PathMapper:
- The initializer takes a list of File and Directory objects, a base
- directory (for resolving relative references) and a staging directory
- (where the files are mapped to).
+ The initializer takes a list of ``class: File`` and ``class: Directory``
+ objects, a base directory (for resolving relative references) and a staging
+ directory (where the files are mapped to).
The purpose of the setup method is to determine where each File or
Directory should be placed on the target file system (relative to
stagedir).
- If separatedirs=True, unrelated files will be isolated in their own
- directories under stagedir. If separatedirs=False, files and directories
+ If ``separatedirs=True``, unrelated files will be isolated in their own
+ directories under stagedir. If ``separatedirs=False``, files and directories
will all be placed in stagedir (with the possibility for name
collisions...)
@@ -65,7 +89,7 @@ def __init__(
separateDirs: bool = True,
) -> None:
"""Initialize the PathMapper."""
- self._pathmap = {} # type: Dict[str, MapperEnt]
+ self._pathmap: Dict[str, MapperEnt] = {}
self.stagedir = stagedir
self.separateDirs = separateDirs
self.setup(dedup(referenced_files), basedir)
@@ -164,18 +188,18 @@ def visit(
)
def setup(self, referenced_files: List[CWLObjectType], basedir: str) -> None:
-
# Go through each file and set the target to its own directory along
# with any secondary files.
stagedir = self.stagedir
for fob in referenced_files:
if self.separateDirs:
stagedir = os.path.join(self.stagedir, "stg%s" % uuid.uuid4())
+ copy = cast(bool, fob.get("writable", False) or False)
self.visit(
fob,
stagedir,
basedir,
- copy=cast(bool, fob.get("writable", False)),
+ copy=copy,
staged=True,
)
@@ -212,9 +236,8 @@ def reversemap(
return (k, v[0])
return None
- def update(
- self, key: str, resolved: str, target: str, ctype: str, stage: bool
- ) -> MapperEnt:
+ def update(self, key: str, resolved: str, target: str, ctype: str, stage: bool) -> MapperEnt:
+        """Update an existing entry."""
m = MapperEnt(resolved, target, ctype, stage)
self._pathmap[key] = m
return m
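
For concreteness, a hypothetical MapperEnt populated per the field documentation added above (all paths invented):

    import collections

    MapperEnt = collections.namedtuple("MapperEnt", ["resolved", "target", "type", "staged"])
    ent = MapperEnt(
        resolved="/data/inputs/reads.fq",  # real path on the local file system
        target="/stage/stg0001/reads.fq",  # where it lands under stagedir
        type="File",                       # one of the five documented types
        staged=True,                       # already placed at the target
    )
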
diff --git a/cwltool/process.py b/cwltool/process.py
index 76f18014c..80d70be74 100644
--- a/cwltool/process.py
+++ b/cwltool/process.py
@@ -14,6 +14,7 @@
import uuid
from os import scandir
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Dict,
@@ -32,9 +33,10 @@
)
from cwl_utils import expression
+from importlib_resources import files
from mypy_extensions import mypyc_attr
-from pkg_resources import resource_stream
from rdflib import Graph
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import (
Names,
Schema,
@@ -47,9 +49,6 @@
from schema_salad.sourceline import SourceLine, strip_dup_lineno
from schema_salad.utils import convert_to_dict
from schema_salad.validate import avro_type_name, validate_ex
-from typing_extensions import TYPE_CHECKING
-
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
from .builder import INPUT_OBJ_VOCAB, Builder
from .context import LoadingContext, RuntimeContext, getdefault
@@ -59,13 +58,14 @@
from .pathmapper import MapperEnt, PathMapper
from .secrets import SecretStore
from .stdfsaccess import StdFsAccess
-from .update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
+from .update import INTERNAL_VERSION, ORDERED_VERSIONS, ORIGINAL_CWLVERSION
from .utils import (
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
JobsGeneratorType,
+ LoadListingType,
OutputCallbackType,
adjustDirObjs,
aslist,
@@ -79,7 +79,7 @@
from .validate_js import validate_js_expressions
if TYPE_CHECKING:
- from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
+ from .cwlprov.provenance_profile import ProvenanceProfile
class LogAsDebugFilter(logging.Filter):
@@ -95,9 +95,7 @@ def filter(self, record: logging.LogRecord) -> bool:
_logger_validation_warnings = logging.getLogger("cwltool.validation_warnings")
_logger_validation_warnings.setLevel(_logger.getEffectiveLevel())
-_logger_validation_warnings.addFilter(
- LogAsDebugFilter("cwltool.validation_warnings", _logger)
-)
+_logger_validation_warnings.addFilter(LogAsDebugFilter("cwltool.validation_warnings", _logger))
supportedProcessRequirements = [
"DockerRequirement",
@@ -161,14 +159,14 @@ def filter(self, record: logging.LogRecord) -> bool:
"vocab_res_proc.yml",
)
-SCHEMA_CACHE = (
- {}
-) # type: Dict[str, Tuple[Loader, Union[Names, SchemaParseException], CWLObjectType, Loader]]
-SCHEMA_FILE = None # type: Optional[CWLObjectType]
-SCHEMA_DIR = None # type: Optional[CWLObjectType]
-SCHEMA_ANY = None # type: Optional[CWLObjectType]
+SCHEMA_CACHE: Dict[
+ str, Tuple[Loader, Union[Names, SchemaParseException], CWLObjectType, Loader]
+] = {}
+SCHEMA_FILE: Optional[CWLObjectType] = None
+SCHEMA_DIR: Optional[CWLObjectType] = None
+SCHEMA_ANY: Optional[CWLObjectType] = None
-custom_schemas = {} # type: Dict[str, Tuple[str, str]]
+custom_schemas: Dict[str, Tuple[str, str]] = {}
def use_standard_schema(version: str) -> None:
@@ -187,32 +185,28 @@ def use_custom_schema(version: str, name: str, text: str) -> None:
def get_schema(
version: str,
) -> Tuple[Loader, Union[Names, SchemaParseException], CWLObjectType, Loader]:
-
if version in SCHEMA_CACHE:
return SCHEMA_CACHE[version]
- cache = {} # type: Dict[str, Union[str, Graph, bool]]
+ cache: Dict[str, Union[str, Graph, bool]] = {}
version = version.split("#")[-1]
if ".dev" in version:
version = ".".join(version.split(".")[:-1])
for f in cwl_files:
try:
- res = resource_stream(__name__, f"schemas/{version}/{f}")
- cache["https://w3id.org/cwl/" + f] = res.read().decode("UTF-8")
- res.close()
+ res = files("cwltool").joinpath(f"schemas/{version}/{f}")
+ cache["https://w3id.org/cwl/" + f] = res.read_text("UTF-8")
except OSError:
pass
for f in salad_files:
try:
- res = resource_stream(
- __name__,
+ res = files("cwltool").joinpath(
f"schemas/{version}/salad/schema_salad/metaschema/{f}",
)
- cache[
- "https://w3id.org/cwl/salad/schema_salad/metaschema/" + f
- ] = res.read().decode("UTF-8")
- res.close()
+ cache["https://w3id.org/cwl/salad/schema_salad/metaschema/" + f] = res.read_text(
+ "UTF-8"
+ )
except OSError:
pass
@@ -242,9 +236,13 @@ def stage_files(
secret_store: Optional[SecretStore] = None,
fix_conflicts: bool = False,
) -> None:
- """Link or copy files to their targets. Create them as needed."""
+ """
+ Link or copy files to their targets. Create them as needed.
+
+ :raises WorkflowException: if there is a file staging conflict
+ """
items = pathmapper.items() if not symlink else pathmapper.items_exclude_children()
- targets = {} # type: Dict[str, MapperEnt]
+ targets: Dict[str, MapperEnt] = {}
for key, entry in items:
if "File" not in entry.type:
continue
@@ -253,21 +251,21 @@ def stage_files(
elif targets[entry.target].resolved != entry.resolved:
if fix_conflicts:
# find first key that does not clash with an existing entry in targets
- # start with entry.target + '_' + 2 and then keep incrementing the number till there is no clash
+                        # start with entry.target + "_2" and then keep incrementing
+                        # the number until there is no clash
i = 2
tgt = f"{entry.target}_{i}"
while tgt in targets:
i += 1
tgt = f"{entry.target}_{i}"
- targets[tgt] = pathmapper.update(
- key, entry.resolved, tgt, entry.type, entry.staged
- )
+ targets[tgt] = pathmapper.update(key, entry.resolved, tgt, entry.type, entry.staged)
else:
raise WorkflowException(
"File staging conflict, trying to stage both %s and %s to the same target %s"
% (targets[entry.target].resolved, entry.resolved, entry.target)
)
-
+ # refresh the items, since we may have updated the pathmapper due to file name clashes
+ items = pathmapper.items() if not symlink else pathmapper.items_exclude_children()
for key, entry in items:
if not entry.staged:
continue
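
The renaming strategy in the fix_conflicts branch, plus the refreshed items noted in the new comment, distilled into a standalone helper (next_free_target is a hypothetical name, not part of the patch):

    def next_free_target(target: str, taken: set) -> str:
        """Append _2, _3, ... until the name no longer clashes."""
        i = 2
        tgt = f"{target}_{i}"
        while tgt in taken:
            i += 1
            tgt = f"{target}_{i}"
        return tgt

    assert next_free_target("out.txt", {"out.txt_2"}) == "out.txt_3"
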
@@ -341,9 +339,7 @@ def _relocate(src: str, dst: str) -> None:
return
# If the source is not contained in source_directories we're not allowed to delete it
- src_can_deleted = any(
- os.path.commonprefix([p, src]) == p for p in source_directories
- )
+ src_can_deleted = any(os.path.commonprefix([p, src]) == p for p in source_directories)
_action = "move" if action == "move" and src_can_deleted else "copy"
@@ -392,9 +388,7 @@ def _check_adjust(a_file: CWLObjectType) -> CWLObjectType:
visit_class(outputObj, ("File", "Directory"), _check_adjust)
if compute_checksum:
- visit_class(
- outputObj, ("File",), functools.partial(compute_checksums, fs_access)
- )
+ visit_class(outputObj, ("File",), functools.partial(compute_checksums, fs_access))
return outputObj
@@ -422,6 +416,11 @@ def fill_in_defaults(
job: CWLObjectType,
fsaccess: StdFsAccess,
) -> None:
+ """
+ For each missing input in the input object, copy over the default.
+
+ :raises WorkflowException: if a required input parameter is missing
+ """
debug = _logger.isEnabledFor(logging.DEBUG)
for e, inp in enumerate(inputs):
with SourceLine(inputs, e, WorkflowException, debug):
@@ -434,8 +433,7 @@ def fill_in_defaults(
job[fieldname] = None
else:
raise WorkflowException(
- "Missing required input parameter '%s'"
- % shortname(cast(str, inp["id"]))
+ "Missing required input parameter '%s'" % shortname(cast(str, inp["id"]))
)
@@ -460,9 +458,7 @@ def avroize_type(
cast(MutableSequence[CWLOutputType], field_type["items"]), name_prefix
)
else:
- field_type["type"] = avroize_type(
- cast(CWLOutputType, field_type["type"]), name_prefix
- )
+ field_type["type"] = avroize_type(cast(CWLOutputType, field_type["type"]), name_prefix)
elif field_type == "File":
return "org.w3id.cwl.cwl.File"
elif field_type == "Directory":
@@ -470,14 +466,11 @@ def avroize_type(
return field_type
-def get_overrides(
- overrides: MutableSequence[CWLObjectType], toolid: str
-) -> CWLObjectType:
- req = {} # type: CWLObjectType
+def get_overrides(overrides: MutableSequence[CWLObjectType], toolid: str) -> CWLObjectType:
+ """Combine overrides for the target tool ID."""
+ req: CWLObjectType = {}
if not isinstance(overrides, MutableSequence):
- raise ValidationException(
- "Expected overrides to be a list, but was %s" % type(overrides)
- )
+ raise ValidationException("Expected overrides to be a list, but was %s" % type(overrides))
for ov in overrides:
if ov["overrideTarget"] == toolid:
req.update(ov)
@@ -523,12 +516,17 @@ def eval_resource(
) -> Optional[Union[str, int, float]]:
if isinstance(resource_req, str) and expression.needs_parsing(resource_req):
result = builder.do_eval(resource_req)
+ if isinstance(result, float):
+ if ORDERED_VERSIONS.index(builder.cwlVersion) >= ORDERED_VERSIONS.index("v1.2.0-dev4"):
+ return result
+ raise WorkflowException(
+ "Floats are not valid in resource requirement expressions prior "
+ f"to CWL v1.2: {resource_req} returned {result}."
+ )
if isinstance(result, (str, int)) or result is None:
return result
raise WorkflowException(
- "Got incorrect return type {} from resource expression evaluation of {}.".format(
- type(result), resource_req
- )
+ f"Got incorrect return type {type(result)} from resource expression evaluation of {resource_req}."
)
return resource_req
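
The new float branch gates on the document's version by its position in ORDERED_VERSIONS (imported from .update earlier in this patch). The comparison in isolation, with an illustrative subset of the real version list:

    ORDERED_VERSIONS = ["v1.0", "v1.1", "v1.2.0-dev1", "v1.2.0-dev4", "v1.2"]  # subset, for illustration

    def float_result_allowed(cwl_version: str) -> bool:
        # floats from resource expressions are only valid from v1.2.0-dev4 onward
        return ORDERED_VERSIONS.index(cwl_version) >= ORDERED_VERSIONS.index("v1.2.0-dev4")

    assert float_result_allowed("v1.2")
    assert not float_result_allowed("v1.1")
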
@@ -539,14 +537,14 @@ def eval_resource(
@mypyc_attr(allow_interpreted_subclasses=True)
class Process(HasReqsHints, metaclass=abc.ABCMeta):
- def __init__(
- self, toolpath_object: CommentedMap, loadingContext: LoadingContext
- ) -> None:
+ """Abstract CWL Process."""
+
+ def __init__(self, toolpath_object: CommentedMap, loadingContext: LoadingContext) -> None:
"""Build a Process object from the provided dictionary."""
super().__init__()
- self.metadata = getdefault(loadingContext.metadata, {}) # type: CWLObjectType
- self.provenance_object = None # type: Optional[ProvenanceProfile]
- self.parent_wf = None # type: Optional[ProvenanceProfile]
+ self.metadata: CWLObjectType = getdefault(loadingContext.metadata, {})
+ self.provenance_object: Optional["ProvenanceProfile"] = None
+ self.parent_wf: Optional["ProvenanceProfile"] = None
global SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY # pylint: disable=global-statement
if SCHEMA_FILE is None or SCHEMA_ANY is None or SCHEMA_DIR is None:
get_schema("v1.0")
@@ -569,9 +567,7 @@ def __init__(
self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
tool_requirements = self.tool.get("requirements", [])
if tool_requirements is None:
- raise SourceLine(
- self.tool, "requirements", ValidationException, debug
- ).makeError(
+ raise SourceLine(self.tool, "requirements", ValidationException, debug).makeError(
"If 'requirements' is present then it must be a list "
"or map/dictionary, not empty."
)
@@ -581,17 +577,16 @@ def __init__(
self.requirements.extend(
cast(
List[CWLObjectType],
- get_overrides(
- getdefault(loadingContext.overrides_list, []), self.tool["id"]
- ).get("requirements", []),
+ get_overrides(getdefault(loadingContext.overrides_list, []), self.tool["id"]).get(
+ "requirements", []
+ ),
)
)
self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))
tool_hints = self.tool.get("hints", [])
if tool_hints is None:
raise SourceLine(self.tool, "hints", ValidationException, debug).makeError(
- "If 'hints' is present then it must be a list "
- "or map/dictionary, not empty."
+                "If 'hints' is present then it must be a list or map/dictionary, not empty."
)
self.hints.extend(tool_hints)
# Versions of requirements and hints which aren't mutated.
@@ -600,7 +595,7 @@ def __init__(
self.doc_loader = loadingContext.loader
self.doc_schema = loadingContext.avsc_names
- self.formatgraph = None # type: Optional[Graph]
+ self.formatgraph: Optional[Graph] = None
if self.doc_loader is not None:
self.formatgraph = self.doc_loader.graph
@@ -611,7 +606,7 @@ def __init__(
strict=getdefault(loadingContext.strict, False),
)
- self.schemaDefs = {} # type: MutableMapping[str, CWLObjectType]
+ self.schemaDefs: MutableMapping[str, CWLObjectType] = {}
sd, _ = self.get_requirement("SchemaDefRequirement")
@@ -629,16 +624,16 @@ def __init__(
make_avsc_object(convert_to_dict(av), self.names)
# Build record schema from inputs
- self.inputs_record_schema = {
+ self.inputs_record_schema: CWLObjectType = {
"name": "input_record_schema",
"type": "record",
"fields": [],
- } # type: CWLObjectType
- self.outputs_record_schema = {
+ }
+ self.outputs_record_schema: CWLObjectType = {
"name": "outputs_record_schema",
"type": "record",
"fields": [],
- } # type: CWLObjectType
+ }
for key in ("inputs", "outputs"):
for i in self.tool[key]:
@@ -647,9 +642,7 @@ def __init__(
del c["id"]
if "type" not in c:
- raise ValidationException(
- "Missing 'type' in parameter '{}'".format(c["name"])
- )
+ raise ValidationException("Missing 'type' in parameter '{}'".format(c["name"]))
if "default" in c and "null" not in aslist(c["type"]):
nullable = ["null"]
@@ -660,13 +653,9 @@ def __init__(
c["type"] = avroize_type(c["type"], c["name"])
if key == "inputs":
- cast(
- List[CWLObjectType], self.inputs_record_schema["fields"]
- ).append(c)
+ cast(List[CWLObjectType], self.inputs_record_schema["fields"]).append(c)
elif key == "outputs":
- cast(
- List[CWLObjectType], self.outputs_record_schema["fields"]
- ).append(c)
+ cast(List[CWLObjectType], self.outputs_record_schema["fields"]).append(c)
with SourceLine(toolpath_object, "inputs", ValidationException, debug):
self.inputs_record_schema = cast(
@@ -690,9 +679,7 @@ def __init__(
if toolpath_object.get("class") is not None and not getdefault(
loadingContext.disable_js_validation, False
):
- validate_js_options = (
- None
- ) # type: Optional[Dict[str, Union[List[str], str, int]]]
+ validate_js_options: Optional[Dict[str, Union[List[str], str, int]]] = None
if loadingContext.js_hint_options_file is not None:
try:
with open(loadingContext.js_hint_options_file) as options_file:
@@ -746,10 +733,7 @@ def __init__(
else:
var_spool_cwl_detector(self.tool)
- def _init_job(
- self, joborder: CWLObjectType, runtime_context: RuntimeContext
- ) -> Builder:
-
+ def _init_job(self, joborder: CWLObjectType, runtime_context: RuntimeContext) -> Builder:
if self.metadata.get("cwlVersion") != INTERNAL_VERSION:
raise WorkflowException(
"Process object loaded with version '%s', must update to '%s' in order to execute."
@@ -764,7 +748,7 @@ def _init_job(
load_listing_req, _ = self.get_requirement("LoadListingRequirement")
load_listing = (
- cast(str, load_listing_req.get("loadListing"))
+ cast(LoadListingType, load_listing_req.get("loadListing"))
if load_listing_req is not None
else "no_listing"
)
@@ -776,9 +760,7 @@ def _init_job(
normalizeFilesDirs(job)
schema = self.names.get_name("input_record_schema", None)
if schema is None:
- raise WorkflowException(
- "Missing input record schema: " "{}".format(self.names)
- )
+            raise WorkflowException("Missing input record schema: {}".format(self.names))
validate_ex(
schema,
job,
@@ -787,7 +769,7 @@ def _init_job(
vocab=INPUT_OBJ_VOCAB,
)
- if load_listing and load_listing != "no_listing":
+ if load_listing != "no_listing":
get_listing(fs_access, job, recursive=(load_listing == "deep_listing"))
visit_class(job, ("File",), functools.partial(add_sizes, fs_access))
@@ -800,22 +782,26 @@ def _init_job(
v = job[k]
dircount = [0]
- def inc(d): # type: (List[int]) -> None
+ def inc(d: List[int]) -> None:
d[0] += 1
- visit_class(v, ("Directory",), lambda x: inc(dircount))
+ visit_class(v, ("Directory",), lambda x: inc(dircount)) # noqa: B023
if dircount[0] == 0:
continue
filecount = [0]
- visit_class(v, ("File",), lambda x: inc(filecount))
+ visit_class(v, ("File",), lambda x: inc(filecount)) # noqa: B023
if filecount[0] > FILE_COUNT_WARNING:
# Long lines in this message are okay, will be reflowed based on terminal columns.
_logger.warning(
strip_dup_lineno(
SourceLine(self.tool["inputs"], i, str).makeError(
- """Recursive directory listing has resulted in a large number of File objects (%s) passed to the input parameter '%s'. This may negatively affect workflow performance and memory use.
-
-If this is a problem, use the hint 'cwltool:LoadListingRequirement' with "shallow_listing" or "no_listing" to change the directory listing behavior:
+ "Recursive directory listing has resulted "
+ "in a large number of File objects (%s) passed "
+ "to the input parameter '%s'. This may "
+ "negatively affect workflow performance and memory use.\n\n"
+ "If this is a problem, use the hint 'cwltool:LoadListingRequirement' "
+ 'with "shallow_listing" or "no_listing" to change the directory '
+ """listing behavior:
$namespaces:
cwltool: "http://commonwl.org/cwltool#"
@@ -832,7 +818,7 @@ def inc(d): # type: (List[int]) -> None
except (ValidationException, WorkflowException) as err:
raise WorkflowException("Invalid job input record:\n" + str(err)) from err
- files = [] # type: List[CWLObjectType]
+ files: List[CWLObjectType] = []
bindings = CommentedSeq()
outdir = ""
tmpdir = ""
@@ -994,21 +980,20 @@ def evalResources(
):
if rsc is None:
continue
- mn = mx = None # type: Optional[Union[int, float]]
+ mn: Optional[Union[int, float]] = None
+ mx: Optional[Union[int, float]] = None
if rsc.get(a + "Min"):
- mn = cast(
- Union[int, float],
- eval_resource(
- builder, cast(Union[str, int, float], rsc[a + "Min"])
- ),
- )
+ with SourceLine(rsc, f"{a}Min", WorkflowException, runtimeContext.debug):
+ mn = cast(
+ Union[int, float],
+ eval_resource(builder, cast(Union[str, int, float], rsc[a + "Min"])),
+ )
if rsc.get(a + "Max"):
- mx = cast(
- Union[int, float],
- eval_resource(
- builder, cast(Union[str, int, float], rsc[a + "Max"])
- ),
- )
+ with SourceLine(rsc, f"{a}Max", WorkflowException, runtimeContext.debug):
+ mx = cast(
+ Union[int, float],
+ eval_resource(builder, cast(Union[str, int, float], rsc[a + "Max"])),
+ )
if mn is None:
mn = mx
elif mx is None:
@@ -1045,20 +1030,14 @@ def checkRequirements(
for i, entry in enumerate(
cast(MutableSequence[CWLObjectType], rec["requirements"])
):
- with SourceLine(
- rec["requirements"], i, UnsupportedRequirement, debug
- ):
- if (
- cast(str, entry["class"])
- not in supported_process_requirements
- ):
+ with SourceLine(rec["requirements"], i, UnsupportedRequirement, debug):
+ if cast(str, entry["class"]) not in supported_process_requirements:
raise UnsupportedRequirement(
f"Unsupported requirement {entry['class']}."
)
- def validate_hints(
- self, avsc_names: Names, hints: List[CWLObjectType], strict: bool
- ) -> None:
+ def validate_hints(self, avsc_names: Names, hints: List[CWLObjectType], strict: bool) -> None:
+ """Process the hints field."""
if self.doc_loader is None:
return
debug = _logger.isEnabledFor(logging.DEBUG)
@@ -1075,9 +1054,7 @@ def validate_hints(
avroname = avro_type_name(self.doc_loader.vocab[classname])
if avsc_names.get_name(avroname, None) is not None:
plain_hint = {
- key: r[key]
- for key in r
- if key not in self.doc_loader.identifiers
+ key: r[key] for key in r if key not in self.doc_loader.identifiers
} # strip identifiers
validate_ex(
cast(
@@ -1110,7 +1087,7 @@ def __str__(self) -> str:
return f"{type(self).__name__}: {self.tool['id']}"
-_names = set() # type: Set[str]
+_names: Set[str] = set()
def uniquename(stem: str, names: Optional[Set[str]] = None) -> str:
@@ -1148,8 +1125,8 @@ def nestdir(base: str, deps: CWLObjectType) -> CWLObjectType:
def mergedirs(
listing: MutableSequence[CWLObjectType],
) -> MutableSequence[CWLObjectType]:
- r = [] # type: List[CWLObjectType]
- ents = {} # type: Dict[str, CWLObjectType]
+ r: List[CWLObjectType] = []
+ ents: Dict[str, CWLObjectType] = {}
for e in listing:
basename = cast(str, e["basename"])
if basename not in ents:
@@ -1163,9 +1140,9 @@ def mergedirs(
if e.get("listing"):
# name already in entries
# merge it into the existing listing
- cast(
- List[CWLObjectType], ents[basename].setdefault("listing", [])
- ).extend(cast(List[CWLObjectType], e["listing"]))
+ cast(List[CWLObjectType], ents[basename].setdefault("listing", [])).extend(
+ cast(List[CWLObjectType], e["listing"])
+ )
for e in ents.values():
if e["class"] == "Directory" and "listing" in e:
e["listing"] = cast(
@@ -1188,30 +1165,25 @@ def scandeps(
urljoin: Callable[[str, str], str] = urllib.parse.urljoin,
nestdirs: bool = True,
) -> MutableSequence[CWLObjectType]:
-
- """Given a CWL document or input object, search for dependencies
- (references to external files) of 'doc' and return them as a list
- of File or Directory objects.
-
- The 'base' is the base URL for relative references.
+ """
+    Search for external file references in a CWL document or input object.
Looks for objects with 'class: File' or 'class: Directory' and
adds them to the list of dependencies.
- Anything in 'urlfields' is also added as a File dependency.
-
- Anything in 'reffields' (such as workflow step 'run') will be
- added as a dependency and also loaded (using the 'loadref'
- function) and recursively scanned for dependencies. Those
- dependencies will be added as secondary files to the primary file.
-
- If "nestdirs" is true, create intermediate directory objects when
- a file is located in a subdirectory under the starting directory.
- This is so that if the dependencies are materialized, they will
- produce the same relative file system locations.
-
+ :param base: the base URL for relative references.
+ :param doc: a CWL document or input object
+    :param urlfields: fields whose values are added as File dependencies
+    :param reffields: field names, like a workflow step 'run', whose values
+        will be added as dependencies and also loaded (using the 'loadref'
+        function) and recursively scanned for dependencies. Those
+        dependencies will be added as secondary files to the primary file.
+ :param nestdirs: if true, create intermediate directory objects when
+ a file is located in a subdirectory under the starting directory.
+ This is so that if the dependencies are materialized, they will
+ produce the same relative file system locations.
+ :returns: A list of File or Directory dependencies
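+
+    For example (illustrative only), a lone File input might yield
+    ``[{"class": "File", "location": "file:///data/input.txt"}]``.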
"""
-
r: MutableSequence[CWLObjectType] = []
if isinstance(doc, MutableMapping):
if "id" in doc:
@@ -1224,10 +1196,10 @@ def scandeps(
if doc.get("class") in ("File", "Directory") and "location" in urlfields:
u = cast(Optional[str], doc.get("location", doc.get("path")))
if u and not u.startswith("_:"):
- deps = {
+ deps: CWLObjectType = {
"class": doc["class"],
"location": urljoin(base, u),
- } # type: CWLObjectType
+ }
if "basename" in doc:
deps["basename"] = doc["basename"]
if doc["class"] == "Directory" and "listing" in doc:
@@ -1302,11 +1274,11 @@ def scandeps(
Union[MutableSequence[CWLObjectType], CWLObjectType],
loadref(base, u2),
)
- deps2 = {
+ deps2: CWLObjectType = {
"class": "File",
"location": subid,
"format": CWL_IANA,
- } # type: CWLObjectType
+ }
sf = scandeps(
subid,
sub,
diff --git a/cwltool/procgenerator.py b/cwltool/procgenerator.py
index aabbd354d..0f1801b2d 100644
--- a/cwltool/procgenerator.py
+++ b/cwltool/procgenerator.py
@@ -1,11 +1,10 @@
import copy
from typing import Dict, Optional, Tuple, cast
+from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import indent
-from ruamel.yaml.comments import CommentedMap
-
from .context import LoadingContext, RuntimeContext
from .errors import WorkflowException
from .load_tool import load_tool
@@ -23,9 +22,8 @@ def __init__(self, procgenerator: "ProcessGenerator") -> None:
self.jobout = None # type: Optional[CWLObjectType]
self.processStatus = None # type: Optional[str]
- def receive_output(
- self, jobout: Optional[CWLObjectType], processStatus: str
- ) -> None:
+ def receive_output(self, jobout: Optional[CWLObjectType], processStatus: str) -> None:
+ """Process the results."""
self.jobout = jobout
self.processStatus = processStatus
@@ -35,7 +33,6 @@ def job(
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
-
try:
yield from self.procgenerator.embedded_tool.job(
job_order, self.receive_output, runtimeContext
@@ -61,7 +58,7 @@ def job(
raise
except Exception as exc:
_logger.exception("Unexpected exception")
- raise WorkflowException(str(exc))
+ raise WorkflowException(str(exc)) from exc
class ProcessGenerator(Process):
@@ -87,7 +84,7 @@ def __init__(
raise WorkflowException(
"Tool definition %s failed validation:\n%s"
% (toolpath_object["run"], indent(str(vexc)))
- )
+ ) from vexc
def job(
self,
@@ -95,9 +92,7 @@ def job(
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
- return ProcessGeneratorJob(self).job(
- job_order, output_callbacks, runtimeContext
- )
+ return ProcessGeneratorJob(self).job(job_order, output_callbacks, runtimeContext)
def result(
self,
@@ -117,7 +112,7 @@ def result(
raise WorkflowException(
"Tool definition %s failed validation:\n%s"
% (jobout["runProcess"], indent(str(vexc)))
- )
+ ) from vexc
if "runInputs" in jobout:
runinputs = cast(CWLObjectType, jobout["runInputs"])
diff --git a/cwltool/resolver.py b/cwltool/resolver.py
index cb5228100..e48957f26 100644
--- a/cwltool/resolver.py
+++ b/cwltool/resolver.py
@@ -25,13 +25,9 @@ def resolve_local(document_loader: Optional[Loader], uri: str) -> Optional[str]:
return pathobj.as_uri()
sharepaths = [
- os.environ.get(
- "XDG_DATA_HOME", os.path.join(os.path.expanduser("~"), ".local", "share")
- )
+ os.environ.get("XDG_DATA_HOME", os.path.join(os.path.expanduser("~"), ".local", "share"))
]
- sharepaths.extend(
- os.environ.get("XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":")
- )
+ sharepaths.extend(os.environ.get("XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":"))
shares = [os.path.join(s, "commonwl", uri) for s in sharepaths]
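+    # e.g. for uri "my-tool.cwl" this typically checks ~/.local/share/commonwl/,
+    # /usr/local/share/commonwl/, and /usr/share/commonwl/ (default XDG paths)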
_logger.debug("Search path is %s", shares)
@@ -53,15 +49,17 @@ def tool_resolver(document_loader: Loader, uri: str) -> Optional[str]:
ga4gh_tool_registries = ["https://dockstore.org/api"]
-# in the TRS registry, a primary descriptor can be reached at {0}/api/ga4gh/v2/tools/{1}/versions/{2}/plain-CWL/descriptor
-# The primary descriptor is a CommandLineTool in the case that the files endpoint only describes one file
-# When the primary descriptor is a Workflow, files need to be imported without stripping off "descriptor", looking at the files endpoint is a workaround
+# in the TRS registry, a primary descriptor can be reached at
+# {0}/api/ga4gh/v2/tools/{1}/versions/{2}/plain-CWL/descriptor
+# The primary descriptor is a CommandLineTool in the case that the files
+# endpoint only describes one file
+# When the primary descriptor is a Workflow, files need to be imported without
+# stripping off "descriptor"; looking at the files endpoint is a workaround
# tested with TRS version 2.0.0-beta.2
-# TODO not stripping off "descriptor" when looking for local imports would also work https://github.com/ga4gh/tool-registry-service-schemas/blob/2.0.0-beta.2/src/main/resources/swagger/ga4gh-tool-discovery.yaml#L273
+# TODO not stripping off "descriptor" when looking for local imports would also
+# work https://github.com/ga4gh/tool-registry-service-schemas/blob/2.0.0-beta.2/src/main/resources/swagger/ga4gh-tool-discovery.yaml#L273 # noqa: B950
GA4GH_TRS_FILES = "{0}/api/ga4gh/v2/tools/{1}/versions/{2}/CWL/files"
-GA4GH_TRS_PRIMARY_DESCRIPTOR = (
- "{0}/api/ga4gh/v2/tools/{1}/versions/{2}/plain-CWL/descriptor/{3}"
-)
+GA4GH_TRS_PRIMARY_DESCRIPTOR = "{0}/api/ga4gh/v2/tools/{1}/versions/{2}/plain-CWL/descriptor/{3}"
def resolve_ga4gh_tool(document_loader: Loader, uri: str) -> Optional[str]:
diff --git a/cwltool/run_job.py b/cwltool/run_job.py
index ac2f1025d..a8fe32496 100644
--- a/cwltool/run_job.py
+++ b/cwltool/run_job.py
@@ -12,9 +12,7 @@ def handle_software_environment(cwl_env: Dict[str, str], script: str) -> Dict[st
exec_env["_CWLTOOL"] = "1"
res = subprocess.run(["bash", script], shell=False, env=exec_env) # nosec
if res.returncode != 0:
- sys.stderr.write(
- "Error while using SoftwareRequirements to modify environment\n"
- )
+ sys.stderr.write("Error while using SoftwareRequirements to modify environment\n")
return cwl_env
env = cwl_env.copy()
diff --git a/cwltool/secrets.py b/cwltool/secrets.py
index 7356c4c91..b3f16a3a9 100644
--- a/cwltool/secrets.py
+++ b/cwltool/secrets.py
@@ -10,7 +10,7 @@ class SecretStore:
def __init__(self) -> None:
"""Initialize the secret store."""
- self.secrets = {} # type: Dict[str, str]
+ self.secrets: Dict[str, str] = {}
def add(self, value: Optional[CWLOutputType]) -> Optional[CWLOutputType]:
"""
diff --git a/cwltool/singularity.py b/cwltool/singularity.py
index 7ba7f802b..e083eb092 100644
--- a/cwltool/singularity.py
+++ b/cwltool/singularity.py
@@ -1,4 +1,4 @@
-"""Support for executing Docker containers using the Singularity 2.x engine."""
+"""Support for executing Docker format containers using Singularity {2,3}.x or Apptainer 1.x."""
import logging
import os
@@ -32,22 +32,18 @@
def get_version() -> Tuple[List[int], str]:
"""
- Parse the output of 'singularity --version' to determine the singularity flavor /
- distribution (singularity, singularity-ce or apptainer) and the singularity version.
+ Parse the output of 'singularity --version' to determine the flavor and version.
+
Both pieces of information will be cached.
- Returns
- -------
- A tuple containing:
- - A tuple with major and minor version numbers as integer.
- - A string with the name of the singularity flavor.
+ :returns: A tuple containing:
+        - A list of the version number components as integers.
+ - A string with the name of the singularity flavor.
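+
+        For example, the output "singularity-ce version 3.10.4" yields
+        ``([3, 10, 4], "singularity-ce")``.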
"""
global _SINGULARITY_VERSION # pylint: disable=global-statement
global _SINGULARITY_FLAVOR # pylint: disable=global-statement
if _SINGULARITY_VERSION is None:
- version_output = check_output( # nosec
- ["singularity", "--version"], universal_newlines=True
- ).strip()
+ version_output = check_output(["singularity", "--version"], text=True).strip() # nosec
version_match = re.match(r"(.+) version ([0-9\.]+)", version_output)
if version_match is None:
@@ -57,9 +53,7 @@ def get_version() -> Tuple[List[int], str]:
_SINGULARITY_VERSION = [int(i) for i in version_string.split(".")]
_SINGULARITY_FLAVOR = version_match.group(1)
- _logger.debug(
- f"Singularity version: {version_string}" " ({_SINGULARITY_FLAVOR}."
- )
+ _logger.debug(f"Singularity version: {version_string}" " ({_SINGULARITY_FLAVOR}.")
return (_SINGULARITY_VERSION, _SINGULARITY_FLAVOR)
@@ -125,7 +119,7 @@ def __init__(
self,
builder: Builder,
joborder: CWLObjectType,
- make_path_mapper: Callable[..., PathMapper],
+ make_path_mapper: Callable[[List[CWLObjectType], str, RuntimeContext, bool], PathMapper],
requirements: List[CWLObjectType],
hints: List[CWLObjectType],
name: str,
@@ -159,13 +153,8 @@ def get_image(
elif is_version_2_6() and "SINGULARITY_PULLFOLDER" in os.environ:
cache_folder = os.environ["SINGULARITY_PULLFOLDER"]
- if (
- "dockerImageId" not in dockerRequirement
- and "dockerPull" in dockerRequirement
- ):
- match = re.search(
- pattern=r"([a-z]*://)", string=dockerRequirement["dockerPull"]
- )
+ if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
+ match = re.search(pattern=r"([a-z]*://)", string=dockerRequirement["dockerPull"])
img_name = _normalize_image_id(dockerRequirement["dockerPull"])
candidates.append(img_name)
if is_version_3_or_newer():
@@ -175,9 +164,7 @@ def get_image(
else:
dockerRequirement["dockerImageId"] = img_name
if not match:
- dockerRequirement["dockerPull"] = (
- "docker://" + dockerRequirement["dockerPull"]
- )
+ dockerRequirement["dockerPull"] = "docker://" + dockerRequirement["dockerPull"]
elif "dockerImageId" in dockerRequirement:
if os.path.isfile(dockerRequirement["dockerImageId"]):
found = True
@@ -224,9 +211,7 @@ def get_image(
"pull",
"--force",
"--name",
- "{}/{}".format(
- cache_folder, dockerRequirement["dockerImageId"]
- ),
+ "{}/{}".format(cache_folder, dockerRequirement["dockerImageId"]),
str(dockerRequirement["dockerPull"]),
]
@@ -305,16 +290,13 @@ def get_from_requirements(
raise WorkflowException("singularity executable is not available")
if not self.get_image(cast(Dict[str, str], r), pull_image, force_pull):
- raise WorkflowException(
- "Container image {} not found".format(r["dockerImageId"])
- )
+ raise WorkflowException("Container image {} not found".format(r["dockerImageId"]))
return os.path.abspath(cast(str, r["dockerImageId"]))
@staticmethod
- def append_volume(
- runtime: List[str], source: str, target: str, writable: bool = False
- ) -> None:
+ def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None:
+ """Add binding arguments to the runtime list."""
runtime.append("--bind")
# Mounts are writable by default, so 'rw' is optional and not
# supported (due to a bug) in some 3.6 series releases.
@@ -401,19 +383,13 @@ def add_writable_directory_volume(
ensure_writable(host_outdir_tgt)
else:
if self.inplace_update:
- self.append_volume(
- runtime, volume.resolved, volume.target, writable=True
- )
+ self.append_volume(runtime, volume.resolved, volume.target, writable=True)
else:
if not host_outdir_tgt:
tmpdir = create_tmp_dir(tmpdir_prefix)
- new_dir = os.path.join(
- tmpdir, os.path.basename(volume.resolved)
- )
+ new_dir = os.path.join(tmpdir, os.path.basename(volume.resolved))
shutil.copytree(volume.resolved, new_dir)
- self.append_volume(
- runtime, new_dir, volume.target, writable=True
- )
+ self.append_volume(runtime, new_dir, volume.target, writable=True)
else:
shutil.copytree(volume.resolved, host_outdir_tgt)
ensure_writable(host_outdir_tgt or new_dir)
diff --git a/cwltool/singularity_utils.py b/cwltool/singularity_utils.py
index d91bb8f58..e4cc88918 100644
--- a/cwltool/singularity_utils.py
+++ b/cwltool/singularity_utils.py
@@ -1,11 +1,11 @@
-"""Support for executing Docker containers using the Singularity 2.x engine."""
+"""Support for executing Docker format containers using Singularity {2,3}.x or Apptainer 1.x."""
import os
import os.path
from subprocess import DEVNULL, PIPE, Popen, TimeoutExpired # nosec
from typing import Optional
-_USERNS = None # type: Optional[bool]
+_USERNS: Optional[bool] = None
def singularity_supports_userns() -> bool:
diff --git a/cwltool/software_requirements.py b/cwltool/software_requirements.py
index 37eafd0b6..9360e8080 100644
--- a/cwltool/software_requirements.py
+++ b/cwltool/software_requirements.py
@@ -7,12 +7,20 @@
ways to adapt new package managers and such as well.
"""
-import argparse # pylint: disable=unused-import
+import argparse
import os
import string
-from typing import Dict, List, MutableMapping, MutableSequence, Optional, Union, cast
-
-from typing_extensions import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ List,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Union,
+ cast,
+)
from .utils import HasReqsHints
@@ -68,6 +76,8 @@ def __init__(self, args: argparse.Namespace) -> None:
self.dependency_resolvers_config_file = None
else:
self.use_tool_dependencies = False
+ if self.tool_dependency_dir and not os.path.exists(self.tool_dependency_dir):
+ os.makedirs(self.tool_dependency_dir)
def build_job_script(self, builder: "Builder", command: List[str]) -> str:
ensure_galaxy_lib_available()
@@ -78,31 +88,29 @@ def build_job_script(self, builder: "Builder", command: List[str]) -> str:
app_config = {
"conda_auto_install": True,
"conda_auto_init": True,
+ "debug": builder.debug,
}
- tool_dependency_manager = deps.build_dependency_manager(
+ tool_dependency_manager: "deps.DependencyManager" = deps.build_dependency_manager(
app_config_dict=app_config,
resolution_config_dict=resolution_config_dict,
conf_file=self.dependency_resolvers_config_file,
- ) # type: deps.DependencyManager
- dependencies = get_dependencies(builder)
- handle_dependencies = "" # str
- if dependencies:
+ )
+ handle_dependencies: str = ""
+ if dependencies := get_dependencies(builder):
handle_dependencies = "\n".join(
tool_dependency_manager.dependency_shell_commands(
dependencies, job_directory=builder.tmpdir
)
)
- template_kwds = dict(
- handle_dependencies=handle_dependencies
- ) # type: Dict[str, str]
+ template_kwds: Dict[str, str] = dict(handle_dependencies=handle_dependencies)
job_script = COMMAND_WITH_DEPENDENCIES_TEMPLATE.substitute(template_kwds)
return job_script
def get_dependencies(builder: HasReqsHints) -> ToolRequirements:
(software_requirement, _) = builder.get_requirement("SoftwareRequirement")
- dependencies = [] # type: List[ToolRequirement]
+ dependencies: List[Union["ToolRequirement", Dict[str, Any]]] = []
if software_requirement and software_requirement.get("packages"):
packages = cast(
MutableSequence[MutableMapping[str, Union[str, MutableSequence[str]]]],
@@ -131,7 +139,7 @@ def get_dependencies(builder: HasReqsHints) -> ToolRequirements:
def get_container_from_software_requirements(
- use_biocontainers: bool, builder: HasReqsHints
+ use_biocontainers: bool, builder: HasReqsHints, container_image_cache_path: Optional[str] = "."
) -> Optional[str]:
if use_biocontainers:
ensure_galaxy_lib_available()
@@ -139,19 +147,19 @@ def get_container_from_software_requirements(
from galaxy.tool_util.deps.containers import ContainerRegistry
from galaxy.tool_util.deps.dependencies import AppInfo, ToolInfo
- app_info = AppInfo(
+ app_info: AppInfo = AppInfo(
involucro_auto_init=True,
enable_mulled_containers=True,
- container_image_cache_path=".",
- ) # type: AppInfo
- container_registry = ContainerRegistry(app_info) # type: ContainerRegistry
+ container_image_cache_path=container_image_cache_path,
+ )
+ container_registry: ContainerRegistry = ContainerRegistry(app_info)
requirements = get_dependencies(builder)
- tool_info = ToolInfo(requirements=requirements) # type: ToolInfo
+ tool_info: ToolInfo = ToolInfo(requirements=requirements)
container_description = container_registry.find_best_container_description(
[DOCKER_CONTAINER_TYPE], tool_info
)
if container_description:
- return cast(Optional[str], container_description.identifier)
+ return container_description.identifier
return None
diff --git a/cwltool/stdfsaccess.py b/cwltool/stdfsaccess.py
index 0c8eea15d..069289111 100644
--- a/cwltool/stdfsaccess.py
+++ b/cwltool/stdfsaccess.py
@@ -32,9 +32,7 @@ def _abs(self, p: str) -> str:
return abspath(p, self.basedir)
def glob(self, pattern: str) -> List[str]:
- return [
- file_uri(str(self._abs(line))) for line in glob.glob(self._abs(pattern))
- ]
+ return [file_uri(str(self._abs(line))) for line in glob.glob(self._abs(pattern))]
def open(self, fn: str, mode: str) -> IO[Any]:
return open(self._abs(fn), mode)
@@ -52,10 +50,7 @@ def isdir(self, fn: str) -> bool:
return os.path.isdir(self._abs(fn))
def listdir(self, fn: str) -> List[str]:
- return [
- abspath(urllib.parse.quote(entry), fn)
- for entry in os.listdir(self._abs(fn))
- ]
+ return [abspath(urllib.parse.quote(entry), fn) for entry in os.listdir(self._abs(fn))]
def join(self, path, *paths): # type: (str, *str) -> str
return os.path.join(path, *paths)
diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py
index 147a8143d..f6df7e69f 100644
--- a/cwltool/subgraph.py
+++ b/cwltool/subgraph.py
@@ -36,7 +36,6 @@ def subgraph_visit(
visited: Set[str],
direction: str,
) -> None:
-
if current in visited:
return
visited.add(current)
@@ -175,12 +174,8 @@ def get_subgraph(
if nodes[v].type == STEP:
wfstep = find_step(tool.steps, v, loading_context)[0]
if wfstep is not None:
- for inp in cast(
- MutableSequence[CWLObjectType], wfstep["inputs"]
- ):
- if "source" in inp and u in cast(
- CWLObjectType, inp["source"]
- ):
+ for inp in cast(MutableSequence[CWLObjectType], wfstep["inputs"]):
+ if "source" in inp and u in cast(CWLObjectType, inp["source"]):
rewire[u] = (rn, cast(CWLObjectType, inp["type"]))
break
else:
@@ -198,11 +193,7 @@ def get_subgraph(
continue
if isinstance(in_port["source"], MutableSequence):
in_port["source"] = CommentedSeq(
- [
- rewire[s][0]
- for s in in_port["source"]
- if s in rewire
- ]
+ [rewire[s][0] for s in in_port["source"] if s in rewire]
)
elif in_port["source"] in rewire:
in_port["source"] = rewire[in_port["source"]][0]
@@ -216,9 +207,7 @@ def get_subgraph(
return extracted
-def get_step(
- tool: Workflow, step_id: str, loading_context: LoadingContext
-) -> CommentedMap:
+def get_step(tool: Workflow, step_id: str, loading_context: LoadingContext) -> CommentedMap:
"""Extract a single WorkflowStep for the given step_id."""
extracted = CommentedMap()
diff --git a/cwltool/udocker.py b/cwltool/udocker.py
index 9db34d7f4..6ab54ff40 100644
--- a/cwltool/udocker.py
+++ b/cwltool/udocker.py
@@ -9,10 +9,6 @@ class UDockerCommandLineJob(DockerCommandLineJob):
"""Runs a CommandLineJob in a software container using the udocker engine."""
@staticmethod
- def append_volume(
- runtime: List[str], source: str, target: str, writable: bool = False
- ) -> None:
+ def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None:
"""Add binding arguments to the runtime list."""
- runtime.append(
- "--volume={}:{}:{}".format(source, target, "rw" if writable else "ro")
- )
+ runtime.append("--volume={}:{}:{}".format(source, target, "rw" if writable else "ro"))
diff --git a/cwltool/update.py b/cwltool/update.py
index d5c753ef4..a3e8569c3 100644
--- a/cwltool/update.py
+++ b/cwltool/update.py
@@ -11,12 +11,11 @@
cast,
)
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader
from schema_salad.sourceline import SourceLine
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
-
from .loghandler import _logger
from .utils import CWLObjectType, CWLOutputType, aslist, visit_class, visit_field
@@ -55,46 +54,37 @@ def v1_0to1_1(
def rewrite_requirements(t: CWLObjectType) -> None:
if "requirements" in t:
for r in cast(MutableSequence[CWLObjectType], t["requirements"]):
- if isinstance(r, MutableMapping):
- cls = cast(str, r["class"])
- if cls in rewrite:
- r["class"] = rewrite[cls]
- else:
- raise ValidationException(
- "requirements entries must be dictionaries: {} {}.".format(
- type(r), r
- )
- )
+ cls = cast(str, r["class"])
+ if cls in rewrite:
+ r["class"] = rewrite[cls]
if "hints" in t:
- for r in cast(MutableSequence[CWLObjectType], t["hints"]):
+ for index, r in enumerate(cast(MutableSequence[CWLObjectType], t["hints"])):
if isinstance(r, MutableMapping):
+ if "class" not in r:
+ raise SourceLine(r, None, ValidationException).makeError(
+ "'hints' entry missing required key 'class'."
+ )
cls = cast(str, r["class"])
if cls in rewrite:
r["class"] = rewrite[cls]
else:
- raise ValidationException(
- f"hints entries must be dictionaries: {type(r)} {r}."
+ raise SourceLine(t["hints"], index, ValidationException).makeError(
+ f"'hints' entries must be dictionaries: {type(r)} {r}."
)
if "steps" in t:
for s in cast(MutableSequence[CWLObjectType], t["steps"]):
- if isinstance(s, MutableMapping):
- rewrite_requirements(s)
- else:
- raise ValidationException(
- f"steps entries must be dictionaries: {type(s)} {s}."
- )
+ rewrite_requirements(s)
- def update_secondaryFiles(t, top=False):
- # type: (CWLOutputType, bool) -> Union[MutableSequence[MutableMapping[str, str]], MutableMapping[str, str]]
+ def update_secondaryFiles(
+ t: CWLOutputType, top: bool = False
+ ) -> Union[MutableSequence[MutableMapping[str, str]], MutableMapping[str, str]]:
if isinstance(t, CommentedSeq):
new_seq = copy.deepcopy(t)
for index, entry in enumerate(t):
new_seq[index] = update_secondaryFiles(entry)
return new_seq
elif isinstance(t, MutableSequence):
- return CommentedSeq(
- [update_secondaryFiles(cast(CWLOutputType, p)) for p in t]
- )
+ return CommentedSeq([update_secondaryFiles(cast(CWLOutputType, p)) for p in t])
elif isinstance(t, MutableMapping):
return cast(MutableMapping[str, str], t)
elif top:
@@ -135,9 +125,7 @@ def fix_inputBinding(t: CWLObjectType) -> None:
proc["hints"].insert(0, na)
- ll = CommentedMap(
- [("class", "LoadListingRequirement"), ("loadListing", "deep_listing")]
- )
+ ll = CommentedMap([("class", "LoadListingRequirement"), ("loadListing", "deep_listing")])
ll.lc.filename = comment_filename
proc["hints"].insert(
0,
@@ -219,20 +207,20 @@ def v1_2_0dev5to1_2(
"v1.2",
]
-UPDATES = {
+UPDATES: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]] = {
"v1.0": v1_0to1_1,
"v1.1": v1_1to1_2,
"v1.2": None,
-} # type: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]]
+}
-DEVUPDATES = {
+DEVUPDATES: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]] = {
"v1.1.0-dev1": v1_1_0dev1to1_1,
"v1.2.0-dev1": v1_2_0dev1todev2,
"v1.2.0-dev2": v1_2_0dev2todev3,
"v1.2.0-dev3": v1_2_0dev3todev4,
"v1.2.0-dev4": v1_2_0dev4todev5,
"v1.2.0-dev5": v1_2_0dev5to1_2,
-} # type: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]]
+}
ALLUPDATES = UPDATES.copy()
@@ -259,7 +247,7 @@ def checkversion(
Returns the document and the validated version string.
"""
- cdoc = None # type: Optional[CommentedMap]
+ cdoc: Optional[CommentedMap] = None
if isinstance(doc, CommentedSeq):
if not isinstance(metadata, CommentedMap):
raise Exception("Expected metadata to be CommentedMap")
@@ -319,9 +307,7 @@ def update(
(cdoc, version) = checkversion(doc, metadata, enable_dev)
originalversion = copy.copy(version)
- nextupdate = (
- identity
- ) # type: Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]
+ nextupdate: Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]] = identity
while version != update_to and nextupdate:
(cdoc, version) = nextupdate(cdoc, loader, baseuri)
diff --git a/cwltool/utils.py b/cwltool/utils.py
index 697b1c2bd..165492feb 100644
--- a/cwltool/utils.py
+++ b/cwltool/utils.py
@@ -1,5 +1,13 @@
"""Shared functions and other definitions."""
import collections
+
+try:
+ import fcntl
+except ImportError:
+    # Guard against `from .utils import ...` on Windows.
+ # See windows_check() in main.py
+ pass
+import importlib.metadata
import os
import random
import shutil
@@ -16,33 +24,34 @@
from itertools import zip_longest
from pathlib import Path, PurePosixPath
from tempfile import NamedTemporaryFile
-from types import ModuleType
from typing import (
IO,
+ TYPE_CHECKING,
Any,
Callable,
+ Deque,
Dict,
Generator,
Iterable,
List,
+ Literal,
MutableMapping,
MutableSequence,
NamedTuple,
Optional,
Set,
Tuple,
+ TypedDict,
Union,
cast,
)
-import pkg_resources
import requests
from cachecontrol import CacheControl
from cachecontrol.caches import FileCache
-from mypy_extensions import TypedDict, mypyc_attr
+from mypy_extensions import mypyc_attr
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader
-from typing_extensions import TYPE_CHECKING, Deque
if TYPE_CHECKING:
from .command_line_tool import CallbackJob, ExpressionJob
@@ -50,13 +59,13 @@
from .stdfsaccess import StdFsAccess
from .workflow_job import WorkflowJob
-__random_outdir = None # type: Optional[str]
+__random_outdir: Optional[str] = None
CONTENT_LIMIT = 64 * 1024
DEFAULT_TMP_PREFIX = tempfile.gettempdir() + os.path.sep
-processes_to_kill = collections.deque() # type: Deque[subprocess.Popen[str]]
+processes_to_kill: Deque["subprocess.Popen[str]"] = collections.deque()
CWLOutputAtomType = Union[
None,
@@ -65,15 +74,11 @@
int,
float,
MutableSequence[
- Union[
- None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any]
- ]
+ Union[None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any]]
],
MutableMapping[
str,
- Union[
- None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any]
- ],
+ Union[None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any]],
],
]
CWLOutputType = Union[
@@ -85,9 +90,9 @@
MutableMapping[str, CWLOutputAtomType],
]
CWLObjectType = MutableMapping[str, Optional[CWLOutputType]]
-JobsType = Union[
- "CommandLineJob", "JobBase", "WorkflowJob", "ExpressionJob", "CallbackJob"
-]
+"""Typical raw dictionary found in lightly parsed CWL."""
+
+JobsType = Union["CommandLineJob", "JobBase", "WorkflowJob", "ExpressionJob", "CallbackJob"]
JobsGeneratorType = Generator[Optional[JobsType], None, None]
OutputCallbackType = Callable[[Optional[CWLObjectType], str], None]
ResolverType = Callable[["Loader", str], Optional[str]]
@@ -99,27 +104,27 @@
"DirectoryType", {"class": str, "listing": List[CWLObjectType], "basename": str}
)
JSONAtomType = Union[Dict[str, Any], List[Any], str, int, float, bool, None]
-JSONType = Union[
- Dict[str, JSONAtomType], List[JSONAtomType], str, int, float, bool, None
-]
-WorkflowStateItem = NamedTuple(
- "WorkflowStateItem",
- [
- ("parameter", CWLObjectType),
- ("value", Optional[CWLOutputType]),
- ("success", str),
- ],
-)
+JSONType = Union[Dict[str, JSONAtomType], List[JSONAtomType], str, int, float, bool, None]
+
+
+class WorkflowStateItem(NamedTuple):
+ """Workflow state item."""
+
+ parameter: CWLObjectType
+ value: Optional[CWLOutputType]
+ success: str
+
ParametersType = List[CWLObjectType]
StepType = CWLObjectType # WorkflowStep
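+# the three valid values of the CWL v1.1+ LoadListingEnum: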
+LoadListingType = Union[Literal["no_listing"], Literal["shallow_listing"], Literal["deep_listing"]]
+
def versionstring() -> str:
"""Version of CWLtool used to execute the workflow."""
- pkg = pkg_resources.require("cwltool")
- if pkg:
- return f"{sys.argv[0]} {pkg[0].version}"
+ if pkg := importlib.metadata.version("cwltool"):
+ return f"{sys.argv[0]} {pkg}"
return "{} {}".format(sys.argv[0], "unknown version")
@@ -180,8 +185,7 @@ def cmp_like_py2(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> int:
def bytes2str_in_dicts(
inp: Union[MutableMapping[str, Any], MutableSequence[Any], Any],
-):
- # type: (...) -> Union[str, MutableSequence[Any], MutableMapping[str, Any]]
+) -> Union[str, MutableSequence[Any], MutableMapping[str, Any]]:
"""
Convert any present byte string to unicode string, inplace.
@@ -243,40 +247,20 @@ def random_outdir() -> str:
return __random_outdir
-#
-# Simple multi-platform (fcntl/msvrt) file locking wrapper
-#
-fcntl = None # type: Optional[ModuleType]
-msvcrt = None # type: Optional[ModuleType]
-try:
- import fcntl # type: ignore
-except ImportError:
- import msvcrt # type: ignore
-
-
def shared_file_lock(fd: IO[Any]) -> None:
- if fcntl:
- fcntl.flock(fd.fileno(), fcntl.LOCK_SH)
- elif msvcrt:
- msvcrt.locking(fd.fileno(), msvcrt.LK_LOCK, 1024)
+ fcntl.flock(fd.fileno(), fcntl.LOCK_SH)
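+    # e.g. with open(lock_path, "w") as fd: shared_file_lock(fd)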
def upgrade_lock(fd: IO[Any]) -> None:
- if fcntl:
- fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
- elif msvcrt:
- pass
+ fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
-def adjustFileObjs(
- rec, op
-): # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
+def adjustFileObjs(rec: Any, op: Union[Callable[..., Any], "partial[Any]"]) -> None:
"""Apply an update function to each File object in the object `rec`."""
visit_class(rec, ("File",), op)
-def adjustDirObjs(rec, op):
- # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
+def adjustDirObjs(rec: Any, op: Union[Callable[..., Any], "partial[Any]"]) -> None:
"""Apply an update function to each Directory object in the object `rec`."""
visit_class(rec, ("Directory",), op)
@@ -294,7 +278,7 @@ def mark(d: Dict[str, str]) -> None:
adjustDirObjs(e, mark)
dd = []
- markdup = set() # type: Set[str]
+ markdup: Set[str] = set()
for r in listing:
if r["location"] not in marksub and r["location"] not in markdup:
dd.append(r)
@@ -303,28 +287,27 @@ def mark(d: Dict[str, str]) -> None:
return dd
-def get_listing(
- fs_access: "StdFsAccess", rec: CWLObjectType, recursive: bool = True
-) -> None:
+def get_listing(fs_access: "StdFsAccess", rec: CWLObjectType, recursive: bool = True) -> None:
+ """Expand, recursively, any 'listing' fields in a Directory."""
if rec.get("class") != "Directory":
- finddirs = [] # type: List[CWLObjectType]
+ finddirs: List[CWLObjectType] = []
visit_class(rec, ("Directory",), finddirs.append)
for f in finddirs:
get_listing(fs_access, f, recursive=recursive)
return
if "listing" in rec:
return
- listing = [] # type: List[CWLOutputAtomType]
+ listing: List[CWLOutputAtomType] = []
loc = cast(str, rec["location"])
for ld in fs_access.listdir(loc):
parse = urllib.parse.urlparse(ld)
bn = os.path.basename(urllib.request.url2pathname(parse.path))
if fs_access.isdir(ld):
- ent = {
+ ent: MutableMapping[str, Any] = {
"class": "Directory",
"location": ld,
"basename": bn,
- } # type: MutableMapping[str, Any]
+ }
if recursive:
get_listing(fs_access, ent, recursive)
listing.append(ent)
@@ -333,7 +316,7 @@ def get_listing(
rec["listing"] = listing
-def trim_listing(obj): # type: (Dict[str, Any]) -> None
+def trim_listing(obj: Dict[str, Any]) -> None:
"""
Remove 'listing' field from Directory objects that are file references.
@@ -408,7 +391,8 @@ def add_writable_flag(p: str) -> None:
add_writable_flag(path)
-def ensure_non_writable(path): # type: (str) -> None
+def ensure_non_writable(path: str) -> None:
+ """Attempt to change permissions to ensure that a path is not writable."""
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for name in files:
@@ -436,15 +420,13 @@ def normalizeFilesDirs(
]
]
) -> None:
- def addLocation(d): # type: (Dict[str, Any]) -> None
+ def addLocation(d: Dict[str, Any]) -> None:
if "location" not in d:
if d["class"] == "File" and ("contents" not in d):
raise ValidationException(
"Anonymous file object must have 'contents' and 'basename' fields."
)
- if d["class"] == "Directory" and (
- "listing" not in d or "basename" not in d
- ):
+ if d["class"] == "Directory" and ("listing" not in d or "basename" not in d):
raise ValidationException(
"Anonymous directory object must have 'listing' and 'basename' fields."
)
@@ -511,9 +493,8 @@ def __init__(self) -> None:
self.requirements: List[CWLObjectType] = []
self.hints: List[CWLObjectType] = []
- def get_requirement(
- self, feature: str
- ) -> Tuple[Optional[CWLObjectType], Optional[bool]]:
+ def get_requirement(self, feature: str) -> Tuple[Optional[CWLObjectType], Optional[bool]]:
+ """Retrieve the named feature from the requirements field, or the hints field."""
for item in reversed(self.requirements):
if item["class"] == feature:
return (item, True)
diff --git a/cwltool/validate_js.py b/cwltool/validate_js.py
index 9c6379f7b..3b54c4d37 100644
--- a/cwltool/validate_js.py
+++ b/cwltool/validate_js.py
@@ -18,7 +18,8 @@
from cwl_utils.errors import SubstitutionError
from cwl_utils.expression import scanner as scan_expression
from cwl_utils.sandboxjs import code_fragment_to_js, exec_js_process
-from pkg_resources import resource_stream
+from importlib_resources import files
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import (
ArraySchema,
EnumSchema,
@@ -30,14 +31,12 @@
from schema_salad.utils import json_dumps
from schema_salad.validate import validate_ex
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
-
from .errors import WorkflowException
from .loghandler import _logger
-def is_expression(tool, schema):
- # type: (Any, Optional[Schema]) -> bool
+def is_expression(tool: Any, schema: Optional[Schema]) -> bool:
+ """Test a field/schema combo to see if it is a CWL Expression."""
return (
isinstance(schema, EnumSchema)
and schema.name == "org.w3id.cwl.cwl.Expression"
@@ -46,12 +45,13 @@ def is_expression(tool, schema):
class SuppressLog(logging.Filter):
- def __init__(self, name): # type: (str) -> None
+ def __init__(self, name: str) -> None:
"""Initialize this log suppressor."""
name = str(name)
super().__init__(name)
- def filter(self, record): # type: (logging.LogRecord) -> bool
+ def filter(self, record: logging.LogRecord) -> bool:
+ """Never accept a record."""
return False
@@ -87,20 +87,12 @@ def get_expressions(
if not isinstance(tool, MutableSequence):
return []
- def tmp_expr(
- x: Tuple[int, Union[CommentedMap, str, CommentedSeq]]
- ) -> List[Tuple[str, Optional[SourceLine]]]:
- # using a lambda for this broke mypyc v0.910 and before
- return get_expressions(
- x[1],
- cast(ArraySchema, schema).items,
- SourceLine(tool, x[0], include_traceback=debug),
- )
-
return list(
itertools.chain(
*map(
- tmp_expr,
+ lambda x: get_expressions(
+ x[1], getattr(schema, "items"), SourceLine(tool, x[0]) # noqa: B009
+ ),
enumerate(tool),
)
)
@@ -150,29 +142,25 @@ def jshint_js(
"esversion": 5,
}
- with resource_stream(__name__, "jshint/jshint.js") as res:
- # NOTE: we need a global variable for lodash (which jshint depends on)
- jshint_functions_text = "var global = this;" + res.read().decode("utf-8")
-
- with resource_stream(__name__, "jshint/jshint_wrapper.js") as res2:
- # NOTE: we need to assign to ob, as the expression {validateJS: validateJS} as an expression
- # is interpreted as a block with a label `validateJS`
- jshint_functions_text += (
- "\n"
- + res2.read().decode("utf-8")
- + "\nvar ob = {validateJS: validateJS}; ob"
- )
+ res = files("cwltool").joinpath("jshint/jshint.js")
+ # NOTE: we need a global variable for lodash (which jshint depends on)
+ jshint_functions_text = "var global = this;" + res.read_text("utf-8")
+
+ res2 = files("cwltool").joinpath("jshint/jshint_wrapper.js")
+ # NOTE: we need to assign to ob, as the expression {validateJS: validateJS} as an expression
+ # is interpreted as a block with a label `validateJS`
+ jshint_functions_text += (
+ "\n" + res2.read_text("utf-8") + "\nvar ob = {validateJS: validateJS}; ob"
+ )
returncode, stdout, stderr = exec_js_process(
- "validateJS(%s)"
- % json_dumps({"code": js_text, "options": options, "globals": globals}),
+ "validateJS(%s)" % json_dumps({"code": js_text, "options": options, "globals": globals}),
timeout=eval_timeout,
context=jshint_functions_text,
container_engine=container_engine,
)
- def dump_jshint_error():
- # type: () -> None
+ def dump_jshint_error() -> None:
raise RuntimeError(
'jshint failed to run successfully\nreturncode: %d\nstdout: "%s"\nstderr: "%s"'
% (returncode, stdout, stderr)
@@ -189,7 +177,7 @@ def dump_jshint_error():
except ValueError:
dump_jshint_error()
- jshint_errors = [] # type: List[str]
+ jshint_errors: List[str] = []
js_text_lines = js_text.split("\n")
@@ -205,9 +193,8 @@ def dump_jshint_error():
return JSHintJSReturn(jshint_errors, jshint_json.get("globals", []))
-def print_js_hint_messages(
- js_hint_messages: List[str], source_line: Optional[SourceLine]
-) -> None:
+def print_js_hint_messages(js_hint_messages: List[str], source_line: Optional[SourceLine]) -> None:
+ """Log the message from JSHint, using the line number."""
if source_line is not None:
for js_hint_message in js_hint_messages:
_logger.warning(source_line.makeError(js_hint_message))
@@ -220,7 +207,6 @@ def validate_js_expressions(
container_engine: str = "docker",
eval_timeout: float = 60,
) -> None:
-
if tool.get("requirements") is None:
return
debug = _logger.isEnabledFor(logging.DEBUG)
@@ -260,7 +246,7 @@ def validate_js_expressions(
except SubstitutionError as se:
if source_line:
source_line.raise_type = WorkflowException
- raise source_line.makeError(str(se))
+ raise source_line.makeError(str(se)) from se
else:
raise se
diff --git a/cwltool/workflow.py b/cwltool/workflow.py
index 0b66fa937..8546ca72e 100644
--- a/cwltool/workflow.py
+++ b/cwltool/workflow.py
@@ -17,19 +17,20 @@
)
from uuid import UUID
+from mypy_extensions import mypyc_attr
+from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine, indent
-from ruamel.yaml.comments import CommentedMap
-
from . import command_line_tool, context, procgenerator
from .checker import circular_dependency_checker, loop_checker, static_checker
from .context import LoadingContext, RuntimeContext, getdefault
+from .cwlprov.provenance_profile import ProvenanceProfile
+from .cwlprov.writablebagfile import create_job
from .errors import WorkflowException
from .load_tool import load_tool
from .loghandler import _logger
from .process import Process, get_overrides, shortname
-from .provenance_profile import ProvenanceProfile
from .utils import (
CWLObjectType,
CWLOutputType,
@@ -41,9 +42,8 @@
from .workflow_job import WorkflowJob
-def default_make_tool(
- toolpath_object: CommentedMap, loadingContext: LoadingContext
-) -> Process:
+def default_make_tool(toolpath_object: CommentedMap, loadingContext: LoadingContext) -> Process:
+ """Instantiate the given CWL Process."""
if not isinstance(toolpath_object, MutableMapping):
raise WorkflowException("Not a dict: '%s'" % toolpath_object)
if "class" in toolpath_object:
@@ -60,14 +60,14 @@ def default_make_tool(
raise WorkflowException(
"Missing or invalid 'class' field in "
- "%s, expecting one of: CommandLineTool, ExpressionTool, Workflow"
- % toolpath_object["id"]
+ "%s, expecting one of: CommandLineTool, ExpressionTool, Workflow" % toolpath_object["id"]
)
context.default_make_tool = default_make_tool
+@mypyc_attr(serializable=True)
class Workflow(Process):
def __init__(
self,
@@ -76,9 +76,9 @@ def __init__(
) -> None:
"""Initialize this Workflow."""
super().__init__(toolpath_object, loadingContext)
- self.provenance_object = None # type: Optional[ProvenanceProfile]
+ self.provenance_object: Optional[ProvenanceProfile] = None
if loadingContext.research_obj is not None:
- run_uuid = None # type: Optional[UUID]
+ run_uuid: Optional[UUID] = None
is_main = not loadingContext.prov_obj # Not yet set
if is_main:
run_uuid = loadingContext.research_obj.ro_uuid
@@ -101,14 +101,12 @@ def __init__(
loadingContext.requirements = self.requirements
loadingContext.hints = self.hints
- self.steps = [] # type: List[WorkflowStep]
+ self.steps: List[WorkflowStep] = []
validation_errors = []
for index, step in enumerate(self.tool.get("steps", [])):
try:
self.steps.append(
- self.make_workflow_step(
- step, index, loadingContext, loadingContext.prov_obj
- )
+ self.make_workflow_step(step, index, loadingContext, loadingContext.prov_obj)
)
except ValidationException as vexc:
if _logger.isEnabledFor(logging.DEBUG):
@@ -124,9 +122,9 @@ def __init__(
workflow_inputs = self.tool["inputs"]
workflow_outputs = self.tool["outputs"]
- step_inputs = [] # type: List[CWLObjectType]
- step_outputs = [] # type: List[CWLObjectType]
- param_to_step = {} # type: Dict[str, CWLObjectType]
+ step_inputs: List[CWLObjectType] = []
+ step_outputs: List[CWLObjectType] = []
+ param_to_step: Dict[str, CWLObjectType] = {}
for step in self.steps:
step_inputs.extend(step.tool["inputs"])
step_outputs.extend(step.tool["outputs"])
@@ -144,7 +142,7 @@ def __init__(
param_to_step,
)
circular_dependency_checker(step_inputs)
- loop_checker([step.tool for step in self.steps])
+ loop_checker(step.tool for step in self.steps)
def make_workflow_step(
self,
@@ -167,7 +165,7 @@ def job(
if runtimeContext.toplevel:
# Record primary-job.json
runtimeContext.research_obj.fsaccess = runtimeContext.make_fs_access("")
- runtimeContext.research_obj.create_job(builder.job)
+ create_job(runtimeContext.research_obj, builder.job)
job = WorkflowJob(self, runtimeContext)
yield job
@@ -213,9 +211,7 @@ def __init__(
loadingContext = loadingContext.copy()
parent_requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
- loadingContext.requirements = copy.deepcopy(
- toolpath_object.get("requirements", [])
- )
+ loadingContext.requirements = copy.deepcopy(toolpath_object.get("requirements", []))
assert loadingContext.requirements is not None # nosec
for parent_req in parent_requirements:
found_in_step = False
@@ -223,17 +219,14 @@ def __init__(
if parent_req["class"] == step_req["class"]:
found_in_step = True
break
- if (
- not found_in_step
- and parent_req.get("class") != "http://commonwl.org/cwltool#Loop"
- ):
+ if not found_in_step and parent_req.get("class") != "http://commonwl.org/cwltool#Loop":
loadingContext.requirements.append(parent_req)
loadingContext.requirements.extend(
cast(
List[CWLObjectType],
- get_overrides(
- getdefault(loadingContext.overrides_list, []), self.id
- ).get("requirements", []),
+ get_overrides(getdefault(loadingContext.overrides_list, []), self.id).get(
+ "requirements", []
+ ),
)
)
@@ -243,9 +236,9 @@ def __init__(
try:
if isinstance(toolpath_object["run"], CommentedMap):
- self.embedded_tool = loadingContext.construct_tool_object(
+ self.embedded_tool: Process = loadingContext.construct_tool_object(
toolpath_object["run"], loadingContext
- ) # type: Process
+ )
else:
loadingContext.metadata = {}
self.embedded_tool = load_tool(toolpath_object["run"], loadingContext)
@@ -272,7 +265,7 @@ def __init__(
toolpath_object[toolfield] = []
for index, step_entry in enumerate(toolpath_object[stepfield]):
if isinstance(step_entry, str):
- param = CommentedMap() # type: CommentedMap
+ param: CommentedMap = CommentedMap()
inputid = step_entry
else:
param = CommentedMap(step_entry.items())
@@ -306,9 +299,7 @@ def __init__(
else:
step_entry_name = step_entry
validation_errors.append(
- SourceLine(
- self.tool["out"], index, include_traceback=debug
- ).makeError(
+ SourceLine(self.tool["out"], index, include_traceback=debug).makeError(
"Workflow step output '%s' does not correspond to"
% shortname(step_entry_name)
)
@@ -323,9 +314,7 @@ def __init__(
"', '".join(
[
shortname(tool_entry["id"])
- for tool_entry in self.embedded_tool.tool[
- "outputs"
- ]
+ for tool_entry in self.embedded_tool.tool["outputs"]
]
)
)
@@ -371,8 +360,7 @@ def __init__(
(feature, _) = self.get_requirement("ScatterFeatureRequirement")
if not feature:
raise WorkflowException(
- "Workflow contains scatter but ScatterFeatureRequirement "
- "not in requirements"
+ "Workflow contains scatter but ScatterFeatureRequirement " "not in requirements"
)
inputparms = copy.deepcopy(self.tool["inputs"])
@@ -388,9 +376,7 @@ def __init__(
inp_map = {i["id"]: i for i in inputparms}
for inp in scatter:
if inp not in inp_map:
- SourceLine(
- self.tool, "scatter", ValidationException, debug
- ).makeError(
+ SourceLine(self.tool, "scatter", ValidationException, debug).makeError(
"Scatter parameter '%s' does not correspond to "
"an input parameter of this step, expecting '%s'"
% (
@@ -411,7 +397,7 @@ def __init__(
oparam["type"] = {"type": "array", "items": oparam["type"]}
self.tool["inputs"] = inputparms
self.tool["outputs"] = outputparms
- self.prov_obj = None # type: Optional[ProvenanceProfile]
+ self.prov_obj: Optional[ProvenanceProfile] = None
if loadingContext.research_obj is not None:
self.prov_obj = parentworkflowProv
if self.embedded_tool.tool["class"] == "Workflow":
diff --git a/cwltool/workflow_job.py b/cwltool/workflow_job.py
index 4903e5a71..c85e29516 100644
--- a/cwltool/workflow_job.py
+++ b/cwltool/workflow_job.py
@@ -4,6 +4,7 @@
import logging
import threading
from typing import (
+ TYPE_CHECKING,
Dict,
List,
MutableMapping,
@@ -18,7 +19,6 @@
from cwl_utils import expression
from schema_salad.sourceline import SourceLine
from schema_salad.utils import json_dumps
-from typing_extensions import TYPE_CHECKING
from .builder import content_limit_respected_read
from .checker import can_assign_src_to_sink
@@ -43,7 +43,7 @@
)
if TYPE_CHECKING:
- from .provenance_profile import ProvenanceProfile
+ from .cwlprov.provenance_profile import ProvenanceProfile
from .workflow import Workflow, WorkflowStep
@@ -56,7 +56,7 @@ def __init__(self, step: "WorkflowStep") -> None:
self.tool = step.tool
self.id = step.id
self.submitted = False
- self.iterable = None # type: Optional[JobsGeneratorType]
+ self.iterable: Optional[JobsGeneratorType] = None
self.completed = False
self.name = uniquename("step %s" % shortname(self.id))
self.prov_obj = step.prov_obj
@@ -92,11 +92,10 @@ def __init__(
self.processStatus = "success"
self.total = total
self.output_callback = output_callback
- self.steps = [] # type: List[Optional[JobsGeneratorType]]
+ self.steps: List[Optional[JobsGeneratorType]] = []
- def receive_scatter_output(
- self, index: int, jobout: CWLObjectType, processStatus: str
- ) -> None:
+ def receive_scatter_output(self, index: int, jobout: CWLObjectType, processStatus: str) -> None:
+ """Record the results of a scatter operation."""
for key, val in jobout.items():
self.dest[key][index] = val
@@ -138,9 +137,10 @@ def parallel_steps(
while rc.completed < rc.total:
made_progress = False
for index, step in enumerate(steps):
- if getdefault(
- runtimeContext.on_error, "stop"
- ) == "stop" and rc.processStatus not in ("success", "skipped"):
+ if getdefault(runtimeContext.on_error, "stop") == "stop" and rc.processStatus not in (
+ "success",
+ "skipped",
+ ):
break
if step is None:
continue
@@ -174,19 +174,17 @@ def nested_crossproduct_scatter(
) -> JobsGeneratorType:
scatter_key = scatter_keys[0]
jobl = len(cast(Sized, joborder[scatter_key]))
- output = {} # type: ScatterDestinationsType
+ output: ScatterDestinationsType = {}
for i in process.tool["outputs"]:
output[i["id"]] = [None] * jobl
rc = ReceiveScatterOutput(output_callback, output, jobl)
- steps = [] # type: List[Optional[JobsGeneratorType]]
+ steps: List[Optional[JobsGeneratorType]] = []
for index in range(0, jobl):
- sjob = copy.copy(joborder) # type: Optional[CWLObjectType]
+ sjob: Optional[CWLObjectType] = copy.copy(joborder)
assert sjob is not None # nosec
- sjob[scatter_key] = cast(
- MutableMapping[int, CWLObjectType], joborder[scatter_key]
- )[index]
+ sjob[scatter_key] = cast(MutableMapping[int, CWLObjectType], joborder[scatter_key])[index]
if len(scatter_keys) == 1:
if runtimeContext.postScatterEval is not None:
@@ -212,9 +210,8 @@ def nested_crossproduct_scatter(
return parallel_steps(steps, rc, runtimeContext)
-def crossproduct_size(
- joborder: CWLObjectType, scatter_keys: MutableSequence[str]
-) -> int:
+def crossproduct_size(joborder: CWLObjectType, scatter_keys: MutableSequence[str]) -> int:
+ """Compute the size of a cross product."""
scatter_key = scatter_keys[0]
if len(scatter_keys) == 1:
ssum = len(cast(Sized, joborder[scatter_key]))
@@ -232,7 +229,7 @@ def flat_crossproduct_scatter(
output_callback: ScatterOutputCallbackType,
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
- output = {} # type: ScatterDestinationsType
+ output: ScatterDestinationsType = {}
for i in process.tool["outputs"]:
output[i["id"]] = [None] * crossproduct_size(joborder, scatter_keys)
callback = ReceiveScatterOutput(output_callback, output, 0)
@@ -250,18 +247,16 @@ def _flat_crossproduct_scatter(
callback: ReceiveScatterOutput,
startindex: int,
runtimeContext: RuntimeContext,
-) -> Tuple[List[Optional[JobsGeneratorType]], int,]:
+) -> Tuple[List[Optional[JobsGeneratorType]], int]:
"""Inner loop."""
scatter_key = scatter_keys[0]
jobl = len(cast(Sized, joborder[scatter_key]))
- steps = [] # type: List[Optional[JobsGeneratorType]]
+ steps: List[Optional[JobsGeneratorType]] = []
put = startindex
for index in range(0, jobl):
- sjob = copy.copy(joborder) # type: Optional[CWLObjectType]
+ sjob: Optional[CWLObjectType] = copy.copy(joborder)
assert sjob is not None # nosec
- sjob[scatter_key] = cast(
- MutableMapping[int, CWLObjectType], joborder[scatter_key]
- )[index]
+ sjob[scatter_key] = cast(MutableMapping[int, CWLObjectType], joborder[scatter_key])[index]
if len(scatter_keys) == 1:
if runtimeContext.postScatterEval is not None:
@@ -290,27 +285,26 @@ def dotproduct_scatter(
output_callback: ScatterOutputCallbackType,
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
- jobl = None # type: Optional[int]
+ jobl: Optional[int] = None
for key in scatter_keys:
if jobl is None:
jobl = len(cast(Sized, joborder[key]))
elif jobl != len(cast(Sized, joborder[key])):
raise WorkflowException(
- "Length of input arrays must be equal when performing "
- "dotproduct scatter."
+ "Length of input arrays must be equal when performing " "dotproduct scatter."
)
if jobl is None:
raise Exception("Impossible codepath")
- output = {} # type: ScatterDestinationsType
+ output: ScatterDestinationsType = {}
for i in process.tool["outputs"]:
output[i["id"]] = [None] * jobl
rc = ReceiveScatterOutput(output_callback, output, jobl)
- steps = [] # type: List[Optional[JobsGeneratorType]]
+ steps: List[Optional[JobsGeneratorType]] = []
for index in range(0, jobl):
- sjobo = copy.copy(joborder) # type: Optional[CWLObjectType]
+ sjobo: Optional[CWLObjectType] = copy.copy(joborder)
assert sjobo is not None # nosec
for key in scatter_keys:
sjobo[key] = cast(MutableMapping[int, CWLObjectType], joborder[key])[index]
@@ -386,7 +380,7 @@ def object_from_state(
sourceField: str,
incomplete: bool = False,
) -> Optional[CWLObjectType]:
- inputobj = {} # type: CWLObjectType
+ inputobj: CWLObjectType = {}
for inp in params:
iid = original_id = cast(str, inp["id"])
if frag_only:
@@ -479,17 +473,17 @@ class WorkflowJob:
def __init__(self, workflow: "Workflow", runtimeContext: RuntimeContext) -> None:
"""Initialize this WorkflowJob."""
self.workflow = workflow
- self.prov_obj = None # type: Optional[ProvenanceProfile]
- self.parent_wf = None # type: Optional[ProvenanceProfile]
+ self.prov_obj: Optional[ProvenanceProfile] = None
+ self.parent_wf: Optional[ProvenanceProfile] = None
self.tool = workflow.tool
if runtimeContext.research_obj is not None:
self.prov_obj = workflow.provenance_object
self.parent_wf = workflow.parent_wf
self.steps = [WorkflowJobStep(s) for s in workflow.steps]
- self.state = {} # type: Dict[str, Optional[WorkflowStateItem]]
+ self.state: Dict[str, Optional[WorkflowStateItem]] = {}
self.processStatus = ""
self.did_callback = False
- self.made_progress = None # type: Optional[bool]
+ self.made_progress: Optional[bool] = None
self.outdir = runtimeContext.get_outdir()
self.name = uniquename(
@@ -508,12 +502,11 @@ def __init__(self, workflow: "Workflow", runtimeContext: RuntimeContext) -> None
)
def do_output_callback(self, final_output_callback: OutputCallbackType) -> None:
-
supportsMultipleInput = bool(
self.workflow.get_requirement("MultipleInputFeatureRequirement")[0]
)
- wo = None # type: Optional[CWLObjectType]
+ wo: Optional[CWLObjectType] = None
try:
wo = object_from_state(
self.state,
@@ -524,16 +517,14 @@ def do_output_callback(self, final_output_callback: OutputCallbackType) -> None:
incomplete=True,
)
except WorkflowException as err:
- _logger.error(
- "[%s] Cannot collect workflow output: %s", self.name, str(err)
- )
+ _logger.error("[%s] Cannot collect workflow output: %s", self.name, str(err))
self.processStatus = "permanentFail"
if (
self.prov_obj
and self.parent_wf
and self.prov_obj.workflow_run_uri != self.parent_wf.workflow_run_uri
):
- process_run_id = None # type: Optional[str]
+ process_run_id: Optional[str] = None
self.prov_obj.generate_output_prov(wo or {}, process_run_id, self.name)
self.prov_obj.document.wasEndedBy(
self.prov_obj.workflow_run_uri,
@@ -543,9 +534,7 @@ def do_output_callback(self, final_output_callback: OutputCallbackType) -> None:
)
prov_ids = self.prov_obj.finalize_prov_profile(self.name)
# Tell parent to associate our provenance files with our wf run
- self.parent_wf.activity_has_provenance(
- self.prov_obj.workflow_run_uri, prov_ids
- )
+ self.parent_wf.activity_has_provenance(self.prov_obj.workflow_run_uri, prov_ids)
_logger.info("[%s] completed %s", self.name, self.processStatus)
if _logger.isEnabledFor(logging.DEBUG):
@@ -563,21 +552,16 @@ def receive_output(
jobout: CWLObjectType,
processStatus: str,
) -> None:
-
for i in outputparms:
if "id" in i:
iid = cast(str, i["id"])
if iid in jobout:
self.state[iid] = WorkflowStateItem(i, jobout[iid], processStatus)
else:
- _logger.error(
- "[%s] Output is missing expected field %s", step.name, iid
- )
+ _logger.error("[%s] Output is missing expected field %s", step.name, iid)
processStatus = "permanentFail"
if _logger.isEnabledFor(logging.DEBUG):
- _logger.debug(
- "[%s] produced output %s", step.name, json_dumps(jobout, indent=4)
- )
+ _logger.debug("[%s] produced output %s", step.name, json_dumps(jobout, indent=4))
if processStatus not in ("success", "skipped"):
if self.processStatus != "permanentFail":
@@ -632,13 +616,9 @@ def try_make_job(
self.receive_output, step, outputparms, final_output_callback
)
- valueFrom = {
- i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i
- }
+ valueFrom = {i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i}
- loadContents = {
- i["id"] for i in step.tool["inputs"] if i.get("loadContents")
- }
+ loadContents = {i["id"] for i in step.tool["inputs"] if i.get("loadContents")}
if len(valueFrom) > 0 and not bool(
self.workflow.get_requirement("StepInputExpressionRequirement")[0]
@@ -658,13 +638,9 @@ def postScatterEval(io: CWLObjectType) -> Optional[CWLObjectType]:
with fs_access.open(cast(str, val["location"]), "rb") as f:
val["contents"] = content_limit_respected_read(f)
- def valueFromFunc(
- k: str, v: Optional[CWLOutputType]
- ) -> Optional[CWLOutputType]:
+ def valueFromFunc(k: str, v: Optional[CWLOutputType]) -> Optional[CWLOutputType]:
if k in valueFrom:
- adjustDirObjs(
- v, functools.partial(get_listing, fs_access, recursive=True)
- )
+ adjustDirObjs(v, functools.partial(get_listing, fs_access, recursive=True))
return expression.do_eval(
valueFrom[k],
@@ -727,9 +703,7 @@ def valueFromFunc(
runtimeContext = runtimeContext.copy()
runtimeContext.postScatterEval = postScatterEval
- emptyscatter = [
- shortname(s) for s in scatter if len(cast(Sized, inputobj[s])) == 0
- ]
+ emptyscatter = [shortname(s) for s in scatter if len(cast(Sized, inputobj[s])) == 0]
if emptyscatter:
_logger.warning(
"[job %s] Notice: scattering over empty input in "
@@ -739,9 +713,7 @@ def valueFromFunc(
)
if method == "dotproduct" or method is None:
- jobs = dotproduct_scatter(
- step, inputobj, scatter, callback, runtimeContext
- )
+ jobs = dotproduct_scatter(step, inputobj, scatter, callback, runtimeContext)
elif method == "nested_crossproduct":
jobs = nested_crossproduct_scatter(
step, inputobj, scatter, callback, runtimeContext
@@ -752,9 +724,7 @@ def valueFromFunc(
)
else:
if _logger.isEnabledFor(logging.DEBUG):
- _logger.debug(
- "[%s] job input %s", step.name, json_dumps(inputobj, indent=4)
- )
+ _logger.debug("[%s] job input %s", step.name, json_dumps(inputobj, indent=4))
inputobj = postScatterEval(inputobj)
if inputobj is not None:
@@ -814,13 +784,9 @@ def job(
with SourceLine(self.tool["inputs"], index, WorkflowException, debug):
inp_id = shortname(inp["id"])
if inp_id in joborder:
- self.state[inp["id"]] = WorkflowStateItem(
- inp, joborder[inp_id], "success"
- )
+ self.state[inp["id"]] = WorkflowStateItem(inp, joborder[inp_id], "success")
elif "default" in inp:
- self.state[inp["id"]] = WorkflowStateItem(
- inp, inp["default"], "success"
- )
+ self.state[inp["id"]] = WorkflowStateItem(inp, inp["default"], "success")
else:
raise WorkflowException(
"Input '%s' not in input object and does not have a "
@@ -844,9 +810,7 @@ def job(
if not step.submitted:
try:
- step.iterable = self.try_make_job(
- step, output_callback, runtimeContext
- )
+ step.iterable = self.try_make_job(step, output_callback, runtimeContext)
except WorkflowException as exc:
_logger.error("[%s] Cannot make job: %s", step.name, str(exc))
_logger.debug("", exc_info=True)
@@ -886,7 +850,7 @@ def job(
class WorkflowJobLoopStep:
- """Generated for each step in Workflow.steps() containing a http://commonwl.org/cwltool#Loop requirement."""
+ """Generated for each step in Workflow.steps() containing a Loop requirement."""
def __init__(self, step: WorkflowJobStep, container_engine: str):
"""Initialize this WorkflowJobLoopStep."""
@@ -905,9 +869,7 @@ def _set_empty_output(self, loop_req: CWLObjectType) -> None:
if "id" in i:
iid = cast(str, i["id"])
if loop_req.get("outputMethod") == "all":
- self.output_buffer[iid] = cast(
- MutableSequence[Optional[CWLOutputType]], []
- )
+ self.output_buffer[iid] = cast(MutableSequence[Optional[CWLOutputType]], [])
else:
self.output_buffer[iid] = None
@@ -1033,7 +995,6 @@ def loop_callback(
self.processStatus = processStatus
if processStatus not in ("success", "skipped"):
-
_logger.warning(
"[%s] Iteration %i completed %s",
self.step.name,
@@ -1087,9 +1048,7 @@ def loop_callback(
for k, v in inputobj.items():
if k in valueFrom:
- adjustDirObjs(
- v, functools.partial(get_listing, fs_access, recursive=True)
- )
+ adjustDirObjs(v, functools.partial(get_listing, fs_access, recursive=True))
inputobj[k] = cast(
CWLObjectType,
expression.do_eval(
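The scatter dispatch above selects among ``dotproduct``, ``nested_crossproduct``, and ``flat_crossproduct`` strategies. As a rough illustration of the semantics only (the helper names below are hypothetical, not cwltool's API), a ``dotproduct`` scatter pairs the i-th elements of each scattered input, while a cross-product scatter enumerates every combination; note how an empty scattered list yields no jobs, which is what the ``emptyscatter`` warning above detects:

.. code-block:: python

    from itertools import product
    from typing import Any, Dict, List

    # Illustrative sketch only -- not cwltool's implementation.
    def dotproduct_jobs(inputobj: Dict[str, List[Any]], scatter: List[str]) -> List[Dict[str, Any]]:
        # All scattered inputs must have the same length; element i of
        # each list forms the i-th job (empty lists yield no jobs at all).
        if len({len(inputobj[s]) for s in scatter}) != 1:
            raise ValueError("dotproduct scatter requires equal-length inputs")
        n = len(inputobj[scatter[0]])
        return [{**inputobj, **{s: inputobj[s][i] for s in scatter}} for i in range(n)]

    def flat_crossproduct_jobs(inputobj: Dict[str, List[Any]], scatter: List[str]) -> List[Dict[str, Any]]:
        # Every combination of scattered elements becomes one job.
        return [
            {**inputobj, **dict(zip(scatter, combo))}
            for combo in product(*(inputobj[s] for s in scatter))
        ]

    dotproduct_jobs({"a": [1, 2], "b": ["x", "y"]}, ["a", "b"])
    # -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]
    flat_crossproduct_jobs({"a": [1, 2], "b": ["x", "y"]}, ["a", "b"])
    # -> 4 jobs: (1,'x'), (1,'y'), (2,'x'), (2,'y')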
diff --git a/CWLProv.rst b/docs/CWLProv.rst
similarity index 95%
rename from CWLProv.rst
rename to docs/CWLProv.rst
index 58c00d0c3..e2f471ec5 100644
--- a/CWLProv.rst
+++ b/docs/CWLProv.rst
@@ -4,6 +4,8 @@ Provenance capture
It is possible to capture the full provenance of a workflow execution to
a folder, including intermediate values:
+.. code-block:: sh
+
cwltool --provenance revsort-run-1/ tests/wf/revsort.cwl tests/wf/revsort-job.json
Who executed the workflow?
@@ -11,6 +13,8 @@ Who executed the workflow?
Optional parameters are available to capture information about *who* executed the workflow *where*:
+.. code-block:: sh
+
cwltool --orcid https://orcid.org/0000-0002-1825-0097 \
--full-name "Alice W Land" \
--enable-user-provenance --enable-host-provenance \
@@ -29,10 +33,12 @@ since ``--enable-user-provenance --enable-host-provenance``
are only able to identify the local machine account.
It is possible to set the shell environment variables
-`ORCID` and `CWL_FULL_NAME` to avoid supplying ``--orcid``
-or `--full-name` for every workflow run,
+``ORCID`` and ``CWL_FULL_NAME`` to avoid supplying ``--orcid``
+or ``--full-name`` for every workflow run,
for instance by augmenting the ``~/.bashrc`` or equivalent:
+.. code-block:: sh
+
export ORCID=https://orcid.org/0000-0002-1825-0097
export CWL_FULL_NAME="Stian Soiland-Reyes"
@@ -42,25 +48,24 @@ Care should be taken to preserve spaces when setting `--full-name` or `CWL_FULL_
CWLProv folder structure
^^^^^^^^^^^^^^^^^^^^^^^^
-The CWLProv folder structure under revsort-run-1 is a
+The CWLProv folder structure under ``revsort-run-1`` is a
`Research Object `__
that conforms to the `RO BagIt profile `__
and contains `PROV `__
traces detailing the execution of the workflow and its steps.
-
A rough overview of the CWLProv folder structure:
* ``bagit.txt`` - bag marker for `BagIt `__.
* ``bag-info.txt`` - minimal bag metadata. The ``External-Identifier`` key shows which `arcp `__ can be used as base URI within the folder bag.
-* ``manifest-*.txt`` - checksums of files under data/ (algorithms subject to change)
+* ``manifest-*.txt`` - checksums of files under ``data/`` (algorithms subject to change)
* ``tagmanifest-*.txt`` - checksums of the remaining files (algorithms subject to change)
* ``metadata/manifest.json`` - `Research Object manifest `__ as JSON-LD. Types and relates files within bag.
* ``metadata/provenance/primary.cwlprov*`` - `PROV `__ trace of main workflow execution in alternative PROV and RDF formats
* ``data/`` - bag payload, workflow/step input/output data files (content-addressable)
* ``data/32/327fc7aedf4f6b69a42a7c8b808dc5a7aff61376`` - a data item with checksum ``327fc7aedf4f6b69a42a7c8b808dc5a7aff61376`` (checksum algorithm is subject to change)
* ``workflow/packed.cwl`` - The ``cwltool --pack`` standalone version of the executed workflow
-* ``workflow/primary-job.json`` - Job input for use with packed.cwl (references ``data/*``)
+* ``workflow/primary-job.json`` - Job input for use with ``packed.cwl`` (references ``data/*``)
* ``snapshot/`` - Direct copies of original files used for execution, but may have broken relative/absolute paths
@@ -69,7 +74,7 @@ See the `CWLProv paper `__ for more deta
Research Object manifest
^^^^^^^^^^^^^^^^^^^^^^^^
-The file ``metadata/manifest.json`` follows the structure defined for `Research Object Bundles ` - but
+The file ``metadata/manifest.json`` follows the structure defined for `Research Object Bundles `_ - but
note that ``.ro/`` is instead called ``metadata/`` as this conforms to the `RO BagIt profile `__.
Some of the keys of the CWLProv manifest are explained below::
@@ -235,7 +240,7 @@ Note that the `arcp `
Account who launched cwltool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If `--enable-user-provenance` was used, the local machine account (e.g. Windows or UNIX user name) who started ``cwltool`` is tracked::
+If ``--enable-user-provenance`` was used, the local machine account (e.g. Windows or UNIX user name) who started ``cwltool`` is tracked::
agent(id:855c6823-bbe7-48a5-be37-b0f07f20c495, [foaf:accountName="stain", prov:type='foaf:OnlineAccount', prov:label="stain"])
@@ -247,7 +252,7 @@ It is assumed that the account was under the control of the named person (in PRO
However, we do not have an identifier for either the account or the person, so every ``cwltool`` run will yield new UUIDs.
-With --enable-user-provenance it is possible to associate the account with a hostname::
+With ``--enable-user-provenance`` it is possible to associate the account with a hostname::
agent(id:855c6823-bbe7-48a5-be37-b0f07f20c495, [cwlprov:hostname="biggie", prov:type='foaf:OnlineAccount', prov:location="biggie"])
@@ -281,9 +286,9 @@ Now what is that workflow again? Well a tiny bit of prospective provenance is in
entity(wf:main, [prov:label="Prospective provenance", wfdesc:hasSubProcess='wf:main/step0'])
entity(wf:main/step0, [prov:type='wfdesc:Process', prov:type='prov:Plan'])
-But we can also expand the `wf` identifiers to find that we are talking about
+But we can also expand the ``wf`` identifiers to find that we are talking about
``arcp://uuid,0e6cb79e-fe70-4807-888c-3a61b9bf232a/workflow/packed.cwl#`` - that is
-the ``main`` workflow in the file `workflow/packed.cwl` of the Research Object.
+the ``main`` workflow in the file ``workflow/packed.cwl`` of the Research Object.
Running workflow steps
^^^^^^^^^^^^^^^^^^^^^^
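To make the BagIt layout described above concrete, here is a minimal sketch of verifying the ``data/`` payload against a ``manifest-sha1.txt``; ``verify_manifest`` is a hypothetical helper, and real validation should prefer a dedicated library such as the ``bagit`` package:

.. code-block:: python

    import hashlib
    from pathlib import Path

    # Each manifest line is "<checksum>  <relative path>"; recompute the
    # digest of every payload file and compare. Illustration only.
    def verify_manifest(bag_dir: Path, manifest_name: str = "manifest-sha1.txt") -> bool:
        ok = True
        for line in (bag_dir / manifest_name).read_text().splitlines():
            expected, relpath = line.split(maxsplit=1)
            digest = hashlib.sha1((bag_dir / relpath).read_bytes()).hexdigest()
            if digest != expected:
                print(f"checksum mismatch: {relpath}")
                ok = False
        return ok

    # verify_manifest(Path("revsort-run-1"))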
diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico
new file mode 100644
index 000000000..9fadc9a7c
Binary files /dev/null and b/docs/_static/favicon.ico differ
diff --git a/docs/cli.rst b/docs/cli.rst
new file mode 100644
index 000000000..d569f5586
--- /dev/null
+++ b/docs/cli.rst
@@ -0,0 +1,6 @@
+cwltool Command Line Options
+============================
+
+.. autoprogram:: cwltool.argparser:arg_parser()
+ :prog: cwltool
+
diff --git a/docs/conf.py b/docs/conf.py
index 09e950a91..6e04b5d64 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -6,20 +6,27 @@
# -- Path setup --------------------------------------------------------------
+import importlib.metadata
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
-sys.path.insert(0, os.path.abspath('..'))
+import time
+from datetime import datetime, timezone
+
+sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
-project = 'Common Workflow Language reference implementation'
-copyright = '2019, Peter Amstutz and contributors'
-author = 'Peter Amstutz and contributors'
+build_date = datetime.fromtimestamp(
+ int(os.environ.get("SOURCE_DATE_EPOCH", time.time())), timezone.utc
+)
+project = "Common Workflow Language reference implementation"
+copyright = f"2019 — {build_date.year}, Peter Amstutz and contributors to the CWL Project"
+author = "Peter Amstutz and Common Workflow Language Project contributors"
# -- General configuration ---------------------------------------------------
@@ -28,22 +35,34 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- "sphinx.ext.autodoc",
- "sphinx.ext.autosummary",
- "sphinx.ext.inheritance_diagram",
- "autoapi.extension",
- "sphinx_autodoc_typehints",
- "sphinx_rtd_theme",
- "sphinxcontrib.autoprogram"
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.autosectionlabel",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.inheritance_diagram",
+ "autoapi.extension",
+ "sphinx_autodoc_typehints",
+ "sphinx_rtd_theme",
+ "sphinxcontrib.autoprogram",
]
+autosectionlabel_prefix_document = True
+
+intersphinx_mapping = {
+ "python": ("https://docs.python.org/3", None),
+ "schema_salad": ("https://schema-salad.readthedocs.io/en/stable/", None),
+ "rdflib": ("https://rdflib.readthedocs.io/en/6.2.0/", None),
+ # "ruamel.yaml": ("https://yaml.readthedocs.io/en/stable/", None),
+}
+
+
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
@@ -53,23 +72,35 @@
#
html_theme = "sphinx_rtd_theme"
+# html_logo = "_static/logo.png"
+html_favicon = "_static/favicon.ico"
+
+html_theme_options = {
+ "collapse_navigation": False,
+}
+
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
+html_static_path = ["_static"]
-from pkg_resources import get_distribution
-release = get_distribution('cwltool').version
-version = '.'.join(release.split('.')[:2])
+release = importlib.metadata.version("cwltool")
+version = ".".join(release.split(".")[:2])
-autoapi_dirs = ['../cwltool']
-autodoc_typehints = 'description'
+autoapi_dirs = ["../cwltool"]
+autodoc_typehints = "description"
autoapi_keep_files = True
-autoapi_ignore = ['*migrations*', '*.pyi']
-autoapi_options = [ 'members', 'undoc-members', 'show-inheritance', 'show-inheritance-diagram', 'show-module-summary', 'imported-members', 'special-members' ]
-#sphinx-autodoc-typehints
+autoapi_ignore = ["*migrations*", "*.pyi"]
+autoapi_options = [
+ "members",
+ "undoc-members",
+ "show-inheritance",
+ "show-inheritance-diagram",
+ "show-module-summary",
+ "imported-members",
+ "special-members",
+]
+# sphinx-autodoc-typehints
always_document_param_types = True
# If False, do not add type info for undocumented parameters.
# If True, add stub documentation for undocumented parameters to be able to add type info.
-
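The ``conf.py`` change above swaps the deprecated ``pkg_resources`` lookup for the standard-library ``importlib.metadata`` (available since Python 3.8); a minimal before/after sketch:

.. code-block:: python

    # Deprecated (slow to import, provided by setuptools):
    #   from pkg_resources import get_distribution
    #   release = get_distribution("cwltool").version

    # Standard-library replacement (Python 3.8+):
    import importlib.metadata

    release = importlib.metadata.version("cwltool")  # full version string
    version = ".".join(release.split(".")[:2])       # short X.Y form for Sphinx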
diff --git a/docs/index.rst b/docs/index.rst
index f7924ad3d..2e718f176 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,27 +1,15 @@
-==================================================================
-Common Workflow Language tool description reference implementation
-==================================================================
-
-This is the reference implementation of the Common Workflow Language. It is
-intended to be feature complete and provide comprehensive validation of CWL
-files as well as provide other tools related to working with CWL.
-
-cwltool Command Line Options
-============================
-
-.. autoprogram:: cwltool.argparser:arg_parser()
- :prog: cwltool
-
-Modules
-=======
+.. include:: ../README.rst
.. toctree::
:maxdepth: 2
:caption: Contents:
+ cli
loop
+ CWLProv
pythonversions
processgen
+ autoapi/index
Indices and tables
==================
diff --git a/docs/loop.rst b/docs/loop.rst
index 4ed62a6b8..9a1f09808 100644
--- a/docs/loop.rst
+++ b/docs/loop.rst
@@ -12,6 +12,7 @@ The loop condition
The ``loopWhen`` field controls loop termination. It is an expansion of the CWL v1.2 ``when`` construct, which controls conditional execution. The expression is evaluated with ``inputs`` bound to the step input object and to the outputs produced in the last step execution, and must return a boolean value. It is an error if this expression returns a value other than ``true`` or ``false``. For example:
.. code:: yaml
+
example:
run:
class: ExpressionTool
@@ -30,6 +31,7 @@ The ``loopWhen`` field controls loop termination. It is an expansion of the CWL
loop:
i1: o1
outputMethod: last
+
This loop executes until the counter ``i1`` reaches the value of 10, and then terminates. Note that if the ``loopWhen`` condition evaluates to ``false`` prior to the first iteration, the loop is skipped. The value assumed by the output fields depends on the specified ``outputMethod``, as described below.
The loop field
@@ -77,6 +79,7 @@ The ``last`` output mode propagates only the last computed element to the subseq
This is the most common behaviour, typical of optimization processes in which a step must iterate until a desired precision is reached. For example:
.. code:: yaml
+
optimization:
in:
a: a
@@ -93,6 +96,7 @@ This is the most recurrent behaviour and it is typical of the optimization proce
prev_a:
valueFrom: $(inputs.a)
outputMethod: last
+
This loop keeps optimizing the initial ``a`` value until the error falls below a given (constant) ``threshold``. Then, the last value of ``a`` will be propagated.
The ``all`` output mode propagates a single array with all output values to the subsequent steps when the loop terminates. When a loop with an ``outputMethod`` equal to ``all`` is skipped, each output assumes a ``[]`` value.
@@ -100,6 +104,7 @@ The ``all`` output mode propagates a single array with all output values to the
This behaviour is needed when a recurrent simulation produces loop-carried results, but the subsequent steps need to know the total number of computed values to proceed. For example:
.. code:: yaml
+
simulation:
in:
a: a
@@ -116,6 +121,7 @@ This behaviour is needed when a recurrent simulation produces loop-carried resul
day:
valueFrom: $(inputs.day + 1)
outputMethod: all
+
In this case, subsequent steps can start processing outputs even before the ``simulation`` step terminates. When a loop with an ``outputMethod`` equal to ``last`` is skipped, each output assumes a ``null`` value.
Loop-independent iterations
@@ -124,6 +130,7 @@ Loop-independent iterations
If a ``cwltool:Loop`` has loop-independent iterations, i.e. each iteration does not depend on the results produced by the previous ones, all iterations can be processed concurrently. For example:
.. code:: yaml
+
example:
run: inner.cwl
in:
@@ -136,4 +143,5 @@ If a ``cwltool:Loop`` comes with loop-independent iterations, i.e. if each itera
i1:
valueFrom: $(inputs.i1 + 1)
outputMethod: all
-Since each iteration of this loop only depends on the input field ``i1``, all its iterations can be processed in parallel if there is enough computing power.
\ No newline at end of file
+
+Since each iteration of this loop only depends on the input field ``i1``, all its iterations can be processed in parallel if there is enough computing power.
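The loop semantics documented above reduce to a simple control-flow pattern. The following is an illustrative Python sketch (``run_loop`` and its callbacks are hypothetical, not cwltool's implementation) showing ``loopWhen`` termination and the ``last``/``all`` output modes, including the skipped-loop defaults (``null`` for ``last``, ``[]`` for ``all``):

.. code-block:: python

    from typing import Any, Callable, Dict, List

    # Illustrative sketch of cwltool:Loop control flow.
    # loop_when evaluates the termination condition against current inputs;
    # run_step executes one iteration; update maps outputs back onto inputs.
    def run_loop(
        inputs: Dict[str, Any],
        loop_when: Callable[[Dict[str, Any]], bool],
        run_step: Callable[[Dict[str, Any]], Dict[str, Any]],
        update: Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]],
        output_method: str = "last",
    ):
        history: List[Dict[str, Any]] = []
        while loop_when(inputs):
            out = run_step(inputs)
            history.append(out)
            inputs = update(inputs, out)
        if output_method == "all":
            return history  # every iteration's outputs; [] when skipped
        return history[-1] if history else None  # "last"; None when skipped

    # Counter example from above: iterate while i1 < 10, incrementing i1.
    result = run_loop(
        {"i1": 1},
        loop_when=lambda inp: inp["i1"] < 10,
        run_step=lambda inp: {"o1": inp["i1"] + 1},
        update=lambda inp, out: {"i1": out["o1"]},
        output_method="last",
    )
    # result == {'o1': 10}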
diff --git a/docs/pythonversions.rst b/docs/pythonversions.rst
index b3a34e36f..74b1a1b03 100644
--- a/docs/pythonversions.rst
+++ b/docs/pythonversions.rst
@@ -2,16 +2,18 @@
Python version support policy
=============================
-Cwltool will always support `stable Python 3 releases with active branches
-`_.
+``cwltool`` will always support `Python 3 versions that are officially supported by the Python Software Foundation
+`_.
-For versions that are no longer supported by Python upstream, cwltool
-support also extends to the default Python version included in the
+For versions that are no longer supported by the Python Software Foundation (or "upstream" for short), cwltool
+support also extends to the latest Python versions included in the
following major Linux distributions:
-* Debian (`stable `_, `oldstable `_)
+* Debian (`stable `_)
* Ubuntu (`LTS release standard support `_)
-* Centos 7 (`while in maintenance `_)
+
+This means that users may need to install a newer version of Python
+from their Linux distribution if the default version is too old.
If there is a conflict between a third party package dependency which
has dropped support for a Python version that cwltool should support
@@ -22,30 +24,34 @@ and downstream users before making the decision to drop support for a
Python version before the date outlined in this policy. The reasoning
for dropping support for a Python version should be outlined here.
-As of February 2022, here are approximate cwltool support periods for
-across Python versions:
+As of 2023-08-14, here are the approximate cwltool support periods for Python versions (``EOL`` == "End of Life", i.e. the end of the support period from that provider):
====== ======================
Python cwltool end of support
====== ======================
-2.7 ended January 2020
-3.5 ended October 2020
-3.6 June 2024 (Centos 7 EOL)
-3.7 June 2023 (upstream EOL)
-3.8 April 2025 (Ubuntu 20.04 EOL)
-3.9 October 2025 (upstream EOL)
-3.10 October 2026 (upstream EOL)
+2.7 ended 2020-01 (upstream EOL)
+3.5 ended 2020-10 (upstream EOL)
+3.6 ended 2023-08-31 (change in cwltool policy)
+3.7 ended 2023-07-27 (upstream EOL)
+3.8 2024-10-14 (upstream EOL)
+3.9 2025-10-01 (upstream EOL)
+3.10 2027-04-01 (Ubuntu 22.04 LTS EOL)
+3.11 2027-10-01 (upstream EOL)
+3.12 2028-10-01 (planned upstream EOL)
+3.13 2029-10-01 (planned upstream EOL)
====== ======================
-Default Python version of supported Linux distributions, for reference
-(as of February 2022)
-
-====== =============================================
-Python Linux distros where it is the default version
-====== =============================================
-3.6 Ubuntu 18.04, Centos 7
-3.7 Debian 10
-3.8 Ubuntu 20.04
-3.9 Debian 11
-3.10 None
-====== =============================================
+Python version of supported Linux distributions, for reference
+(as of August 2023)
+
+============== =============================================
+Python Version Linux distros where it is a supported version
+============== =============================================
+3.6 Ubuntu 18.04 LTS
+3.7 Debian 10
+3.8 Ubuntu 20.04 LTS
+3.9 Debian 11, Ubuntu 20.04 LTS
+3.10 Ubuntu 22.04 LTS
+3.11 Debian 12
+3.12 Debian 13 (planned)
+============== =============================================
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 8a81b7667..7d9f9d0d4 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,6 +1,7 @@
sphinx >= 2.2
-sphinx-rtd-theme
+sphinx-rtd-theme==1.3.0
sphinx-autoapi
sphinx-autodoc-typehints
typed_ast;python_version<'3.8'
sphinxcontrib-autoprogram
+importlib_resources
diff --git a/gittaggers.py b/gittaggers.py
deleted file mode 100644
index e91cf0fbf..000000000
--- a/gittaggers.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import subprocess
-import time
-import pkg_resources
-from setuptools.command.egg_info import egg_info
-
-SETUPTOOLS_VER = pkg_resources.get_distribution(
- "setuptools").version.split('.')
-
-RECENT_SETUPTOOLS = int(SETUPTOOLS_VER[0]) > 40 or (
- int(SETUPTOOLS_VER[0]) == 40 and int(SETUPTOOLS_VER[1]) > 0) or (
- int(SETUPTOOLS_VER[0]) == 40 and int(SETUPTOOLS_VER[1]) == 0 and
- int(SETUPTOOLS_VER[2]) > 0)
-
-class EggInfoFromGit(egg_info):
- """Tag the build with git commit timestamp.
-
- If a build tag has already been set (e.g., "egg_info -b", building
- from source package), leave it alone.
- """
-
- def git_timestamp_tag(self):
- gitinfo = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', '.']).strip()
- return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
-
- def tags(self):
- if self.tag_build is None:
- try:
- self.tag_build = self.git_timestamp_tag()
- except subprocess.CalledProcessError:
- pass
- return egg_info.tags(self)
-
- if RECENT_SETUPTOOLS:
- vtags = property(tags)
diff --git a/lint-requirements.txt b/lint-requirements.txt
index 2287ebe3b..8531e58f5 100644
--- a/lint-requirements.txt
+++ b/lint-requirements.txt
@@ -1,3 +1,3 @@
-flake8-bugbear
-black ~= 22.0
+flake8-bugbear<23.10
+black~=23.10
codespell
diff --git a/mypy-requirements.txt b/mypy-requirements.txt
index 3000af08c..5b3b4b5db 100644
--- a/mypy-requirements.txt
+++ b/mypy-requirements.txt
@@ -1,8 +1,9 @@
-mypy==0.991
-ruamel.yaml>=0.16.0,<0.17.22
-schema-salad>=8.2.20211104054942,<9
-cwl-utils >=0.19
+mypy==1.6.1 # also update pyproject.toml
+ruamel.yaml>=0.16.0,<0.19
+cwl-utils>=0.22
types-requests
types-setuptools
types-psutil
types-mock
+galaxy-tool-util>=22.1.2,<23.2,!=23.0.1,!=23.0.2,!=23.0.3,!=23.0.4,!=23.0.5
+galaxy-util<23.2
diff --git a/mypy-stubs/argcomplete/__init__.pyi b/mypy-stubs/argcomplete/__init__.pyi
deleted file mode 100644
index f9204a07b..000000000
--- a/mypy-stubs/argcomplete/__init__.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-import argparse
-from typing import Any, Optional
-
-class CompletionFinder:
- def __call__(
- self,
- argument_parser: argparse.ArgumentParser,
- always_complete_options: bool = ...,
- exit_method: Any = ...,
- output_stream: Optional[Any] = ...,
- exclude: Optional[Any] = ...,
- validator: Optional[Any] = ...,
- print_suppressed: bool = ...,
- append_space: Optional[Any] = ...,
- default_completer: Any = ...,
- ) -> None: ...
-
-autocomplete: CompletionFinder
diff --git a/mypy-stubs/cachecontrol/__init__.pyi b/mypy-stubs/cachecontrol/__init__.pyi
deleted file mode 100644
index d8c9745da..000000000
--- a/mypy-stubs/cachecontrol/__init__.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for cachecontrol (Python 2)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-from .wrapper import CacheControl as CacheControl
-
-__email__ = ... # type: Any
diff --git a/mypy-stubs/cachecontrol/cache.pyi b/mypy-stubs/cachecontrol/cache.pyi
deleted file mode 100644
index 04b98f2ea..000000000
--- a/mypy-stubs/cachecontrol/cache.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-class BaseCache:
- def get(self, key: str) -> str | None: ...
- def set(self, key: str, value: str) -> None: ...
- def delete(self, key: str) -> None: ...
- def close(self) -> None: ...
diff --git a/mypy-stubs/cachecontrol/caches/__init__.pyi b/mypy-stubs/cachecontrol/caches/__init__.pyi
deleted file mode 100644
index 42c0ad685..000000000
--- a/mypy-stubs/cachecontrol/caches/__init__.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for cachecontrol.caches (Python 2)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-from .file_cache import FileCache as FileCache
-
-# from .redis_cache import RedisCache as RedisCache
-
-notice = ... # type: Any
diff --git a/mypy-stubs/cachecontrol/caches/file_cache.pyi b/mypy-stubs/cachecontrol/caches/file_cache.pyi
deleted file mode 100644
index 229c143ba..000000000
--- a/mypy-stubs/cachecontrol/caches/file_cache.pyi
+++ /dev/null
@@ -1,31 +0,0 @@
-from os import PathLike
-from typing import ContextManager
-
-from ..cache import BaseCache as BaseCache
-from ..controller import CacheController as CacheController
-
-class _LockClass:
- path: str
-
-_lock_class = ContextManager[_LockClass]
-
-class FileCache(BaseCache):
- directory: str | PathLike[str]
- forever: bool
- filemode: str
- dirmode: str
- lock_class: _lock_class
- def __init__(
- self,
- directory: str | PathLike[str],
- forever: bool = ...,
- filemode: int = ...,
- dirmode: int = ...,
- use_dir_lock: bool | None = ...,
- lock_class: _lock_class | None = ...,
- ) -> None: ...
- @staticmethod
- def encode(x: str) -> str: ...
- def get(self, key: str) -> None | str: ...
- def set(self, key: str, value: str) -> None: ...
- def delete(self, key: str) -> None: ...
diff --git a/mypy-stubs/cachecontrol/compat.pyi b/mypy-stubs/cachecontrol/compat.pyi
deleted file mode 100644
index fabfae3f8..000000000
--- a/mypy-stubs/cachecontrol/compat.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for cachecontrol.compat (Python 2)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-str = ... # type: Any
diff --git a/mypy-stubs/cachecontrol/controller.pyi b/mypy-stubs/cachecontrol/controller.pyi
deleted file mode 100644
index 5118fab02..000000000
--- a/mypy-stubs/cachecontrol/controller.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-from _typeshed import Incomplete
-
-logger: Incomplete
-URI: Incomplete
-
-class CacheController:
- cache: Incomplete
- cache_etags: Incomplete
- serializer: Incomplete
- cacheable_status_codes: Incomplete
- def __init__(
- self,
- cache: Incomplete | None = ...,
- cache_etags: bool = ...,
- serializer: Incomplete | None = ...,
- status_codes: Incomplete | None = ...,
- ) -> None: ...
diff --git a/mypy-stubs/cachecontrol/wrapper.pyi b/mypy-stubs/cachecontrol/wrapper.pyi
deleted file mode 100644
index a4da67289..000000000
--- a/mypy-stubs/cachecontrol/wrapper.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-from _typeshed import Incomplete
-from requests import Session
-
-def CacheControl(
- sess: Session,
- cache: Incomplete | None = ...,
- cache_etags: bool = ...,
- serializer: Incomplete | None = ...,
- heuristic: Incomplete | None = ...,
- controller_class: Incomplete | None = ...,
- adapter_class: Incomplete | None = ...,
- cacheable_methods: Incomplete | None = ...,
-) -> Session: ...
diff --git a/mypy-stubs/graphviz/__init__.pyi b/mypy-stubs/graphviz/__init__.pyi
deleted file mode 100644
index 023952bbe..000000000
--- a/mypy-stubs/graphviz/__init__.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for graphviz (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-# from .backend import ENGINES as ENGINES, ExecutableNotFound as ExecutableNotFound, FORMATS as FORMATS, pipe as pipe, render as render, version as version, view as view
-# from .dot import Digraph as Digraph, Graph as Graph
-# from .files import Source as Source
-# from .lang import nohtml as nohtml
-
-# ENGINES = ENGINES
-# FORMATS = FORMATS
-# ExecutableNotFound = ExecutableNotFound
diff --git a/mypy-stubs/graphviz/_compat.pyi b/mypy-stubs/graphviz/_compat.pyi
deleted file mode 100644
index 4308df0ad..000000000
--- a/mypy-stubs/graphviz/_compat.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for graphviz._compat (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-PY2: Any
-string_classes: Any
-text_type = unicode
-
-def iteritems(d): ...
-def makedirs(name, mode: int = ..., exist_ok: bool = ...): ...
-def stderr_write_binary(data): ...
-
-text_type = str
diff --git a/mypy-stubs/graphviz/backend.pyi b/mypy-stubs/graphviz/backend.pyi
deleted file mode 100644
index 1582b9768..000000000
--- a/mypy-stubs/graphviz/backend.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for graphviz.backend (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-class ExecutableNotFound(RuntimeError):
- def __init__(self, args) -> None: ...
-
-def render(engine, format, filepath, quiet: bool = ...): ...
-def pipe(engine, format, data, quiet: bool = ...): ...
-def version(): ...
-def view(filepath): ...
diff --git a/mypy-stubs/graphviz/dot.pyi b/mypy-stubs/graphviz/dot.pyi
deleted file mode 100644
index 01b81ea0f..000000000
--- a/mypy-stubs/graphviz/dot.pyi
+++ /dev/null
@@ -1,78 +0,0 @@
-# Stubs for graphviz.dot (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from . import files
-
-class Dot(files.File):
- name: Any = ...
- comment: Any = ...
- graph_attr: Any = ...
- node_attr: Any = ...
- edge_attr: Any = ...
- body: Any = ...
- strict: Any = ...
- def __init__(
- self,
- name: Optional[Any] = ...,
- comment: Optional[Any] = ...,
- filename: Optional[Any] = ...,
- directory: Optional[Any] = ...,
- format: Optional[Any] = ...,
- engine: Optional[Any] = ...,
- encoding: Any = ...,
- graph_attr: Optional[Any] = ...,
- node_attr: Optional[Any] = ...,
- edge_attr: Optional[Any] = ...,
- body: Optional[Any] = ...,
- strict: bool = ...,
- ) -> None: ...
- def clear(self, keep_attrs: bool = ...): ...
- def __iter__(self, subgraph: bool = ...): ...
- source: Any = ...
- def node(
- self,
- name,
- label: Optional[Any] = ...,
- _attributes: Optional[Any] = ...,
- **attrs
- ): ...
- def edge(
- self,
- tail_name,
- head_name,
- label: Optional[Any] = ...,
- _attributes: Optional[Any] = ...,
- **attrs
- ): ...
- def edges(self, tail_head_iter): ...
- def attr(
- self, kw: Optional[Any] = ..., _attributes: Optional[Any] = ..., **attrs
- ): ...
- def subgraph(
- self,
- graph: Optional[Any] = ...,
- name: Optional[Any] = ...,
- comment: Optional[Any] = ...,
- graph_attr: Optional[Any] = ...,
- node_attr: Optional[Any] = ...,
- edge_attr: Optional[Any] = ...,
- body: Optional[Any] = ...,
- ): ...
-
-class SubgraphContext:
- parent: Any = ...
- graph: Any = ...
- def __init__(self, parent, kwargs) -> None: ...
- def __enter__(self): ...
- def __exit__(self, type_, value, traceback): ...
-
-class Graph(Dot):
- @property
- def directed(self): ...
-
-class Digraph(Dot):
- @property
- def directed(self): ...
diff --git a/mypy-stubs/graphviz/files.pyi b/mypy-stubs/graphviz/files.pyi
deleted file mode 100644
index b0b8bdedc..000000000
--- a/mypy-stubs/graphviz/files.pyi
+++ /dev/null
@@ -1,73 +0,0 @@
-# Stubs for graphviz.files (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-class Base:
- @property
- def format(self): ...
- @format.setter
- def format(self, format): ...
- @property
- def engine(self): ...
- @engine.setter
- def engine(self, engine): ...
- @property
- def encoding(self): ...
- @encoding.setter
- def encoding(self, encoding): ...
- def copy(self): ...
-
-class File(Base):
- directory: str = ...
- filename: Any = ...
- format: Any = ...
- engine: Any = ...
- encoding: Any = ...
- def __init__(
- self,
- filename: Optional[Any] = ...,
- directory: Optional[Any] = ...,
- format: Optional[Any] = ...,
- engine: Optional[Any] = ...,
- encoding: Any = ...,
- ) -> None: ...
- def pipe(self, format: Optional[Any] = ...): ...
- @property
- def filepath(self): ...
- def save(self, filename: Optional[Any] = ..., directory: Optional[Any] = ...): ...
- def render(
- self,
- filename: Optional[Any] = ...,
- directory: Optional[Any] = ...,
- view: bool = ...,
- cleanup: bool = ...,
- ): ...
- def view(
- self,
- filename: Optional[Any] = ...,
- directory: Optional[Any] = ...,
- cleanup: bool = ...,
- ): ...
-
-class Source(File):
- @classmethod
- def from_file(
- cls,
- filename,
- directory: Optional[Any] = ...,
- format: Optional[Any] = ...,
- engine: Optional[Any] = ...,
- encoding: Any = ...,
- ): ...
- source: Any = ...
- def __init__(
- self,
- source,
- filename: Optional[Any] = ...,
- directory: Optional[Any] = ...,
- format: Optional[Any] = ...,
- engine: Optional[Any] = ...,
- encoding: Any = ...,
- ) -> None: ...
diff --git a/mypy-stubs/graphviz/lang.pyi b/mypy-stubs/graphviz/lang.pyi
deleted file mode 100644
index 9613c5c50..000000000
--- a/mypy-stubs/graphviz/lang.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-# Stubs for graphviz.lang (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def quote(
- identifier, html: Any = ..., valid_id: Any = ..., dot_keywords: Any = ...
-): ...
-def quote_edge(identifier): ...
-def a_list(
- label: Optional[Any] = ...,
- kwargs: Optional[Any] = ...,
- attributes: Optional[Any] = ...,
-): ...
-def attr_list(
- label: Optional[Any] = ...,
- kwargs: Optional[Any] = ...,
- attributes: Optional[Any] = ...,
-): ...
-
-class NoHtml: ...
diff --git a/mypy-stubs/graphviz/tools.pyi b/mypy-stubs/graphviz/tools.pyi
deleted file mode 100644
index abad3ca9b..000000000
--- a/mypy-stubs/graphviz/tools.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for graphviz.tools (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def attach(object, name): ...
-def mkdirs(filename, mode: int = ...): ...
-def mapping_items(mapping, _iteritems: Any = ...): ...
diff --git a/mypy-stubs/mistune.pyi b/mypy-stubs/mistune.pyi
index b4fe10213..3778c9195 100644
--- a/mypy-stubs/mistune.pyi
+++ b/mypy-stubs/mistune.pyi
@@ -168,7 +168,7 @@ class Markdown:
renderer: Optional[Renderer] = ...,
inline: Optional[InlineLexer] = ...,
block: Optional[BlockLexer] = ...,
- **kwargs: Any
+ **kwargs: Any,
) -> None: ...
def __call__(self, text: str) -> str: ...
def render(self, text: str) -> str: ...
diff --git a/mypy-stubs/networkx/__init__.pyi b/mypy-stubs/networkx/__init__.pyi
deleted file mode 100644
index 0a17af14c..000000000
--- a/mypy-stubs/networkx/__init__.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-m: str
-__license__: Any
-__date__: Any
-__bibtex__: str
diff --git a/mypy-stubs/networkx/algorithms/__init__.pyi b/mypy-stubs/networkx/algorithms/__init__.pyi
deleted file mode 100644
index d4579ee00..000000000
--- a/mypy-stubs/networkx/algorithms/__init__.pyi
+++ /dev/null
@@ -1,54 +0,0 @@
-# Stubs for networkx.algorithms (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.assortativity import *
-from networkx.algorithms.boundary import *
-from networkx.algorithms.bridges import *
-from networkx.algorithms.centrality import *
-from networkx.algorithms.chains import *
-from networkx.algorithms.chordal import *
-from networkx.algorithms.clique import *
-from networkx.algorithms.cluster import *
-from networkx.algorithms.coloring import *
-from networkx.algorithms.communicability_alg import *
-from networkx.algorithms.components import *
-from networkx.algorithms.core import *
-from networkx.algorithms.covering import *
-from networkx.algorithms.cuts import *
-from networkx.algorithms.cycles import *
-from networkx.algorithms.dag import *
-from networkx.algorithms.distance_measures import *
-from networkx.algorithms.distance_regular import *
-from networkx.algorithms.dominance import *
-from networkx.algorithms.dominating import *
-from networkx.algorithms.efficiency import *
-from networkx.algorithms.euler import *
-from networkx.algorithms.graphical import *
-from networkx.algorithms.hierarchy import *
-from networkx.algorithms.hybrid import *
-from networkx.algorithms.isolate import *
-from networkx.algorithms.link_analysis import *
-from networkx.algorithms.link_prediction import *
-from networkx.algorithms.lowest_common_ancestors import *
-from networkx.algorithms.matching import *
-from networkx.algorithms.minors import *
-from networkx.algorithms.mis import *
-from networkx.algorithms.operators import *
-from networkx.algorithms.reciprocity import *
-from networkx.algorithms.richclub import *
-from networkx.algorithms.shortest_paths import *
-from networkx.algorithms.similarity import *
-from networkx.algorithms.simple_paths import *
-from networkx.algorithms.smetric import *
-from networkx.algorithms.structuralholes import *
-from networkx.algorithms.swap import *
-from networkx.algorithms.traversal import *
-from networkx.algorithms.tree.coding import *
-from networkx.algorithms.tree.mst import *
-from networkx.algorithms.tree.operations import *
-from networkx.algorithms.tree.recognition import *
-from networkx.algorithms.triads import *
-from networkx.algorithms.vitality import *
-from networkx.algorithms.voronoi import *
-from networkx.algorithms.wiener import *
diff --git a/mypy-stubs/networkx/algorithms/approximation/__init__.pyi b/mypy-stubs/networkx/algorithms/approximation/__init__.pyi
deleted file mode 100644
index 24da5b4c0..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/__init__.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.approximation (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.approximation.clique import *
-from networkx.algorithms.approximation.clustering_coefficient import *
-from networkx.algorithms.approximation.connectivity import *
-from networkx.algorithms.approximation.dominating_set import *
-from networkx.algorithms.approximation.independent_set import *
-from networkx.algorithms.approximation.kcomponents import *
-from networkx.algorithms.approximation.matching import *
-from networkx.algorithms.approximation.ramsey import *
-from networkx.algorithms.approximation.steinertree import *
-from networkx.algorithms.approximation.vertex_cover import *
diff --git a/mypy-stubs/networkx/algorithms/approximation/clique.pyi b/mypy-stubs/networkx/algorithms/approximation/clique.pyi
deleted file mode 100644
index b2cf55c17..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/clique.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.approximation.clique (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def max_clique(G): ...
-def clique_removal(G): ...
-def large_clique_size(G): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/clustering_coefficient.pyi b/mypy-stubs/networkx/algorithms/approximation/clustering_coefficient.pyi
deleted file mode 100644
index 41ead7e4d..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/clustering_coefficient.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.approximation.clustering_coefficient (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def average_clustering(G, trials: int = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi b/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi
deleted file mode 100644
index e7723ca0f..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.approximation.connectivity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def local_node_connectivity(G, source, target, cutoff: Optional[Any] = ...): ...
-def node_connectivity(G, s: Optional[Any] = ..., t: Optional[Any] = ...): ...
-def all_pairs_node_connectivity(
- G, nbunch: Optional[Any] = ..., cutoff: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/dominating_set.pyi b/mypy-stubs/networkx/algorithms/approximation/dominating_set.pyi
deleted file mode 100644
index d72e997da..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/dominating_set.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.approximation.dominating_set (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def min_weighted_dominating_set(G, weight: Optional[Any] = ...): ...
-def min_edge_dominating_set(G): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/independent_set.pyi b/mypy-stubs/networkx/algorithms/approximation/independent_set.pyi
deleted file mode 100644
index 87b7f0bef..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/independent_set.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.approximation.independent_set (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def maximum_independent_set(G): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/kcomponents.pyi b/mypy-stubs/networkx/algorithms/approximation/kcomponents.pyi
deleted file mode 100644
index 0fb63e819..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/kcomponents.pyi
+++ /dev/null
@@ -1,41 +0,0 @@
-# Stubs for networkx.algorithms.approximation.kcomponents (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from collections import Mapping
-from typing import Any
-
-import networkx as nx
-
-def k_components(G, min_density: float = ...): ...
-
-class _AntiGraph(nx.Graph):
- all_edge_dict: Any = ...
- def single_edge_dict(self): ...
- edge_attr_dict_factory: Any = ...
- def __getitem__(self, n): ...
- def neighbors(self, n): ...
-
- class AntiAtlasView(Mapping):
- def __init__(self, graph, node) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, nbr): ...
-
- class AntiAdjacencyView(AntiAtlasView):
- def __init__(self, graph) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, node): ...
-
- @property
- def adj(self): ...
- def subgraph(self, nodes): ...
-
- class AntiDegreeView(nx.reportviews.DegreeView):
- def __iter__(self): ...
- def __getitem__(self, n): ...
-
- @property
- def degree(self): ...
- def adjacency(self): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/matching.pyi b/mypy-stubs/networkx/algorithms/approximation/matching.pyi
deleted file mode 100644
index 798518f57..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/matching.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.approximation.matching (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def min_maximal_matching(G): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/ramsey.pyi b/mypy-stubs/networkx/algorithms/approximation/ramsey.pyi
deleted file mode 100644
index 847847f18..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/ramsey.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.approximation.ramsey (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def ramsey_R2(G): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/steinertree.pyi b/mypy-stubs/networkx/algorithms/approximation/steinertree.pyi
deleted file mode 100644
index e63a0cbbb..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/steinertree.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.algorithms.approximation.steinertree (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def metric_closure(G, weight: str = ...): ...
-def steiner_tree(G, terminal_nodes, weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/approximation/vertex_cover.pyi b/mypy-stubs/networkx/algorithms/approximation/vertex_cover.pyi
deleted file mode 100644
index 50df80be6..000000000
--- a/mypy-stubs/networkx/algorithms/approximation/vertex_cover.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.approximation.vertex_cover (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def min_weighted_vertex_cover(G, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/assortativity/__init__.pyi b/mypy-stubs/networkx/algorithms/assortativity/__init__.pyi
deleted file mode 100644
index f673b8c83..000000000
--- a/mypy-stubs/networkx/algorithms/assortativity/__init__.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.assortativity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.assortativity.connectivity import *
-from networkx.algorithms.assortativity.correlation import *
-from networkx.algorithms.assortativity.mixing import *
-from networkx.algorithms.assortativity.neighbor_degree import *
-from networkx.algorithms.assortativity.pairs import *
diff --git a/mypy-stubs/networkx/algorithms/assortativity/connectivity.pyi b/mypy-stubs/networkx/algorithms/assortativity/connectivity.pyi
deleted file mode 100644
index c9bf01116..000000000
--- a/mypy-stubs/networkx/algorithms/assortativity/connectivity.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.assortativity.connectivity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def average_degree_connectivity(
- G,
- source: str = ...,
- target: str = ...,
- nodes: Optional[Any] = ...,
- weight: Optional[Any] = ...,
-): ...
-
-k_nearest_neighbors = average_degree_connectivity
diff --git a/mypy-stubs/networkx/algorithms/assortativity/correlation.pyi b/mypy-stubs/networkx/algorithms/assortativity/correlation.pyi
deleted file mode 100644
index 35774b514..000000000
--- a/mypy-stubs/networkx/algorithms/assortativity/correlation.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-# Stubs for networkx.algorithms.assortativity.correlation (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def degree_assortativity_coefficient(
- G,
- x: str = ...,
- y: str = ...,
- weight: Optional[Any] = ...,
- nodes: Optional[Any] = ...,
-): ...
-def degree_pearson_correlation_coefficient(
- G,
- x: str = ...,
- y: str = ...,
- weight: Optional[Any] = ...,
- nodes: Optional[Any] = ...,
-): ...
-def attribute_assortativity_coefficient(G, attribute, nodes: Optional[Any] = ...): ...
-def numeric_assortativity_coefficient(G, attribute, nodes: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi b/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi
deleted file mode 100644
index 2b938b7ba..000000000
--- a/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi
+++ /dev/null
@@ -1,36 +0,0 @@
-# Stubs for networkx.algorithms.assortativity.mixing (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def attribute_mixing_dict(
- G, attribute, nodes: Optional[Any] = ..., normalized: bool = ...
-): ...
-def attribute_mixing_matrix(
- G,
- attribute,
- nodes: Optional[Any] = ...,
- mapping: Optional[Any] = ...,
- normalized: bool = ...,
-): ...
-def degree_mixing_dict(
- G,
- x: str = ...,
- y: str = ...,
- weight: Optional[Any] = ...,
- nodes: Optional[Any] = ...,
- normalized: bool = ...,
-): ...
-def degree_mixing_matrix(
- G,
- x: str = ...,
- y: str = ...,
- weight: Optional[Any] = ...,
- nodes: Optional[Any] = ...,
- normalized: bool = ...,
-): ...
-def numeric_mixing_matrix(
- G, attribute, nodes: Optional[Any] = ..., normalized: bool = ...
-): ...
-def mixing_dict(xy, normalized: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/assortativity/neighbor_degree.pyi b/mypy-stubs/networkx/algorithms/assortativity/neighbor_degree.pyi
deleted file mode 100644
index 212c18e66..000000000
--- a/mypy-stubs/networkx/algorithms/assortativity/neighbor_degree.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-# Stubs for networkx.algorithms.assortativity.neighbor_degree (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def average_neighbor_degree(
- G,
- source: str = ...,
- target: str = ...,
- nodes: Optional[Any] = ...,
- weight: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/assortativity/pairs.pyi b/mypy-stubs/networkx/algorithms/assortativity/pairs.pyi
deleted file mode 100644
index d5c9cd8d7..000000000
--- a/mypy-stubs/networkx/algorithms/assortativity/pairs.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.assortativity.pairs (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def node_attribute_xy(G, attribute, nodes: Optional[Any] = ...): ...
-def node_degree_xy(
- G,
- x: str = ...,
- y: str = ...,
- weight: Optional[Any] = ...,
- nodes: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/__init__.pyi b/mypy-stubs/networkx/algorithms/bipartite/__init__.pyi
deleted file mode 100644
index 34c5b33f9..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/__init__.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.bipartite (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.bipartite.basic import *
-from networkx.algorithms.bipartite.centrality import *
-from networkx.algorithms.bipartite.cluster import *
-from networkx.algorithms.bipartite.covering import *
-from networkx.algorithms.bipartite.edgelist import *
-from networkx.algorithms.bipartite.generators import *
-from networkx.algorithms.bipartite.matching import *
-from networkx.algorithms.bipartite.matrix import *
-from networkx.algorithms.bipartite.projection import *
-from networkx.algorithms.bipartite.redundancy import *
-from networkx.algorithms.bipartite.spectral import *
diff --git a/mypy-stubs/networkx/algorithms/bipartite/basic.pyi b/mypy-stubs/networkx/algorithms/bipartite/basic.pyi
deleted file mode 100644
index a71ac47cc..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/basic.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.basic (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def color(G): ...
-def is_bipartite(G): ...
-def is_bipartite_node_set(G, nodes): ...
-def sets(G, top_nodes: Optional[Any] = ...): ...
-def density(B, nodes): ...
-def degrees(B, nodes, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/centrality.pyi b/mypy-stubs/networkx/algorithms/bipartite/centrality.pyi
deleted file mode 100644
index 57db9a774..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/centrality.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.centrality (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def degree_centrality(G, nodes): ...
-def betweenness_centrality(G, nodes): ...
-def closeness_centrality(G, nodes, normalized: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/cluster.pyi b/mypy-stubs/networkx/algorithms/bipartite/cluster.pyi
deleted file mode 100644
index ec4796faf..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/cluster.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.cluster (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def latapy_clustering(G, nodes: Optional[Any] = ..., mode: str = ...): ...
-
-clustering = latapy_clustering
-
-def average_clustering(G, nodes: Optional[Any] = ..., mode: str = ...): ...
-def robins_alexander_clustering(G): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/covering.pyi b/mypy-stubs/networkx/algorithms/bipartite/covering.pyi
deleted file mode 100644
index 1fe5b80cc..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/covering.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.covering (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def min_edge_cover(G, matching_algorithm: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/edgelist.pyi b/mypy-stubs/networkx/algorithms/bipartite/edgelist.pyi
deleted file mode 100644
index c3ee3d82d..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/edgelist.pyi
+++ /dev/null
@@ -1,33 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.edgelist (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def write_edgelist(
- G,
- path,
- comments: str = ...,
- delimiter: str = ...,
- data: bool = ...,
- encoding: str = ...,
-): ...
-def generate_edgelist(G, delimiter: str = ..., data: bool = ...): ...
-def parse_edgelist(
- lines,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- data: bool = ...,
-): ...
-def read_edgelist(
- path,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- data: bool = ...,
- edgetype: Optional[Any] = ...,
- encoding: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/generators.pyi b/mypy-stubs/networkx/algorithms/bipartite/generators.pyi
deleted file mode 100644
index e1a85a260..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/generators.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.generators (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def complete_bipartite_graph(n1, n2, create_using: Optional[Any] = ...): ...
-def configuration_model(
- aseq, bseq, create_using: Optional[Any] = ..., seed: Optional[Any] = ...
-): ...
-def havel_hakimi_graph(aseq, bseq, create_using: Optional[Any] = ...): ...
-def reverse_havel_hakimi_graph(aseq, bseq, create_using: Optional[Any] = ...): ...
-def alternating_havel_hakimi_graph(aseq, bseq, create_using: Optional[Any] = ...): ...
-def preferential_attachment_graph(
- aseq, p, create_using: Optional[Any] = ..., seed: Optional[Any] = ...
-): ...
-def random_graph(n, m, p, seed: Optional[Any] = ..., directed: bool = ...): ...
-def gnmk_random_graph(n, m, k, seed: Optional[Any] = ..., directed: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/matching.pyi b/mypy-stubs/networkx/algorithms/bipartite/matching.pyi
deleted file mode 100644
index 8652e20e6..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/matching.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.matching (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def hopcroft_karp_matching(G, top_nodes: Optional[Any] = ...): ...
-def eppstein_matching(G, top_nodes: Optional[Any] = ...): ...
-def to_vertex_cover(G, matching, top_nodes: Optional[Any] = ...): ...
-
-maximum_matching = hopcroft_karp_matching
diff --git a/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi b/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi
deleted file mode 100644
index 56588c7b1..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.matrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def biadjacency_matrix(
- G,
- row_order,
- column_order: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- weight: str = ...,
- format: str = ...,
-): ...
-def from_biadjacency_matrix(
- A, create_using: Optional[Any] = ..., edge_attribute: str = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/projection.pyi b/mypy-stubs/networkx/algorithms/bipartite/projection.pyi
deleted file mode 100644
index 9f7825752..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/projection.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.projection (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def projected_graph(B, nodes, multigraph: bool = ...): ...
-def weighted_projected_graph(B, nodes, ratio: bool = ...): ...
-def collaboration_weighted_projected_graph(B, nodes): ...
-def overlap_weighted_projected_graph(B, nodes, jaccard: bool = ...): ...
-def generic_weighted_projected_graph(
- B, nodes, weight_function: Optional[Any] = ...
-): ...
-def project(B, nodes, create_using: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/redundancy.pyi b/mypy-stubs/networkx/algorithms/bipartite/redundancy.pyi
deleted file mode 100644
index 5801a14a8..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/redundancy.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.redundancy (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def node_redundancy(G, nodes: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bipartite/spectral.pyi b/mypy-stubs/networkx/algorithms/bipartite/spectral.pyi
deleted file mode 100644
index beedadb37..000000000
--- a/mypy-stubs/networkx/algorithms/bipartite/spectral.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.bipartite.spectral (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def spectral_bipartivity(G, nodes: Optional[Any] = ..., weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/boundary.pyi b/mypy-stubs/networkx/algorithms/boundary.pyi
deleted file mode 100644
index 44991875c..000000000
--- a/mypy-stubs/networkx/algorithms/boundary.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.boundary (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def edge_boundary(
- G,
- nbunch1,
- nbunch2: Optional[Any] = ...,
- data: bool = ...,
- keys: bool = ...,
- default: Optional[Any] = ...,
-): ...
-def node_boundary(G, nbunch1, nbunch2: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/bridges.pyi b/mypy-stubs/networkx/algorithms/bridges.pyi
deleted file mode 100644
index e5c3bc3e0..000000000
--- a/mypy-stubs/networkx/algorithms/bridges.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.bridges (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def bridges(G, root: Optional[Any] = ...): ...
-def has_bridges(G, root: Optional[Any] = ...): ...
-def local_bridges(G, with_span: bool = ..., weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/betweenness.pyi b/mypy-stubs/networkx/algorithms/centrality/betweenness.pyi
deleted file mode 100644
index 5a4be1a4a..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/betweenness.pyi
+++ /dev/null
@@ -1,28 +0,0 @@
-# Stubs for networkx.algorithms.centrality.betweenness (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def betweenness_centrality(
- G,
- k: Optional[Any] = ...,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- endpoints: bool = ...,
- seed: Optional[Any] = ...,
-): ...
-def edge_betweenness_centrality(
- G,
- k: Optional[Any] = ...,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- seed: Optional[Any] = ...,
-): ...
-def edge_betweenness(
- G,
- k: Optional[Any] = ...,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- seed: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/betweenness_subset.pyi b/mypy-stubs/networkx/algorithms/centrality/betweenness_subset.pyi
deleted file mode 100644
index fa6316801..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/betweenness_subset.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.centrality.betweenness_subset (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def betweenness_centrality_subset(
- G, sources, targets, normalized: bool = ..., weight: Optional[Any] = ...
-): ...
-def edge_betweenness_centrality_subset(
- G, sources, targets, normalized: bool = ..., weight: Optional[Any] = ...
-): ...
-def betweenness_centrality_source(
- G, normalized: bool = ..., weight: Optional[Any] = ..., sources: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/closeness.pyi b/mypy-stubs/networkx/algorithms/centrality/closeness.pyi
deleted file mode 100644
index e1abeecfc..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/closeness.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-# Stubs for networkx.algorithms.centrality.closeness (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def closeness_centrality(
- G,
- u: Optional[Any] = ...,
- distance: Optional[Any] = ...,
- wf_improved: bool = ...,
- reverse: bool = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/current_flow_betweenness.pyi b/mypy-stubs/networkx/algorithms/centrality/current_flow_betweenness.pyi
deleted file mode 100644
index 4e01428dd..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/current_flow_betweenness.pyi
+++ /dev/null
@@ -1,31 +0,0 @@
-# Stubs for networkx.algorithms.centrality.current_flow_betweenness (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.centrality.flow_matrix import *
-
-def approximate_current_flow_betweenness_centrality(
- G,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- dtype: Any = ...,
- solver: str = ...,
- epsilon: float = ...,
- kmax: int = ...,
-): ...
-def current_flow_betweenness_centrality(
- G,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- dtype: Any = ...,
- solver: str = ...,
-): ...
-def edge_current_flow_betweenness_centrality(
- G,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- dtype: Any = ...,
- solver: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi b/mypy-stubs/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi
deleted file mode 100644
index 7b9c3eaec..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi
+++ /dev/null
@@ -1,26 +0,0 @@
-# Stubs for networkx.algorithms.centrality.current_flow_betweenness_subset (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.centrality.flow_matrix import *
-
-def current_flow_betweenness_centrality_subset(
- G,
- sources,
- targets,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- dtype: Any = ...,
- solver: str = ...,
-): ...
-def edge_current_flow_betweenness_centrality_subset(
- G,
- sources,
- targets,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
- dtype: Any = ...,
- solver: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/current_flow_closeness.pyi b/mypy-stubs/networkx/algorithms/centrality/current_flow_closeness.pyi
deleted file mode 100644
index 1555cf8f4..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/current_flow_closeness.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-# Stubs for networkx.algorithms.centrality.current_flow_closeness (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.centrality.flow_matrix import *
-
-def current_flow_closeness_centrality(
- G, weight: Optional[Any] = ..., dtype: Any = ..., solver: str = ...
-): ...
-
-information_centrality = current_flow_closeness_centrality
diff --git a/mypy-stubs/networkx/algorithms/centrality/degree_alg.pyi b/mypy-stubs/networkx/algorithms/centrality/degree_alg.pyi
deleted file mode 100644
index cf86db0cb..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/degree_alg.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.centrality.degree_alg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def degree_centrality(G): ...
-def in_degree_centrality(G): ...
-def out_degree_centrality(G): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/dispersion.pyi b/mypy-stubs/networkx/algorithms/centrality/dispersion.pyi
deleted file mode 100644
index 032d455de..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/dispersion.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.centrality.dispersion (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def dispersion(
- G,
- u: Optional[Any] = ...,
- v: Optional[Any] = ...,
- normalized: bool = ...,
- alpha: float = ...,
- b: float = ...,
- c: float = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/eigenvector.pyi b/mypy-stubs/networkx/algorithms/centrality/eigenvector.pyi
deleted file mode 100644
index df5313cd8..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/eigenvector.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-# Stubs for networkx.algorithms.centrality.eigenvector (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def eigenvector_centrality(
- G,
- max_iter: int = ...,
- tol: float = ...,
- nstart: Optional[Any] = ...,
- weight: Optional[Any] = ...,
-): ...
-def eigenvector_centrality_numpy(
- G, weight: Optional[Any] = ..., max_iter: int = ..., tol: int = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi b/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi
deleted file mode 100644
index 1a6009919..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi
+++ /dev/null
@@ -1,51 +0,0 @@
-# Stubs for networkx.algorithms.centrality.flow_matrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def flow_matrix_row(
- G, weight: Optional[Any] = ..., dtype: Any = ..., solver: str = ...
-): ...
-
-class InverseLaplacian:
- dtype: Any = ...
- n: Any = ...
- w: Any = ...
- C: Any = ...
- L1: Any = ...
- def __init__(
- self, L, width: Optional[Any] = ..., dtype: Optional[Any] = ...
- ) -> None: ...
- def init_solver(self, L): ...
- def solve(self, r): ...
- def solve_inverse(self, r): ...
- def get_rows(self, r1, r2): ...
- def get_row(self, r): ...
- def width(self, L): ...
-
-class FullInverseLaplacian(InverseLaplacian):
- IL: Any = ...
- def init_solver(self, L): ...
- def solve(self, rhs): ...
- def solve_inverse(self, r): ...
-
-class SuperLUInverseLaplacian(InverseLaplacian):
- lusolve: Any = ...
- def init_solver(self, L): ...
- def solve_inverse(self, r): ...
- def solve(self, rhs): ...
-
-class CGInverseLaplacian(InverseLaplacian):
- M: Any = ...
- def init_solver(self, L): ...
- def solve(self, rhs): ...
- def solve_inverse(self, r): ...
-
-def laplacian_sparse_matrix(
- G,
- nodelist: Optional[Any] = ...,
- weight: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- format: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi b/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi
deleted file mode 100644
index 521de798a..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.centrality.harmonic (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def harmonic_centrality(
- G, nbunch: Optional[Any] = ..., distance: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/katz.pyi b/mypy-stubs/networkx/algorithms/centrality/katz.pyi
deleted file mode 100644
index b576d4295..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/katz.pyi
+++ /dev/null
@@ -1,23 +0,0 @@
-# Stubs for networkx.algorithms.centrality.katz (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def katz_centrality(
- G,
- alpha: float = ...,
- beta: float = ...,
- max_iter: int = ...,
- tol: float = ...,
- nstart: Optional[Any] = ...,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
-): ...
-def katz_centrality_numpy(
- G,
- alpha: float = ...,
- beta: float = ...,
- normalized: bool = ...,
- weight: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/load.pyi b/mypy-stubs/networkx/algorithms/centrality/load.pyi
deleted file mode 100644
index 098fb4c83..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/load.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.centrality.load (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def edge_load_centrality(G, cutoff: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/reaching.pyi b/mypy-stubs/networkx/algorithms/centrality/reaching.pyi
deleted file mode 100644
index c6f4cd02f..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/reaching.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-# Stubs for networkx.algorithms.centrality.reaching (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def global_reaching_centrality(
- G, weight: Optional[Any] = ..., normalized: bool = ...
-): ...
-def local_reaching_centrality(
- G,
- v,
- paths: Optional[Any] = ...,
- weight: Optional[Any] = ...,
- normalized: bool = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/centrality/subgraph_alg.pyi b/mypy-stubs/networkx/algorithms/centrality/subgraph_alg.pyi
deleted file mode 100644
index d6bf92e02..000000000
--- a/mypy-stubs/networkx/algorithms/centrality/subgraph_alg.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.centrality.subgraph_alg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.utils import *
-
-def subgraph_centrality_exp(G): ...
-def subgraph_centrality(G): ...
-def communicability_betweenness_centrality(G, normalized: bool = ...): ...
-def estrada_index(G): ...
diff --git a/mypy-stubs/networkx/algorithms/chains.pyi b/mypy-stubs/networkx/algorithms/chains.pyi
deleted file mode 100644
index a139286ca..000000000
--- a/mypy-stubs/networkx/algorithms/chains.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.chains (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def chain_decomposition(G, root: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/chordal.pyi b/mypy-stubs/networkx/algorithms/chordal.pyi
deleted file mode 100644
index 4c6042db7..000000000
--- a/mypy-stubs/networkx/algorithms/chordal.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.chordal (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-import networkx as nx
-
-class NetworkXTreewidthBoundExceeded(nx.NetworkXException): ...
-
-def is_chordal(G): ...
-def find_induced_nodes(G, s, t, treewidth_bound: Any = ...): ...
-def chordal_graph_cliques(G): ...
-def chordal_graph_treewidth(G): ...
diff --git a/mypy-stubs/networkx/algorithms/clique.pyi b/mypy-stubs/networkx/algorithms/clique.pyi
deleted file mode 100644
index ef1a7faa6..000000000
--- a/mypy-stubs/networkx/algorithms/clique.pyi
+++ /dev/null
@@ -1,23 +0,0 @@
-# Stubs for networkx.algorithms.clique (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def enumerate_all_cliques(G): ...
-def find_cliques(G): ...
-def find_cliques_recursive(G): ...
-def make_max_clique_graph(G, create_using: Optional[Any] = ...): ...
-def make_clique_bipartite(
- G,
- fpos: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- name: Optional[Any] = ...,
-): ...
-def graph_clique_number(G, cliques: Optional[Any] = ...): ...
-def graph_number_of_cliques(G, cliques: Optional[Any] = ...): ...
-def node_clique_number(G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ...): ...
-def number_of_cliques(G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ...): ...
-def cliques_containing_node(
- G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/cluster.pyi b/mypy-stubs/networkx/algorithms/cluster.pyi
deleted file mode 100644
index 3e87622eb..000000000
--- a/mypy-stubs/networkx/algorithms/cluster.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.cluster (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def triangles(G, nodes: Optional[Any] = ...): ...
-def average_clustering(
- G, nodes: Optional[Any] = ..., weight: Optional[Any] = ..., count_zeros: bool = ...
-): ...
-def clustering(G, nodes: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def transitivity(G): ...
-def square_clustering(G, nodes: Optional[Any] = ...): ...
-def generalized_degree(G, nodes: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/coloring/__init__.pyi b/mypy-stubs/networkx/algorithms/coloring/__init__.pyi
deleted file mode 100644
index 42d09aecc..000000000
--- a/mypy-stubs/networkx/algorithms/coloring/__init__.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.coloring (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.coloring.greedy_coloring import *
-
-# Names in __all__ with no definition:
-# greedy_color
diff --git a/mypy-stubs/networkx/algorithms/coloring/greedy_coloring.pyi b/mypy-stubs/networkx/algorithms/coloring/greedy_coloring.pyi
deleted file mode 100644
index e0cc94ec5..000000000
--- a/mypy-stubs/networkx/algorithms/coloring/greedy_coloring.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-# Stubs for networkx.algorithms.coloring.greedy_coloring (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def strategy_largest_first(G, colors): ...
-def strategy_random_sequential(G, colors): ...
-def strategy_smallest_last(G, colors): ...
-def strategy_independent_set(G, colors): ...
-def strategy_connected_sequential_bfs(G, colors): ...
-def strategy_connected_sequential_dfs(G, colors): ...
-def strategy_connected_sequential(G, colors, traversal: str = ...): ...
-def strategy_saturation_largest_first(G, colors): ...
-def greedy_color(G, strategy: str = ..., interchange: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi b/mypy-stubs/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi
deleted file mode 100644
index e21955833..000000000
--- a/mypy-stubs/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi
+++ /dev/null
@@ -1,26 +0,0 @@
-# Stubs for networkx.algorithms.coloring.greedy_coloring_with_interchange (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-class Node:
- node_id: Any = ...
- color: int = ...
- adj_list: Any = ...
- adj_color: Any = ...
- def __init__(self, node_id, n) -> None: ...
- def assign_color(self, adj_entry, color): ...
- def clear_color(self, adj_entry, color): ...
- def iter_neighbors(self): ...
- def iter_neighbors_color(self, color): ...
-
-class AdjEntry:
- node_id: Any = ...
- next: Any = ...
- mate: Any = ...
- col_next: Any = ...
- col_prev: Any = ...
- def __init__(self, node_id) -> None: ...
-
-def greedy_coloring_with_interchange(original_graph, nodes): ...
diff --git a/mypy-stubs/networkx/algorithms/communicability_alg.pyi b/mypy-stubs/networkx/algorithms/communicability_alg.pyi
deleted file mode 100644
index 7089dcc8b..000000000
--- a/mypy-stubs/networkx/algorithms/communicability_alg.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.communicability_alg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.utils import *
-
-def communicability(G): ...
-def communicability_exp(G): ...
diff --git a/mypy-stubs/networkx/algorithms/community/__init__.pyi b/mypy-stubs/networkx/algorithms/community/__init__.pyi
deleted file mode 100644
index 5b7c52e00..000000000
--- a/mypy-stubs/networkx/algorithms/community/__init__.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.community (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.community.asyn_fluidc import *
-from networkx.algorithms.community.centrality import *
-from networkx.algorithms.community.community_generators import *
-from networkx.algorithms.community.community_utils import *
-from networkx.algorithms.community.kclique import *
-from networkx.algorithms.community.kernighan_lin import *
-from networkx.algorithms.community.label_propagation import *
-from networkx.algorithms.community.quality import *
diff --git a/mypy-stubs/networkx/algorithms/community/asyn_fluidc.pyi b/mypy-stubs/networkx/algorithms/community/asyn_fluidc.pyi
deleted file mode 100644
index f5a443294..000000000
--- a/mypy-stubs/networkx/algorithms/community/asyn_fluidc.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.community.asyn_fluidc (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def asyn_fluidc(G, k, max_iter: int = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/community/centrality.pyi b/mypy-stubs/networkx/algorithms/community/centrality.pyi
deleted file mode 100644
index 45afe57f8..000000000
--- a/mypy-stubs/networkx/algorithms/community/centrality.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.community.centrality (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def girvan_newman(G, most_valuable_edge: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/community/community_generators.pyi b/mypy-stubs/networkx/algorithms/community/community_generators.pyi
deleted file mode 100644
index 37248edb1..000000000
--- a/mypy-stubs/networkx/algorithms/community/community_generators.pyi
+++ /dev/null
@@ -1,20 +0,0 @@
-# Stubs for networkx.algorithms.community.community_generators (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def LFR_benchmark_graph(
- n,
- tau1,
- tau2,
- mu,
- average_degree: Optional[Any] = ...,
- min_degree: Optional[Any] = ...,
- max_degree: Optional[Any] = ...,
- min_community: Optional[Any] = ...,
- max_community: Optional[Any] = ...,
- tol: float = ...,
- max_iters: int = ...,
- seed: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/community/community_utils.pyi b/mypy-stubs/networkx/algorithms/community/community_utils.pyi
deleted file mode 100644
index 31fc4744e..000000000
--- a/mypy-stubs/networkx/algorithms/community/community_utils.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.community.community_utils (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_partition(G, communities): ...
diff --git a/mypy-stubs/networkx/algorithms/community/kclique.pyi b/mypy-stubs/networkx/algorithms/community/kclique.pyi
deleted file mode 100644
index a86fd4218..000000000
--- a/mypy-stubs/networkx/algorithms/community/kclique.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.community.kclique (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def k_clique_communities(G, k, cliques: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/community/kernighan_lin.pyi b/mypy-stubs/networkx/algorithms/community/kernighan_lin.pyi
deleted file mode 100644
index a68349552..000000000
--- a/mypy-stubs/networkx/algorithms/community/kernighan_lin.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.community.kernighan_lin (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def kernighan_lin_bisection(
- G, partition: Optional[Any] = ..., max_iter: int = ..., weight: str = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/community/label_propagation.pyi b/mypy-stubs/networkx/algorithms/community/label_propagation.pyi
deleted file mode 100644
index ba1d93bc2..000000000
--- a/mypy-stubs/networkx/algorithms/community/label_propagation.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.community.label_propagation (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def asyn_lpa_communities(G, weight: Optional[Any] = ...): ...
-def label_propagation_communities(G): ...
diff --git a/mypy-stubs/networkx/algorithms/community/quality.pyi b/mypy-stubs/networkx/algorithms/community/quality.pyi
deleted file mode 100644
index db195d433..000000000
--- a/mypy-stubs/networkx/algorithms/community/quality.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.community.quality (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx import NetworkXError
-
-class NotAPartition(NetworkXError):
- def __init__(self, G, collection) -> None: ...
-
-def performance(G, partition): ...
-def coverage(G, partition): ...
-def modularity(G, communities, weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/components/__init__.pyi b/mypy-stubs/networkx/algorithms/components/__init__.pyi
deleted file mode 100644
index 8894f98d2..000000000
--- a/mypy-stubs/networkx/algorithms/components/__init__.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.components (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .attracting import *
-from .biconnected import *
-from .connected import *
-from .semiconnected import *
-from .strongly_connected import *
-from .weakly_connected import *
diff --git a/mypy-stubs/networkx/algorithms/components/attracting.pyi b/mypy-stubs/networkx/algorithms/components/attracting.pyi
deleted file mode 100644
index 166e28588..000000000
--- a/mypy-stubs/networkx/algorithms/components/attracting.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.components.attracting (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def attracting_components(G): ...
-def number_attracting_components(G): ...
-def is_attracting_component(G): ...
-def attracting_component_subgraphs(G, copy: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/components/biconnected.pyi b/mypy-stubs/networkx/algorithms/components/biconnected.pyi
deleted file mode 100644
index b5e652374..000000000
--- a/mypy-stubs/networkx/algorithms/components/biconnected.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.components.biconnected (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_biconnected(G): ...
-def biconnected_component_edges(G): ...
-def biconnected_components(G): ...
-def biconnected_component_subgraphs(G, copy: bool = ...): ...
-def articulation_points(G): ...
diff --git a/mypy-stubs/networkx/algorithms/components/connected.pyi b/mypy-stubs/networkx/algorithms/components/connected.pyi
deleted file mode 100644
index 4947a7fe3..000000000
--- a/mypy-stubs/networkx/algorithms/components/connected.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.components.connected (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def connected_components(G): ...
-def connected_component_subgraphs(G, copy: bool = ...): ...
-def number_connected_components(G): ...
-def is_connected(G): ...
-def node_connected_component(G, n): ...
diff --git a/mypy-stubs/networkx/algorithms/components/semiconnected.pyi b/mypy-stubs/networkx/algorithms/components/semiconnected.pyi
deleted file mode 100644
index cad67d567..000000000
--- a/mypy-stubs/networkx/algorithms/components/semiconnected.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.components.semiconnected (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_semiconnected(G): ...
diff --git a/mypy-stubs/networkx/algorithms/components/strongly_connected.pyi b/mypy-stubs/networkx/algorithms/components/strongly_connected.pyi
deleted file mode 100644
index a6e2baade..000000000
--- a/mypy-stubs/networkx/algorithms/components/strongly_connected.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-# Stubs for networkx.algorithms.components.strongly_connected (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def strongly_connected_components(G): ...
-def kosaraju_strongly_connected_components(G, source: Optional[Any] = ...): ...
-def strongly_connected_components_recursive(G): ...
-def strongly_connected_component_subgraphs(G, copy: bool = ...): ...
-def number_strongly_connected_components(G): ...
-def is_strongly_connected(G): ...
-def condensation(G, scc: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/components/weakly_connected.pyi b/mypy-stubs/networkx/algorithms/components/weakly_connected.pyi
deleted file mode 100644
index e6e7c1162..000000000
--- a/mypy-stubs/networkx/algorithms/components/weakly_connected.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.components.weakly_connected (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def weakly_connected_components(G): ...
-def number_weakly_connected_components(G): ...
-def weakly_connected_component_subgraphs(G, copy: bool = ...): ...
-def is_weakly_connected(G): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/__init__.pyi b/mypy-stubs/networkx/algorithms/connectivity/__init__.pyi
deleted file mode 100644
index 3ca76bd89..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/__init__.pyi
+++ /dev/null
@@ -1,39 +0,0 @@
-# Stubs for networkx.algorithms.connectivity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .connectivity import *
-from .cuts import *
-from .disjoint_paths import *
-from .edge_augmentation import *
-from .edge_kcomponents import *
-from .kcomponents import *
-from .kcutsets import *
-from .stoerwagner import *
-from .utils import *
-
-# Names in __all__ with no definition:
-# EdgeComponentAuxGraph
-# all_node_cuts
-# all_pairs_node_connectivity
-# average_node_connectivity
-# bridge_components
-# build_auxiliary_edge_connectivity
-# build_auxiliary_node_connectivity
-# edge_connectivity
-# edge_disjoint_paths
-# is_k_edge_connected
-# is_locally_k_edge_connected
-# k_components
-# k_edge_augmentation
-# k_edge_components
-# k_edge_subgraphs
-# local_edge_connectivity
-# local_node_connectivity
-# minimum_edge_cut
-# minimum_node_cut
-# minimum_st_edge_cut
-# minimum_st_node_cut
-# node_connectivity
-# node_disjoint_paths
-# stoer_wagner
diff --git a/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi b/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi
deleted file mode 100644
index 9d8780294..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi
+++ /dev/null
@@ -1,42 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.connectivity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.flow import edmonds_karp
-
-default_flow_func = edmonds_karp
-
-def local_node_connectivity(
- G,
- s,
- t,
- flow_func: Optional[Any] = ...,
- auxiliary: Optional[Any] = ...,
- residual: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
-): ...
-def node_connectivity(
- G, s: Optional[Any] = ..., t: Optional[Any] = ..., flow_func: Optional[Any] = ...
-): ...
-def average_node_connectivity(G, flow_func: Optional[Any] = ...): ...
-def all_pairs_node_connectivity(
- G, nbunch: Optional[Any] = ..., flow_func: Optional[Any] = ...
-): ...
-def local_edge_connectivity(
- G,
- s,
- t,
- flow_func: Optional[Any] = ...,
- auxiliary: Optional[Any] = ...,
- residual: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
-): ...
-def edge_connectivity(
- G,
- s: Optional[Any] = ...,
- t: Optional[Any] = ...,
- flow_func: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/cuts.pyi b/mypy-stubs/networkx/algorithms/connectivity/cuts.pyi
deleted file mode 100644
index 8e9f371c5..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/cuts.pyi
+++ /dev/null
@@ -1,32 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.cuts (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.flow import edmonds_karp
-
-default_flow_func = edmonds_karp
-
-def minimum_st_edge_cut(
- G,
- s,
- t,
- flow_func: Optional[Any] = ...,
- auxiliary: Optional[Any] = ...,
- residual: Optional[Any] = ...,
-): ...
-def minimum_st_node_cut(
- G,
- s,
- t,
- flow_func: Optional[Any] = ...,
- auxiliary: Optional[Any] = ...,
- residual: Optional[Any] = ...,
-): ...
-def minimum_node_cut(
- G, s: Optional[Any] = ..., t: Optional[Any] = ..., flow_func: Optional[Any] = ...
-): ...
-def minimum_edge_cut(
- G, s: Optional[Any] = ..., t: Optional[Any] = ..., flow_func: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/disjoint_paths.pyi b/mypy-stubs/networkx/algorithms/connectivity/disjoint_paths.pyi
deleted file mode 100644
index 197aefd81..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/disjoint_paths.pyi
+++ /dev/null
@@ -1,28 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.disjoint_paths (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.flow import edmonds_karp
-
-default_flow_func = edmonds_karp
-
-def edge_disjoint_paths(
- G,
- s,
- t,
- flow_func: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- auxiliary: Optional[Any] = ...,
- residual: Optional[Any] = ...,
-): ...
-def node_disjoint_paths(
- G,
- s,
- t,
- flow_func: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- auxiliary: Optional[Any] = ...,
- residual: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/edge_augmentation.pyi b/mypy-stubs/networkx/algorithms/connectivity/edge_augmentation.pyi
deleted file mode 100644
index cb446bd5a..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/edge_augmentation.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.edge_augmentation (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from collections import namedtuple
-from typing import Any, Optional
-
-def is_k_edge_connected(G, k): ...
-def is_locally_k_edge_connected(G, s, t, k): ...
-def k_edge_augmentation(
- G, k, avail: Optional[Any] = ..., weight: Optional[Any] = ..., partial: bool = ...
-): ...
-
-# MetaEdge = namedtuple('MetaEdge', )
diff --git a/mypy-stubs/networkx/algorithms/connectivity/edge_kcomponents.pyi b/mypy-stubs/networkx/algorithms/connectivity/edge_kcomponents.pyi
deleted file mode 100644
index 5aaa19809..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/edge_kcomponents.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.edge_kcomponents (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def k_edge_components(G, k): ...
-def k_edge_subgraphs(G, k): ...
-def bridge_components(G): ...
-
-class EdgeComponentAuxGraph:
- def construct(EdgeComponentAuxGraph, G): ...
- def k_edge_components(self, k): ...
- def k_edge_subgraphs(self, k): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/kcomponents.pyi b/mypy-stubs/networkx/algorithms/connectivity/kcomponents.pyi
deleted file mode 100644
index ebf249683..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/kcomponents.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.kcomponents (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.flow import edmonds_karp
-
-default_flow_func = edmonds_karp
-
-def k_components(G, flow_func: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/kcutsets.pyi b/mypy-stubs/networkx/algorithms/connectivity/kcutsets.pyi
deleted file mode 100644
index 0cba1963c..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/kcutsets.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.kcutsets (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.flow import edmonds_karp
-
-default_flow_func = edmonds_karp
-
-def all_node_cuts(G, k: Optional[Any] = ..., flow_func: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/stoerwagner.pyi b/mypy-stubs/networkx/algorithms/connectivity/stoerwagner.pyi
deleted file mode 100644
index eb366ad95..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/stoerwagner.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.stoerwagner (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def stoer_wagner(G, weight: str = ..., heap: Any = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/connectivity/utils.pyi b/mypy-stubs/networkx/algorithms/connectivity/utils.pyi
deleted file mode 100644
index 1b6a44f58..000000000
--- a/mypy-stubs/networkx/algorithms/connectivity/utils.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.algorithms.connectivity.utils (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def build_auxiliary_node_connectivity(G): ...
-def build_auxiliary_edge_connectivity(G): ...
diff --git a/mypy-stubs/networkx/algorithms/core.pyi b/mypy-stubs/networkx/algorithms/core.pyi
deleted file mode 100644
index 3c01a6a7c..000000000
--- a/mypy-stubs/networkx/algorithms/core.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.core (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def core_number(G): ...
-
-find_cores = core_number
-
-def k_core(G, k: Optional[Any] = ..., core_number: Optional[Any] = ...): ...
-def k_shell(G, k: Optional[Any] = ..., core_number: Optional[Any] = ...): ...
-def k_crust(G, k: Optional[Any] = ..., core_number: Optional[Any] = ...): ...
-def k_corona(G, k, core_number: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/covering.pyi b/mypy-stubs/networkx/algorithms/covering.pyi
deleted file mode 100644
index 0746bedb5..000000000
--- a/mypy-stubs/networkx/algorithms/covering.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.covering (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def min_edge_cover(G, matching_algorithm: Optional[Any] = ...): ...
-def is_edge_cover(G, cover): ...
diff --git a/mypy-stubs/networkx/algorithms/cuts.pyi b/mypy-stubs/networkx/algorithms/cuts.pyi
deleted file mode 100644
index 5d1766022..000000000
--- a/mypy-stubs/networkx/algorithms/cuts.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.algorithms.cuts (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def cut_size(G, S, T: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def volume(G, S, weight: Optional[Any] = ...): ...
-def normalized_cut_size(G, S, T: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def conductance(G, S, T: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def edge_expansion(G, S, T: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def mixing_expansion(G, S, T: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def node_expansion(G, S): ...
-def boundary_expansion(G, S): ...
diff --git a/mypy-stubs/networkx/algorithms/cycles.pyi b/mypy-stubs/networkx/algorithms/cycles.pyi
deleted file mode 100644
index 90417ac6c..000000000
--- a/mypy-stubs/networkx/algorithms/cycles.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.cycles (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def cycle_basis(G, root: Optional[Any] = ...): ...
-def simple_cycles(G): ...
-def recursive_simple_cycles(G): ...
-def find_cycle(G, source: Optional[Any] = ..., orientation: str = ...): ...
-def minimum_cycle_basis(G, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/dag.pyi b/mypy-stubs/networkx/algorithms/dag.pyi
deleted file mode 100644
index 14b3b51f9..000000000
--- a/mypy-stubs/networkx/algorithms/dag.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-# Stubs for networkx.algorithms.dag (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def descendants(G, source): ...
-def ancestors(G, source): ...
-def is_directed_acyclic_graph(G): ...
-def topological_sort(G): ...
-def lexicographical_topological_sort(G, key: Optional[Any] = ...): ...
-def is_aperiodic(G): ...
-def transitive_closure(G): ...
-def transitive_reduction(G): ...
-def antichains(G): ...
-def dag_longest_path(G, weight: str = ..., default_weight: int = ...): ...
-def dag_longest_path_length(G, weight: str = ..., default_weight: int = ...): ...
-def dag_to_branching(G): ...
diff --git a/mypy-stubs/networkx/algorithms/distance_measures.pyi b/mypy-stubs/networkx/algorithms/distance_measures.pyi
deleted file mode 100644
index 2c50d5dc9..000000000
--- a/mypy-stubs/networkx/algorithms/distance_measures.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.distance_measures (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def extrema_bounding(G, compute: str = ...): ...
-def eccentricity(G, v: Optional[Any] = ..., sp: Optional[Any] = ...): ...
-def diameter(G, e: Optional[Any] = ..., usebounds: bool = ...): ...
-def periphery(G, e: Optional[Any] = ..., usebounds: bool = ...): ...
-def radius(G, e: Optional[Any] = ..., usebounds: bool = ...): ...
-def center(G, e: Optional[Any] = ..., usebounds: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/distance_regular.pyi b/mypy-stubs/networkx/algorithms/distance_regular.pyi
deleted file mode 100644
index 8c0c311a0..000000000
--- a/mypy-stubs/networkx/algorithms/distance_regular.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.distance_regular (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_distance_regular(G): ...
-def global_parameters(b, c): ...
-def intersection_array(G): ...
-def is_strongly_regular(G): ...
diff --git a/mypy-stubs/networkx/algorithms/dominance.pyi b/mypy-stubs/networkx/algorithms/dominance.pyi
deleted file mode 100644
index c38c76b2b..000000000
--- a/mypy-stubs/networkx/algorithms/dominance.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.algorithms.dominance (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def immediate_dominators(G, start): ...
-def dominance_frontiers(G, start): ...
diff --git a/mypy-stubs/networkx/algorithms/dominating.pyi b/mypy-stubs/networkx/algorithms/dominating.pyi
deleted file mode 100644
index ba8bc1cd7..000000000
--- a/mypy-stubs/networkx/algorithms/dominating.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.dominating (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def dominating_set(G, start_with: Optional[Any] = ...): ...
-def is_dominating_set(G, nbunch): ...
diff --git a/mypy-stubs/networkx/algorithms/efficiency.pyi b/mypy-stubs/networkx/algorithms/efficiency.pyi
deleted file mode 100644
index 3bd27dc2c..000000000
--- a/mypy-stubs/networkx/algorithms/efficiency.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.efficiency (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def efficiency(G, u, v): ...
-def global_efficiency(G): ...
-def local_efficiency(G): ...
diff --git a/mypy-stubs/networkx/algorithms/euler.pyi b/mypy-stubs/networkx/algorithms/euler.pyi
deleted file mode 100644
index f87988a95..000000000
--- a/mypy-stubs/networkx/algorithms/euler.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.euler (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def is_eulerian(G): ...
-def eulerian_circuit(G, source: Optional[Any] = ..., keys: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/__init__.pyi b/mypy-stubs/networkx/algorithms/flow/__init__.pyi
deleted file mode 100644
index c8b739a9c..000000000
--- a/mypy-stubs/networkx/algorithms/flow/__init__.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.flow (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .boykovkolmogorov import *
-from .capacityscaling import *
-from .dinitz_alg import *
-from .edmondskarp import *
-from .gomory_hu import *
-from .maxflow import *
-from .mincost import *
-from .networksimplex import *
-from .preflowpush import *
-from .shortestaugmentingpath import *
-from .utils import build_flow_dict, build_residual_network
diff --git a/mypy-stubs/networkx/algorithms/flow/boykovkolmogorov.pyi b/mypy-stubs/networkx/algorithms/flow/boykovkolmogorov.pyi
deleted file mode 100644
index cea4e5e87..000000000
--- a/mypy-stubs/networkx/algorithms/flow/boykovkolmogorov.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.flow.boykovkolmogorov (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def boykov_kolmogorov(
- G,
- s,
- t,
- capacity: str = ...,
- residual: Optional[Any] = ...,
- value_only: bool = ...,
- cutoff: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/capacityscaling.pyi b/mypy-stubs/networkx/algorithms/flow/capacityscaling.pyi
deleted file mode 100644
index 62151938e..000000000
--- a/mypy-stubs/networkx/algorithms/flow/capacityscaling.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.flow.capacityscaling (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def capacity_scaling(
- G, demand: str = ..., capacity: str = ..., weight: str = ..., heap: Any = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/dinitz_alg.pyi b/mypy-stubs/networkx/algorithms/flow/dinitz_alg.pyi
deleted file mode 100644
index ccfb48e4d..000000000
--- a/mypy-stubs/networkx/algorithms/flow/dinitz_alg.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.flow.dinitz_alg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def dinitz(
- G,
- s,
- t,
- capacity: str = ...,
- residual: Optional[Any] = ...,
- value_only: bool = ...,
- cutoff: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/edmondskarp.pyi b/mypy-stubs/networkx/algorithms/flow/edmondskarp.pyi
deleted file mode 100644
index 96d92b927..000000000
--- a/mypy-stubs/networkx/algorithms/flow/edmondskarp.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-# Stubs for networkx.algorithms.flow.edmondskarp (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.algorithms.flow.utils import *
-
-def edmonds_karp(
- G,
- s,
- t,
- capacity: str = ...,
- residual: Optional[Any] = ...,
- value_only: bool = ...,
- cutoff: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/gomory_hu.pyi b/mypy-stubs/networkx/algorithms/flow/gomory_hu.pyi
deleted file mode 100644
index a548e83c7..000000000
--- a/mypy-stubs/networkx/algorithms/flow/gomory_hu.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.flow.gomory_hu (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from .edmondskarp import edmonds_karp
-
-default_flow_func = edmonds_karp
-
-def gomory_hu_tree(G, capacity: str = ..., flow_func: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/maxflow.pyi b/mypy-stubs/networkx/algorithms/flow/maxflow.pyi
deleted file mode 100644
index e0ebab1f7..000000000
--- a/mypy-stubs/networkx/algorithms/flow/maxflow.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-# Stubs for networkx.algorithms.flow.maxflow (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from .preflowpush import preflow_push
-
-default_flow_func = preflow_push
-
-def maximum_flow(
- flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs
-): ...
-def maximum_flow_value(
- flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs
-): ...
-def minimum_cut(
- flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs
-): ...
-def minimum_cut_value(
- flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/mincost.pyi b/mypy-stubs/networkx/algorithms/flow/mincost.pyi
deleted file mode 100644
index 93c4e6904..000000000
--- a/mypy-stubs/networkx/algorithms/flow/mincost.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.flow.mincost (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def min_cost_flow_cost(
- G, demand: str = ..., capacity: str = ..., weight: str = ...
-): ...
-def min_cost_flow(G, demand: str = ..., capacity: str = ..., weight: str = ...): ...
-def cost_of_flow(G, flowDict, weight: str = ...): ...
-def max_flow_min_cost(G, s, t, capacity: str = ..., weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/networksimplex.pyi b/mypy-stubs/networkx/algorithms/flow/networksimplex.pyi
deleted file mode 100644
index 1fcedc1d6..000000000
--- a/mypy-stubs/networkx/algorithms/flow/networksimplex.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.flow.networksimplex (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-range = xrange
-
-def network_simplex(G, demand: str = ..., capacity: str = ..., weight: str = ...): ...
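
Note the "range = xrange" alias in the networksimplex stub removed just above: xrange is a Python 2 name, undefined on Python 3, so mypy would flag it if that module were ever analyzed; together with the Python 3.5-era stubgen banner in every file, it dates the whole tree and is presumably part of why these stubs are being deleted wholesale rather than patched. Should fresh stubs ever be wanted again, stubgen (shipped with mypy) can rebuild the same layout; a minimal sketch, assuming mypy and networkx are installed in the same environment:

    import subprocess

    # Equivalent to running `stubgen -p networkx -o mypy-stubs` from the shell:
    # walk the installed networkx package and write fresh .pyi files.
    subprocess.run(["stubgen", "-p", "networkx", "-o", "mypy-stubs"], check=True)
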
diff --git a/mypy-stubs/networkx/algorithms/flow/preflowpush.pyi b/mypy-stubs/networkx/algorithms/flow/preflowpush.pyi
deleted file mode 100644
index 181bf72b7..000000000
--- a/mypy-stubs/networkx/algorithms/flow/preflowpush.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.algorithms.flow.preflowpush (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def preflow_push(
- G,
- s,
- t,
- capacity: str = ...,
- residual: Optional[Any] = ...,
- global_relabel_freq: int = ...,
- value_only: bool = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/shortestaugmentingpath.pyi b/mypy-stubs/networkx/algorithms/flow/shortestaugmentingpath.pyi
deleted file mode 100644
index 5112e85c7..000000000
--- a/mypy-stubs/networkx/algorithms/flow/shortestaugmentingpath.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-# Stubs for networkx.algorithms.flow.shortestaugmentingpath (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from .utils import *
-
-def shortest_augmenting_path(
- G,
- s,
- t,
- capacity: str = ...,
- residual: Optional[Any] = ...,
- value_only: bool = ...,
- two_phase: bool = ...,
- cutoff: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/flow/utils.pyi b/mypy-stubs/networkx/algorithms/flow/utils.pyi
deleted file mode 100644
index c66c4b476..000000000
--- a/mypy-stubs/networkx/algorithms/flow/utils.pyi
+++ /dev/null
@@ -1,25 +0,0 @@
-# Stubs for networkx.algorithms.flow.utils (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-class CurrentEdge:
- def __init__(self, edges) -> None: ...
- def get(self): ...
- def move_to_next(self): ...
-
-class Level:
- active: Any = ...
- inactive: Any = ...
- def __init__(self) -> None: ...
-
-class GlobalRelabelThreshold:
- def __init__(self, n, m, freq) -> None: ...
- def add_work(self, work): ...
- def is_reached(self): ...
- def clear_work(self): ...
-
-def build_residual_network(G, capacity): ...
-def detect_unboundedness(R, s, t): ...
-def build_flow_dict(G, R): ...
diff --git a/mypy-stubs/networkx/algorithms/graphical.pyi b/mypy-stubs/networkx/algorithms/graphical.pyi
deleted file mode 100644
index b533e3424..000000000
--- a/mypy-stubs/networkx/algorithms/graphical.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.graphical (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_graphical(sequence, method: str = ...): ...
-def is_valid_degree_sequence_havel_hakimi(deg_sequence): ...
-def is_valid_degree_sequence_erdos_gallai(deg_sequence): ...
-def is_multigraphical(sequence): ...
-def is_pseudographical(sequence): ...
-def is_digraphical(in_sequence, out_sequence): ...
diff --git a/mypy-stubs/networkx/algorithms/hierarchy.pyi b/mypy-stubs/networkx/algorithms/hierarchy.pyi
deleted file mode 100644
index 2788ae53f..000000000
--- a/mypy-stubs/networkx/algorithms/hierarchy.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.hierarchy (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def flow_hierarchy(G, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/hybrid.pyi b/mypy-stubs/networkx/algorithms/hybrid.pyi
deleted file mode 100644
index 020479b17..000000000
--- a/mypy-stubs/networkx/algorithms/hybrid.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.hybrid (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def kl_connected_subgraph(
- G, k, l, low_memory: bool = ..., same_as_graph: bool = ...
-): ...
-def is_kl_connected(G, k, l, low_memory: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/isolate.pyi b/mypy-stubs/networkx/algorithms/isolate.pyi
deleted file mode 100644
index a99d6b999..000000000
--- a/mypy-stubs/networkx/algorithms/isolate.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.isolate (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_isolate(G, n): ...
-def isolates(G): ...
-def number_of_isolates(G): ...
diff --git a/mypy-stubs/networkx/algorithms/isomorphism/__init__.pyi b/mypy-stubs/networkx/algorithms/isomorphism/__init__.pyi
deleted file mode 100644
index 00c077680..000000000
--- a/mypy-stubs/networkx/algorithms/isomorphism/__init__.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.isomorphism (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.isomorphism.isomorph import *
-from networkx.algorithms.isomorphism.matchhelpers import *
-from networkx.algorithms.isomorphism.temporalisomorphvf2 import *
-from networkx.algorithms.isomorphism.vf2userfunc import *
diff --git a/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi b/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi
deleted file mode 100644
index 25e42d410..000000000
--- a/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi
+++ /dev/null
@@ -1,21 +0,0 @@
-# Stubs for networkx.algorithms.isomorphism.isomorph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def could_be_isomorphic(G1, G2): ...
-
-graph_could_be_isomorphic = could_be_isomorphic
-
-def fast_could_be_isomorphic(G1, G2): ...
-
-fast_graph_could_be_isomorphic = fast_could_be_isomorphic
-
-def faster_could_be_isomorphic(G1, G2): ...
-
-faster_graph_could_be_isomorphic = faster_could_be_isomorphic
-
-def is_isomorphic(
- G1, G2, node_match: Optional[Any] = ..., edge_match: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi b/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi
deleted file mode 100644
index 30a2d51e1..000000000
--- a/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi
+++ /dev/null
@@ -1,64 +0,0 @@
-# Stubs for networkx.algorithms.isomorphism.isomorphvf2 (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-class GraphMatcher:
- G1: Any = ...
- G2: Any = ...
- G1_nodes: Any = ...
- G2_nodes: Any = ...
- old_recursion_limit: Any = ...
- test: str = ...
- def __init__(self, G1, G2) -> None: ...
- def reset_recursion_limit(self): ...
- def candidate_pairs_iter(self): ...
- core_1: Any = ...
- core_2: Any = ...
- inout_1: Any = ...
- inout_2: Any = ...
- state: Any = ...
- mapping: Any = ...
- def initialize(self): ...
- def is_isomorphic(self): ...
- def isomorphisms_iter(self): ...
- def match(self): ...
- def semantic_feasibility(self, G1_node, G2_node): ...
- def subgraph_is_isomorphic(self): ...
- def subgraph_isomorphisms_iter(self): ...
- def syntactic_feasibility(self, G1_node, G2_node): ...
-
-class DiGraphMatcher(GraphMatcher):
- def __init__(self, G1, G2) -> None: ...
- def candidate_pairs_iter(self): ...
- core_1: Any = ...
- core_2: Any = ...
- in_1: Any = ...
- in_2: Any = ...
- out_1: Any = ...
- out_2: Any = ...
- state: Any = ...
- mapping: Any = ...
- def initialize(self): ...
- def syntactic_feasibility(self, G1_node, G2_node): ...
-
-class GMState:
- GM: Any = ...
- G1_node: Any = ...
- G2_node: Any = ...
- depth: Any = ...
- def __init__(
- self, GM, G1_node: Optional[Any] = ..., G2_node: Optional[Any] = ...
- ) -> None: ...
- def restore(self): ...
-
-class DiGMState:
- GM: Any = ...
- G1_node: Any = ...
- G2_node: Any = ...
- depth: Any = ...
- def __init__(
- self, GM, G1_node: Optional[Any] = ..., G2_node: Optional[Any] = ...
- ) -> None: ...
- def restore(self): ...
diff --git a/mypy-stubs/networkx/algorithms/isomorphism/matchhelpers.pyi b/mypy-stubs/networkx/algorithms/isomorphism/matchhelpers.pyi
deleted file mode 100644
index d7b8a66cf..000000000
--- a/mypy-stubs/networkx/algorithms/isomorphism/matchhelpers.pyi
+++ /dev/null
@@ -1,21 +0,0 @@
-# Stubs for networkx.algorithms.isomorphism.matchhelpers (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def categorical_node_match(attr, default): ...
-
-categorical_edge_match: Any
-
-def categorical_multiedge_match(attr, default): ...
-def numerical_node_match(attr, default, rtol: float = ..., atol: float = ...): ...
-
-numerical_edge_match: Any
-
-def numerical_multiedge_match(attr, default, rtol: float = ..., atol: float = ...): ...
-def generic_node_match(attr, default, op): ...
-
-generic_edge_match: Any
-
-def generic_multiedge_match(attr, default, op): ...
diff --git a/mypy-stubs/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi b/mypy-stubs/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi
deleted file mode 100644
index d79e96c22..000000000
--- a/mypy-stubs/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi
+++ /dev/null
@@ -1,30 +0,0 @@
-# Stubs for networkx.algorithms.isomorphism.temporalisomorphvf2 (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from .isomorphvf2 import DiGraphMatcher, GraphMatcher
-
-class TimeRespectingGraphMatcher(GraphMatcher):
- temporal_attribute_name: Any = ...
- delta: Any = ...
- def __init__(self, G1, G2, temporal_attribute_name, delta) -> None: ...
- def one_hop(self, Gx, Gx_node, neighbors): ...
- def two_hop(self, Gx, core_x, Gx_node, neighbors): ...
- def semantic_feasibility(self, G1_node, G2_node): ...
-
-class TimeRespectingDiGraphMatcher(DiGraphMatcher):
- temporal_attribute_name: Any = ...
- delta: Any = ...
- def __init__(self, G1, G2, temporal_attribute_name, delta) -> None: ...
- def get_pred_dates(self, Gx, Gx_node, core_x, pred): ...
- def get_succ_dates(self, Gx, Gx_node, core_x, succ): ...
- def one_hop(self, Gx, Gx_node, core_x, pred, succ): ...
- def two_hop_pred(self, Gx, Gx_node, core_x, pred): ...
- def two_hop_succ(self, Gx, Gx_node, core_x, succ): ...
- def preds(self, Gx, core_x, v, Gx_node: Optional[Any] = ...): ...
- def succs(self, Gx, core_x, v, Gx_node: Optional[Any] = ...): ...
- def test_one(self, pred_dates, succ_dates): ...
- def test_two(self, pred_dates, succ_dates): ...
- def semantic_feasibility(self, G1_node, G2_node): ...
diff --git a/mypy-stubs/networkx/algorithms/isomorphism/vf2userfunc.pyi b/mypy-stubs/networkx/algorithms/isomorphism/vf2userfunc.pyi
deleted file mode 100644
index 43f5ec19b..000000000
--- a/mypy-stubs/networkx/algorithms/isomorphism/vf2userfunc.pyi
+++ /dev/null
@@ -1,30 +0,0 @@
-# Stubs for networkx.algorithms.isomorphism.vf2userfunc (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from . import isomorphvf2 as vf2
-
-class GraphMatcher(vf2.GraphMatcher):
- node_match: Any = ...
- edge_match: Any = ...
- G1_adj: Any = ...
- G2_adj: Any = ...
- def __init__(
- self, G1, G2, node_match: Optional[Any] = ..., edge_match: Optional[Any] = ...
- ) -> None: ...
- semantic_feasibility: Any = ...
-
-class DiGraphMatcher(vf2.DiGraphMatcher):
- node_match: Any = ...
- edge_match: Any = ...
- G1_adj: Any = ...
- G2_adj: Any = ...
- def __init__(
- self, G1, G2, node_match: Optional[Any] = ..., edge_match: Optional[Any] = ...
- ) -> None: ...
- def semantic_feasibility(self, G1_node, G2_node): ...
-
-class MultiGraphMatcher(GraphMatcher): ...
-class MultiDiGraphMatcher(DiGraphMatcher): ...
diff --git a/mypy-stubs/networkx/algorithms/link_analysis/__init__.pyi b/mypy-stubs/networkx/algorithms/link_analysis/__init__.pyi
deleted file mode 100644
index 5c7882e12..000000000
--- a/mypy-stubs/networkx/algorithms/link_analysis/__init__.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.algorithms.link_analysis (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.link_analysis.hits_alg import *
-from networkx.algorithms.link_analysis.pagerank_alg import *
diff --git a/mypy-stubs/networkx/algorithms/link_analysis/hits_alg.pyi b/mypy-stubs/networkx/algorithms/link_analysis/hits_alg.pyi
deleted file mode 100644
index b48e767ad..000000000
--- a/mypy-stubs/networkx/algorithms/link_analysis/hits_alg.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-# Stubs for networkx.algorithms.link_analysis.hits_alg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def hits(
- G,
- max_iter: int = ...,
- tol: float = ...,
- nstart: Optional[Any] = ...,
- normalized: bool = ...,
-): ...
-def authority_matrix(G, nodelist: Optional[Any] = ...): ...
-def hub_matrix(G, nodelist: Optional[Any] = ...): ...
-def hits_numpy(G, normalized: bool = ...): ...
-def hits_scipy(G, max_iter: int = ..., tol: float = ..., normalized: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/link_analysis/pagerank_alg.pyi b/mypy-stubs/networkx/algorithms/link_analysis/pagerank_alg.pyi
deleted file mode 100644
index 9beccabd6..000000000
--- a/mypy-stubs/networkx/algorithms/link_analysis/pagerank_alg.pyi
+++ /dev/null
@@ -1,40 +0,0 @@
-# Stubs for networkx.algorithms.link_analysis.pagerank_alg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def pagerank(
- G,
- alpha: float = ...,
- personalization: Optional[Any] = ...,
- max_iter: int = ...,
- tol: float = ...,
- nstart: Optional[Any] = ...,
- weight: str = ...,
- dangling: Optional[Any] = ...,
-): ...
-def google_matrix(
- G,
- alpha: float = ...,
- personalization: Optional[Any] = ...,
- nodelist: Optional[Any] = ...,
- weight: str = ...,
- dangling: Optional[Any] = ...,
-): ...
-def pagerank_numpy(
- G,
- alpha: float = ...,
- personalization: Optional[Any] = ...,
- weight: str = ...,
- dangling: Optional[Any] = ...,
-): ...
-def pagerank_scipy(
- G,
- alpha: float = ...,
- personalization: Optional[Any] = ...,
- max_iter: int = ...,
- tol: float = ...,
- weight: str = ...,
- dangling: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/link_prediction.pyi b/mypy-stubs/networkx/algorithms/link_prediction.pyi
deleted file mode 100644
index 1d9a5813b..000000000
--- a/mypy-stubs/networkx/algorithms/link_prediction.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-# Stubs for networkx.algorithms.link_prediction (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def resource_allocation_index(G, ebunch: Optional[Any] = ...): ...
-def jaccard_coefficient(G, ebunch: Optional[Any] = ...): ...
-def adamic_adar_index(G, ebunch: Optional[Any] = ...): ...
-def preferential_attachment(G, ebunch: Optional[Any] = ...): ...
-def cn_soundarajan_hopcroft(G, ebunch: Optional[Any] = ..., community: str = ...): ...
-def ra_index_soundarajan_hopcroft(
- G, ebunch: Optional[Any] = ..., community: str = ...
-): ...
-def within_inter_cluster(
- G, ebunch: Optional[Any] = ..., delta: float = ..., community: str = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/lowest_common_ancestors.pyi b/mypy-stubs/networkx/algorithms/lowest_common_ancestors.pyi
deleted file mode 100644
index edcf10148..000000000
--- a/mypy-stubs/networkx/algorithms/lowest_common_ancestors.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.algorithms.lowest_common_ancestors (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def tree_all_pairs_lowest_common_ancestor(
- G, root: Optional[Any] = ..., pairs: Optional[Any] = ...
-): ...
-def lowest_common_ancestor(G, node1, node2, default: Optional[Any] = ...): ...
-def all_pairs_lowest_common_ancestor(G, pairs: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/matching.pyi b/mypy-stubs/networkx/algorithms/matching.pyi
deleted file mode 100644
index 54494e9da..000000000
--- a/mypy-stubs/networkx/algorithms/matching.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.matching (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def maximal_matching(G): ...
-def is_matching(G, matching): ...
-def is_maximal_matching(G, matching): ...
-def max_weight_matching(G, maxcardinality: bool = ..., weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/minors.pyi b/mypy-stubs/networkx/algorithms/minors.pyi
deleted file mode 100644
index b7e296112..000000000
--- a/mypy-stubs/networkx/algorithms/minors.pyi
+++ /dev/null
@@ -1,20 +0,0 @@
-# Stubs for networkx.algorithms.minors (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def quotient_graph(
- G,
- partition,
- edge_relation: Optional[Any] = ...,
- node_data: Optional[Any] = ...,
- edge_data: Optional[Any] = ...,
- relabel: bool = ...,
- create_using: Optional[Any] = ...,
-): ...
-def contracted_nodes(G, u, v, self_loops: bool = ...): ...
-
-identified_nodes = contracted_nodes
-
-def contracted_edge(G, edge, self_loops: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/mis.pyi b/mypy-stubs/networkx/algorithms/mis.pyi
deleted file mode 100644
index 631a81010..000000000
--- a/mypy-stubs/networkx/algorithms/mis.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.mis (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def maximal_independent_set(G, nodes: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/operators/__init__.pyi b/mypy-stubs/networkx/algorithms/operators/__init__.pyi
deleted file mode 100644
index f9157aac3..000000000
--- a/mypy-stubs/networkx/algorithms/operators/__init__.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.operators (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.operators.all import *
-from networkx.algorithms.operators.binary import *
-from networkx.algorithms.operators.product import *
-from networkx.algorithms.operators.unary import *
diff --git a/mypy-stubs/networkx/algorithms/operators/all.pyi b/mypy-stubs/networkx/algorithms/operators/all.pyi
deleted file mode 100644
index b9fa0cff6..000000000
--- a/mypy-stubs/networkx/algorithms/operators/all.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.operators.all (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def union_all(graphs, rename: Any = ...): ...
-def disjoint_union_all(graphs): ...
-def compose_all(graphs): ...
-def intersection_all(graphs): ...
diff --git a/mypy-stubs/networkx/algorithms/operators/binary.pyi b/mypy-stubs/networkx/algorithms/operators/binary.pyi
deleted file mode 100644
index 439c17124..000000000
--- a/mypy-stubs/networkx/algorithms/operators/binary.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.operators.binary (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def union(G, H, rename: Any = ..., name: Optional[Any] = ...): ...
-def disjoint_union(G, H): ...
-def intersection(G, H): ...
-def difference(G, H): ...
-def symmetric_difference(G, H): ...
-def compose(G, H): ...
diff --git a/mypy-stubs/networkx/algorithms/operators/product.pyi b/mypy-stubs/networkx/algorithms/operators/product.pyi
deleted file mode 100644
index eea9545f6..000000000
--- a/mypy-stubs/networkx/algorithms/operators/product.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.operators.product (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def tensor_product(G, H): ...
-def cartesian_product(G, H): ...
-def lexicographic_product(G, H): ...
-def strong_product(G, H): ...
-def power(G, k): ...
-def rooted_product(G, H, root): ...
diff --git a/mypy-stubs/networkx/algorithms/operators/unary.pyi b/mypy-stubs/networkx/algorithms/operators/unary.pyi
deleted file mode 100644
index 8744a753e..000000000
--- a/mypy-stubs/networkx/algorithms/operators/unary.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.algorithms.operators.unary (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def complement(G): ...
-def reverse(G, copy: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/reciprocity.pyi b/mypy-stubs/networkx/algorithms/reciprocity.pyi
deleted file mode 100644
index 67f504195..000000000
--- a/mypy-stubs/networkx/algorithms/reciprocity.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.reciprocity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def reciprocity(G, nodes: Optional[Any] = ...): ...
-def overall_reciprocity(G): ...
diff --git a/mypy-stubs/networkx/algorithms/richclub.pyi b/mypy-stubs/networkx/algorithms/richclub.pyi
deleted file mode 100644
index 00f7f620d..000000000
--- a/mypy-stubs/networkx/algorithms/richclub.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.richclub (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def rich_club_coefficient(G, normalized: bool = ..., Q: int = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/__init__.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/__init__.pyi
deleted file mode 100644
index 0f156904d..000000000
--- a/mypy-stubs/networkx/algorithms/shortest_paths/__init__.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.shortest_paths (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.algorithms.shortest_paths.astar import *
-from networkx.algorithms.shortest_paths.dense import *
-from networkx.algorithms.shortest_paths.generic import *
-from networkx.algorithms.shortest_paths.unweighted import *
-from networkx.algorithms.shortest_paths.weighted import *
diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi
deleted file mode 100644
index abe9be2dc..000000000
--- a/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.shortest_paths.astar (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def astar_path(
- G, source, target, heuristic: Optional[Any] = ..., weight: str = ...
-): ...
-def astar_path_length(
- G, source, target, heuristic: Optional[Any] = ..., weight: str = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/dense.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/dense.pyi
deleted file mode 100644
index eec3dda0e..000000000
--- a/mypy-stubs/networkx/algorithms/shortest_paths/dense.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.shortest_paths.dense (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def floyd_warshall_numpy(G, nodelist: Optional[Any] = ..., weight: str = ...): ...
-def floyd_warshall_predecessor_and_distance(G, weight: str = ...): ...
-def floyd_warshall(G, weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/generic.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/generic.pyi
deleted file mode 100644
index c177663eb..000000000
--- a/mypy-stubs/networkx/algorithms/shortest_paths/generic.pyi
+++ /dev/null
@@ -1,21 +0,0 @@
-# Stubs for networkx.algorithms.shortest_paths.generic (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def has_path(G, source, target): ...
-def shortest_path(
- G,
- source: Optional[Any] = ...,
- target: Optional[Any] = ...,
- weight: Optional[Any] = ...,
-): ...
-def shortest_path_length(
- G,
- source: Optional[Any] = ...,
- target: Optional[Any] = ...,
- weight: Optional[Any] = ...,
-): ...
-def average_shortest_path_length(G, weight: Optional[Any] = ...): ...
-def all_shortest_paths(G, source, target, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/unweighted.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/unweighted.pyi
deleted file mode 100644
index 50b441237..000000000
--- a/mypy-stubs/networkx/algorithms/shortest_paths/unweighted.pyi
+++ /dev/null
@@ -1,20 +0,0 @@
-# Stubs for networkx.algorithms.shortest_paths.unweighted (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def single_source_shortest_path_length(G, source, cutoff: Optional[Any] = ...): ...
-def single_target_shortest_path_length(G, target, cutoff: Optional[Any] = ...): ...
-def all_pairs_shortest_path_length(G, cutoff: Optional[Any] = ...): ...
-def bidirectional_shortest_path(G, source, target): ...
-def single_source_shortest_path(G, source, cutoff: Optional[Any] = ...): ...
-def single_target_shortest_path(G, target, cutoff: Optional[Any] = ...): ...
-def all_pairs_shortest_path(G, cutoff: Optional[Any] = ...): ...
-def predecessor(
- G,
- source,
- target: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- return_seen: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi
deleted file mode 100644
index 52e388360..000000000
--- a/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi
+++ /dev/null
@@ -1,72 +0,0 @@
-# Stubs for networkx.algorithms.shortest_paths.weighted (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def dijkstra_path(G, source, target, weight: str = ...): ...
-def dijkstra_path_length(G, source, target, weight: str = ...): ...
-def single_source_dijkstra_path(
- G, source, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def single_source_dijkstra_path_length(
- G, source, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def single_source_dijkstra(
- G,
- source,
- target: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- weight: str = ...,
-): ...
-def multi_source_dijkstra_path(
- G, sources, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def multi_source_dijkstra_path_length(
- G, sources, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def multi_source_dijkstra(
- G,
- sources,
- target: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- weight: str = ...,
-): ...
-def dijkstra_predecessor_and_distance(
- G, source, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def all_pairs_dijkstra(G, cutoff: Optional[Any] = ..., weight: str = ...): ...
-def all_pairs_dijkstra_path_length(
- G, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def all_pairs_dijkstra_path(G, cutoff: Optional[Any] = ..., weight: str = ...): ...
-def bellman_ford_predecessor_and_distance(
- G,
- source,
- target: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- weight: str = ...,
-): ...
-def bellman_ford_path(G, source, target, weight: str = ...): ...
-def bellman_ford_path_length(G, source, target, weight: str = ...): ...
-def single_source_bellman_ford_path(
- G, source, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def single_source_bellman_ford_path_length(
- G, source, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def single_source_bellman_ford(
- G,
- source,
- target: Optional[Any] = ...,
- cutoff: Optional[Any] = ...,
- weight: str = ...,
-): ...
-def all_pairs_bellman_ford_path_length(
- G, cutoff: Optional[Any] = ..., weight: str = ...
-): ...
-def all_pairs_bellman_ford_path(G, cutoff: Optional[Any] = ..., weight: str = ...): ...
-def goldberg_radzik(G, source, weight: str = ...): ...
-def negative_edge_cycle(G, weight: str = ...): ...
-def bidirectional_dijkstra(G, source, target, weight: str = ...): ...
-def johnson(G, weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/similarity.pyi b/mypy-stubs/networkx/algorithms/similarity.pyi
deleted file mode 100644
index ce5ab447b..000000000
--- a/mypy-stubs/networkx/algorithms/similarity.pyi
+++ /dev/null
@@ -1,60 +0,0 @@
-# Stubs for networkx.algorithms.similarity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from operator import *
-from typing import Any, Optional
-
-def graph_edit_distance(
- G1,
- G2,
- node_match: Optional[Any] = ...,
- edge_match: Optional[Any] = ...,
- node_subst_cost: Optional[Any] = ...,
- node_del_cost: Optional[Any] = ...,
- node_ins_cost: Optional[Any] = ...,
- edge_subst_cost: Optional[Any] = ...,
- edge_del_cost: Optional[Any] = ...,
- edge_ins_cost: Optional[Any] = ...,
- upper_bound: Optional[Any] = ...,
-): ...
-def optimal_edit_paths(
- G1,
- G2,
- node_match: Optional[Any] = ...,
- edge_match: Optional[Any] = ...,
- node_subst_cost: Optional[Any] = ...,
- node_del_cost: Optional[Any] = ...,
- node_ins_cost: Optional[Any] = ...,
- edge_subst_cost: Optional[Any] = ...,
- edge_del_cost: Optional[Any] = ...,
- edge_ins_cost: Optional[Any] = ...,
- upper_bound: Optional[Any] = ...,
-): ...
-def optimize_graph_edit_distance(
- G1,
- G2,
- node_match: Optional[Any] = ...,
- edge_match: Optional[Any] = ...,
- node_subst_cost: Optional[Any] = ...,
- node_del_cost: Optional[Any] = ...,
- node_ins_cost: Optional[Any] = ...,
- edge_subst_cost: Optional[Any] = ...,
- edge_del_cost: Optional[Any] = ...,
- edge_ins_cost: Optional[Any] = ...,
- upper_bound: Optional[Any] = ...,
-): ...
-def optimize_edit_paths(
- G1,
- G2,
- node_match: Optional[Any] = ...,
- edge_match: Optional[Any] = ...,
- node_subst_cost: Optional[Any] = ...,
- node_del_cost: Optional[Any] = ...,
- node_ins_cost: Optional[Any] = ...,
- edge_subst_cost: Optional[Any] = ...,
- edge_del_cost: Optional[Any] = ...,
- edge_ins_cost: Optional[Any] = ...,
- upper_bound: Optional[Any] = ...,
- strictly_decreasing: bool = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/simple_paths.pyi b/mypy-stubs/networkx/algorithms/simple_paths.pyi
deleted file mode 100644
index a410969fb..000000000
--- a/mypy-stubs/networkx/algorithms/simple_paths.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-# Stubs for networkx.algorithms.simple_paths (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def is_simple_path(G, nodes): ...
-def all_simple_paths(G, source, target, cutoff: Optional[Any] = ...): ...
-def shortest_simple_paths(G, source, target, weight: Optional[Any] = ...): ...
-
-class PathBuffer:
- paths: Any = ...
- sortedpaths: Any = ...
- counter: Any = ...
- def __init__(self) -> None: ...
- def __len__(self): ...
- def push(self, cost, path): ...
- def pop(self): ...
diff --git a/mypy-stubs/networkx/algorithms/smetric.pyi b/mypy-stubs/networkx/algorithms/smetric.pyi
deleted file mode 100644
index 9727d50fd..000000000
--- a/mypy-stubs/networkx/algorithms/smetric.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.smetric (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def s_metric(G, normalized: bool = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/structuralholes.pyi b/mypy-stubs/networkx/algorithms/structuralholes.pyi
deleted file mode 100644
index 8d299338f..000000000
--- a/mypy-stubs/networkx/algorithms/structuralholes.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.structuralholes (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def effective_size(G, nodes: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def constraint(G, nodes: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def local_constraint(G, u, v, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/swap.pyi b/mypy-stubs/networkx/algorithms/swap.pyi
deleted file mode 100644
index decb8ff08..000000000
--- a/mypy-stubs/networkx/algorithms/swap.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.algorithms.swap (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def double_edge_swap(G, nswap: int = ..., max_tries: int = ...): ...
-def connected_double_edge_swap(G, nswap: int = ..., _window_threshold: int = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/threshold.pyi b/mypy-stubs/networkx/algorithms/threshold.pyi
deleted file mode 100644
index 37f86c060..000000000
--- a/mypy-stubs/networkx/algorithms/threshold.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.threshold (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def is_threshold_graph(G): ...
-def find_threshold_graph(G, create_using: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/tournament.pyi b/mypy-stubs/networkx/algorithms/tournament.pyi
deleted file mode 100644
index a9b011c36..000000000
--- a/mypy-stubs/networkx/algorithms/tournament.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.tournament (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_tournament(G): ...
-def hamiltonian_path(G): ...
-def random_tournament(n): ...
-def score_sequence(G): ...
-def is_reachable(G, s, t): ...
-def is_strongly_connected(G): ...
diff --git a/mypy-stubs/networkx/algorithms/traversal/__init__.pyi b/mypy-stubs/networkx/algorithms/traversal/__init__.pyi
deleted file mode 100644
index cfb4001b8..000000000
--- a/mypy-stubs/networkx/algorithms/traversal/__init__.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.traversal (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .beamsearch import *
-from .breadth_first_search import *
-from .depth_first_search import *
-from .edgedfs import *
diff --git a/mypy-stubs/networkx/algorithms/traversal/beamsearch.pyi b/mypy-stubs/networkx/algorithms/traversal/beamsearch.pyi
deleted file mode 100644
index 6d1c64596..000000000
--- a/mypy-stubs/networkx/algorithms/traversal/beamsearch.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.traversal.beamsearch (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def bfs_beam_edges(G, source, value, width: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/traversal/breadth_first_search.pyi b/mypy-stubs/networkx/algorithms/traversal/breadth_first_search.pyi
deleted file mode 100644
index 69af8e2f7..000000000
--- a/mypy-stubs/networkx/algorithms/traversal/breadth_first_search.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.traversal.breadth_first_search (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def bfs_edges(G, source, reverse: bool = ...): ...
-def bfs_tree(G, source, reverse: bool = ...): ...
-def bfs_predecessors(G, source): ...
-def bfs_successors(G, source): ...
diff --git a/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi b/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi
deleted file mode 100644
index 554accd1c..000000000
--- a/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi
+++ /dev/null
@@ -1,23 +0,0 @@
-# Stubs for networkx.algorithms.traversal.depth_first_search (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def dfs_edges(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ...
-def dfs_tree(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ...
-def dfs_predecessors(
- G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...
-): ...
-def dfs_successors(
- G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...
-): ...
-def dfs_postorder_nodes(
- G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...
-): ...
-def dfs_preorder_nodes(
- G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...
-): ...
-def dfs_labeled_edges(
- G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/traversal/edgedfs.pyi b/mypy-stubs/networkx/algorithms/traversal/edgedfs.pyi
deleted file mode 100644
index 87c8bb65a..000000000
--- a/mypy-stubs/networkx/algorithms/traversal/edgedfs.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.traversal.edgedfs (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def edge_dfs(G, source: Optional[Any] = ..., orientation: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/tree/__init__.pyi b/mypy-stubs/networkx/algorithms/tree/__init__.pyi
deleted file mode 100644
index db314da6b..000000000
--- a/mypy-stubs/networkx/algorithms/tree/__init__.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.algorithms.tree (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .branchings import *
-from .coding import *
-from .mst import *
-from .operations import *
-from .recognition import *
diff --git a/mypy-stubs/networkx/algorithms/tree/branchings.pyi b/mypy-stubs/networkx/algorithms/tree/branchings.pyi
deleted file mode 100644
index 2dff70833..000000000
--- a/mypy-stubs/networkx/algorithms/tree/branchings.pyi
+++ /dev/null
@@ -1,36 +0,0 @@
-# Stubs for networkx.algorithms.tree.branchings (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from .recognition import *
-
-def branching_weight(G, attr: str = ..., default: int = ...): ...
-def greedy_branching(G, attr: str = ..., default: int = ..., kind: str = ...): ...
-
-class MultiDiGraph_EdgeKey(networkx.MultiDiGraph):
- edge_index: Any = ...
- def __init__(self, incoming_graph_data: Optional[Any] = ..., **attr) -> None: ...
- def remove_node(self, n): ...
- def remove_nodes_from(self, nbunch): ...
- def fresh_copy(self): ...
- def add_edge(self, u_for_edge, v_for_edge, key_for_edge, **attr): ...
- def add_edges_from(self, ebunch_to_add, **attr): ...
- def remove_edge_with_key(self, key): ...
- def remove_edges_from(self, ebunch): ...
-
-class Edmonds:
- G_original: Any = ...
- store: bool = ...
- edges: Any = ...
- template: Any = ...
- def __init__(self, G, seed: Optional[Any] = ...) -> None: ...
- def find_optimum(
- self, attr: str = ..., default: int = ..., kind: str = ..., style: str = ...
- ): ...
-
-def maximum_branching(G, attr: str = ..., default: int = ...): ...
-def minimum_branching(G, attr: str = ..., default: int = ...): ...
-def maximum_spanning_arborescence(G, attr: str = ..., default: int = ...): ...
-def minimum_spanning_arborescence(G, attr: str = ..., default: int = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/tree/coding.pyi b/mypy-stubs/networkx/algorithms/tree/coding.pyi
deleted file mode 100644
index 3936bdc95..000000000
--- a/mypy-stubs/networkx/algorithms/tree/coding.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.algorithms.tree.coding (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-# class NotATree(nx.NetworkXException): ...
-
-def to_nested_tuple(T, root, canonical_form: bool = ...): ...
-def from_nested_tuple(sequence, sensible_relabeling: bool = ...): ...
-def to_prufer_sequence(T): ...
-def from_prufer_sequence(sequence): ...
diff --git a/mypy-stubs/networkx/algorithms/tree/mst.pyi b/mypy-stubs/networkx/algorithms/tree/mst.pyi
deleted file mode 100644
index 13a819bc1..000000000
--- a/mypy-stubs/networkx/algorithms/tree/mst.pyi
+++ /dev/null
@@ -1,26 +0,0 @@
-# Stubs for networkx.algorithms.tree.mst (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def minimum_spanning_edges(
- G,
- algorithm: str = ...,
- weight: str = ...,
- keys: bool = ...,
- data: bool = ...,
- ignore_nan: bool = ...,
-): ...
-def maximum_spanning_edges(
- G,
- algorithm: str = ...,
- weight: str = ...,
- keys: bool = ...,
- data: bool = ...,
- ignore_nan: bool = ...,
-): ...
-def minimum_spanning_tree(
- G, weight: str = ..., algorithm: str = ..., ignore_nan: bool = ...
-): ...
-def maximum_spanning_tree(
- G, weight: str = ..., algorithm: str = ..., ignore_nan: bool = ...
-): ...
diff --git a/mypy-stubs/networkx/algorithms/tree/operations.pyi b/mypy-stubs/networkx/algorithms/tree/operations.pyi
deleted file mode 100644
index 193bf2b01..000000000
--- a/mypy-stubs/networkx/algorithms/tree/operations.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.tree.operations (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def join(rooted_trees, label_attribute: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/tree/recognition.pyi b/mypy-stubs/networkx/algorithms/tree/recognition.pyi
deleted file mode 100644
index 5440c9389..000000000
--- a/mypy-stubs/networkx/algorithms/tree/recognition.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.algorithms.tree.recognition (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def is_arborescence(G): ...
-def is_branching(G): ...
-def is_forest(G): ...
-def is_tree(G): ...
diff --git a/mypy-stubs/networkx/algorithms/triads.pyi b/mypy-stubs/networkx/algorithms/triads.pyi
deleted file mode 100644
index 1c09deab7..000000000
--- a/mypy-stubs/networkx/algorithms/triads.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.triads (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def triadic_census(G): ...
diff --git a/mypy-stubs/networkx/algorithms/vitality.pyi b/mypy-stubs/networkx/algorithms/vitality.pyi
deleted file mode 100644
index 6a6e452fa..000000000
--- a/mypy-stubs/networkx/algorithms/vitality.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.algorithms.vitality (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def closeness_vitality(
- G,
- node: Optional[Any] = ...,
- weight: Optional[Any] = ...,
- wiener_index: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/algorithms/voronoi.pyi b/mypy-stubs/networkx/algorithms/voronoi.pyi
deleted file mode 100644
index 29c4b2beb..000000000
--- a/mypy-stubs/networkx/algorithms/voronoi.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.algorithms.voronoi (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def voronoi_cells(G, center_nodes, weight: str = ...): ...
diff --git a/mypy-stubs/networkx/algorithms/wiener.pyi b/mypy-stubs/networkx/algorithms/wiener.pyi
deleted file mode 100644
index ac052b9e2..000000000
--- a/mypy-stubs/networkx/algorithms/wiener.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.algorithms.wiener (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def wiener_index(G, weight: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/classes/__init__.pyi b/mypy-stubs/networkx/classes/__init__.pyi
deleted file mode 100644
index e5869d172..000000000
--- a/mypy-stubs/networkx/classes/__init__.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.classes (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .digraph import DiGraph
-from .function import *
-from .graph import Graph
-from .multidigraph import MultiDiGraph
-from .multigraph import MultiGraph
-from .ordered import *
diff --git a/mypy-stubs/networkx/classes/coreviews.pyi b/mypy-stubs/networkx/classes/coreviews.pyi
deleted file mode 100644
index 9f59dafb0..000000000
--- a/mypy-stubs/networkx/classes/coreviews.pyi
+++ /dev/null
@@ -1,81 +0,0 @@
-# Stubs for networkx.classes.coreviews (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from collections import Mapping
-from typing import Any
-
-class AtlasView(Mapping):
- def __init__(self, d) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, key): ...
- def copy(self): ...
-
-class AdjacencyView(AtlasView):
- def __getitem__(self, name): ...
- def copy(self): ...
-
-class MultiAdjacencyView(AdjacencyView):
- def __getitem__(self, name): ...
- def copy(self): ...
-
-class UnionAtlas(Mapping):
- def __init__(self, succ, pred) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, key): ...
- def copy(self): ...
-
-class UnionAdjacency(Mapping):
- def __init__(self, succ, pred) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, nbr): ...
- def copy(self): ...
-
-class UnionMultiInner(UnionAtlas):
- def __getitem__(self, node): ...
- def copy(self): ...
-
-class UnionMultiAdjacency(UnionAdjacency):
- def __getitem__(self, node): ...
-
-class ReadOnlyGraph:
- def not_allowed(self, *args, **kwds): ...
- add_node: Any = ...
- remove_node: Any = ...
- add_nodes_from: Any = ...
- remove_nodes_from: Any = ...
- add_edge: Any = ...
- remove_edge: Any = ...
- add_edges_from: Any = ...
- add_weighted_edges_from: Any = ...
- remove_edges_from: Any = ...
- clear: Any = ...
-
-class FilterAtlas(Mapping):
- NODE_OK: Any = ...
- def __init__(self, d, NODE_OK) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, key): ...
- def copy(self): ...
-
-class FilterAdjacency(Mapping):
- NODE_OK: Any = ...
- EDGE_OK: Any = ...
- def __init__(self, d, NODE_OK, EDGE_OK) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, node): ...
- def copy(self): ...
-
-class FilterMultiInner(FilterAdjacency):
- def __iter__(self): ...
- def __getitem__(self, nbr): ...
- def copy(self): ...
-
-class FilterMultiAdjacency(FilterAdjacency):
- def __getitem__(self, node): ...
- def copy(self): ...
diff --git a/mypy-stubs/networkx/classes/digraph.pyi b/mypy-stubs/networkx/classes/digraph.pyi
deleted file mode 100644
index bbe371e94..000000000
--- a/mypy-stubs/networkx/classes/digraph.pyi
+++ /dev/null
@@ -1,54 +0,0 @@
-# Stubs for networkx.classes.digraph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.classes.graph import Graph
-
-class DiGraph(Graph):
- node_dict_factory: Any = ...
- adjlist_outer_dict_factory: Any = ...
- adjlist_inner_dict_factory: Any = ...
- edge_attr_dict_factory: Any = ...
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, incoming_graph_data: Optional[Any] = ..., **attr) -> None: ...
- @property
- def adj(self): ...
- @property
- def succ(self): ...
- @property
- def pred(self): ...
- def add_node(self, node_for_adding, **attr): ...
- def add_nodes_from(self, nodes_for_adding, **attr): ...
- def remove_node(self, n): ...
- def remove_nodes_from(self, nodes): ...
- def add_edge(self, u_of_edge, v_of_edge, **attr): ...
- def add_edges_from(self, ebunch_to_add, **attr): ...
- def remove_edge(self, u, v): ...
- def remove_edges_from(self, ebunch): ...
- def has_successor(self, u, v): ...
- def has_predecessor(self, u, v): ...
- def successors(self, n): ...
- neighbors: Any = ...
- def predecessors(self, n): ...
- @property
- def edges(self): ...
- out_edges: Any = ...
- @property
- def in_edges(self): ...
- @property
- def degree(self): ...
- @property
- def in_degree(self): ...
- @property
- def out_degree(self): ...
- def clear(self): ...
- def is_multigraph(self): ...
- def is_directed(self): ...
- def fresh_copy(self): ...
- def copy(self, as_view: bool = ...): ...
- def to_undirected(self, reciprocal: bool = ..., as_view: bool = ...): ...
- def subgraph(self, nodes): ...
- def reverse(self, copy: bool = ...): ...
diff --git a/mypy-stubs/networkx/classes/filters.pyi b/mypy-stubs/networkx/classes/filters.pyi
deleted file mode 100644
index 74e85488c..000000000
--- a/mypy-stubs/networkx/classes/filters.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-# Stubs for networkx.classes.filters (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def no_filter(*items): ...
-def hide_nodes(nodes): ...
-def hide_diedges(edges): ...
-def hide_edges(edges): ...
-def hide_multidiedges(edges): ...
-def hide_multiedges(edges): ...
-
-class show_nodes:
- nodes: Any = ...
- def __init__(self, nodes) -> None: ...
- def __call__(self, node): ...
-
-def show_diedges(edges): ...
-def show_edges(edges): ...
-def show_multidiedges(edges): ...
-def show_multiedges(edges): ...
diff --git a/mypy-stubs/networkx/classes/function.pyi b/mypy-stubs/networkx/classes/function.pyi
deleted file mode 100644
index 4de47f5f1..000000000
--- a/mypy-stubs/networkx/classes/function.pyi
+++ /dev/null
@@ -1,45 +0,0 @@
-# Stubs for networkx.classes.function (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def nodes(G): ...
-def edges(G, nbunch: Optional[Any] = ...): ...
-def degree(G, nbunch: Optional[Any] = ..., weight: Optional[Any] = ...): ...
-def neighbors(G, n): ...
-def number_of_nodes(G): ...
-def number_of_edges(G): ...
-def density(G): ...
-def degree_histogram(G): ...
-def is_directed(G): ...
-def freeze(G): ...
-def is_frozen(G): ...
-def add_star(G_to_add_to, nodes_for_star, **attr): ...
-def add_path(G_to_add_to, nodes_for_path, **attr): ...
-def add_cycle(G_to_add_to, nodes_for_cycle, **attr): ...
-def subgraph(G, nbunch): ...
-def induced_subgraph(G, nbunch): ...
-def edge_subgraph(G, edges): ...
-def restricted_view(G, nodes, edges): ...
-def reverse_view(digraph): ...
-def to_directed(graph): ...
-def to_undirected(graph): ...
-def create_empty_copy(G, with_data: bool = ...): ...
-def info(G, n: Optional[Any] = ...): ...
-def set_node_attributes(G, values, name: Optional[Any] = ...): ...
-def get_node_attributes(G, name): ...
-def set_edge_attributes(G, values, name: Optional[Any] = ...): ...
-def get_edge_attributes(G, name): ...
-def all_neighbors(graph, node): ...
-def non_neighbors(graph, node): ...
-def non_edges(graph): ...
-def common_neighbors(G, u, v): ...
-def is_weighted(G, edge: Optional[Any] = ..., weight: str = ...): ...
-def is_negatively_weighted(G, edge: Optional[Any] = ..., weight: str = ...): ...
-def is_empty(G): ...
-def nodes_with_selfloops(G): ...
-def selfloop_edges(
- G, data: bool = ..., keys: bool = ..., default: Optional[Any] = ...
-): ...
-def number_of_selfloops(G): ...
diff --git a/mypy-stubs/networkx/classes/graph.pyi b/mypy-stubs/networkx/classes/graph.pyi
deleted file mode 100644
index 302b25351..000000000
--- a/mypy-stubs/networkx/classes/graph.pyi
+++ /dev/null
@@ -1,67 +0,0 @@
-# Stubs for networkx.classes.graph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-class Graph:
- node_dict_factory: Any = ...
- adjlist_outer_dict_factory: Any = ...
- adjlist_inner_dict_factory: Any = ...
- edge_attr_dict_factory: Any = ...
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, incoming_graph_data: Optional[Any] = ..., **attr) -> None: ...
- @property
- def adj(self): ...
- @property
- def name(self): ...
- @name.setter
- def name(self, s): ...
- def __iter__(self): ...
- def __contains__(self, n): ...
- def __len__(self): ...
- def __getitem__(self, n): ...
- def add_node(self, node_for_adding, **attr): ...
- def add_nodes_from(self, nodes_for_adding, **attr): ...
- def remove_node(self, n): ...
- def remove_nodes_from(self, nodes): ...
- @property
- def nodes(self): ...
- node: Any = ...
- def add_path(self, nodes, **attr): ...
- def add_cycle(self, nodes, **attr): ...
- def add_star(self, nodes, **attr): ...
- def nodes_with_selfloops(self): ...
- def number_of_selfloops(self): ...
- def selfloop_edges(
- self, data: bool = ..., keys: bool = ..., default: Optional[Any] = ...
- ): ...
- def number_of_nodes(self): ...
- def order(self): ...
- def has_node(self, n): ...
- def add_edge(self, u_of_edge, v_of_edge, **attr): ...
- def add_edges_from(self, ebunch_to_add, **attr): ...
- def add_weighted_edges_from(self, ebunch_to_add, weight: str = ..., **attr): ...
- def remove_edge(self, u, v): ...
- def remove_edges_from(self, ebunch): ...
- def has_edge(self, u, v): ...
- def neighbors(self, n): ...
- @property
- def edges(self): ...
- def get_edge_data(self, u, v, default: Optional[Any] = ...): ...
- def adjacency(self): ...
- @property
- def degree(self): ...
- def clear(self): ...
- def is_multigraph(self): ...
- def is_directed(self): ...
- def fresh_copy(self): ...
- def copy(self, as_view: bool = ...): ...
- def to_directed(self, as_view: bool = ...): ...
- def to_undirected(self, as_view: bool = ...): ...
- def subgraph(self, nodes): ...
- def edge_subgraph(self, edges): ...
- def size(self, weight: Optional[Any] = ...): ...
- def number_of_edges(self, u: Optional[Any] = ..., v: Optional[Any] = ...): ...
- def nbunch_iter(self, nbunch: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/classes/graphviews.pyi b/mypy-stubs/networkx/classes/graphviews.pyi
deleted file mode 100644
index 064a80f44..000000000
--- a/mypy-stubs/networkx/classes/graphviews.pyi
+++ /dev/null
@@ -1,68 +0,0 @@
-# Stubs for networkx.classes.graphviews (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-from networkx.classes import DiGraph, Graph, MultiDiGraph, MultiGraph
-from networkx.classes.coreviews import ReadOnlyGraph
-
-class SubGraph(ReadOnlyGraph, Graph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(
- self, graph, filter_node: Any = ..., filter_edge: Any = ...
- ) -> None: ...
-
-class SubDiGraph(ReadOnlyGraph, DiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(
- self, graph, filter_node: Any = ..., filter_edge: Any = ...
- ) -> None: ...
-
-class SubMultiGraph(ReadOnlyGraph, MultiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(
- self, graph, filter_node: Any = ..., filter_edge: Any = ...
- ) -> None: ...
-
-class SubMultiDiGraph(ReadOnlyGraph, MultiDiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(
- self, graph, filter_node: Any = ..., filter_edge: Any = ...
- ) -> None: ...
-
-class ReverseView(ReadOnlyGraph, DiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, graph) -> None: ...
-
-class MultiReverseView(ReadOnlyGraph, MultiDiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, graph) -> None: ...
-
-class DiGraphView(ReadOnlyGraph, DiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, graph) -> None: ...
-
-class MultiDiGraphView(ReadOnlyGraph, MultiDiGraph):
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, graph) -> None: ...
-
-class GraphView(ReadOnlyGraph, Graph):
- UnionAdj: Any = ...
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, graph) -> None: ...
-
-class MultiGraphView(ReadOnlyGraph, MultiGraph):
- UnionAdj: Any = ...
- root_graph: Any = ...
- graph: Any = ...
- def __init__(self, graph) -> None: ...
diff --git a/mypy-stubs/networkx/classes/multidigraph.pyi b/mypy-stubs/networkx/classes/multidigraph.pyi
deleted file mode 100644
index 7ccf4f2d0..000000000
--- a/mypy-stubs/networkx/classes/multidigraph.pyi
+++ /dev/null
@@ -1,38 +0,0 @@
-# Stubs for networkx.classes.multidigraph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.classes.digraph import DiGraph
-from networkx.classes.multigraph import MultiGraph
-
-class MultiDiGraph(MultiGraph, DiGraph):
- edge_key_dict_factory: Any = ...
- def __init__(self, incoming_graph_data: Optional[Any] = ..., **attr) -> None: ...
- @property
- def adj(self): ...
- @property
- def succ(self): ...
- @property
- def pred(self): ...
- def add_edge(self, u_for_edge, v_for_edge, key: Optional[Any] = ..., **attr): ...
- def remove_edge(self, u, v, key: Optional[Any] = ...): ...
- @property
- def edges(self): ...
- out_edges: Any = ...
- @property
- def in_edges(self): ...
- @property
- def degree(self): ...
- @property
- def in_degree(self): ...
- @property
- def out_degree(self): ...
- def is_multigraph(self): ...
- def is_directed(self): ...
- def fresh_copy(self): ...
- def copy(self, as_view: bool = ...): ...
- def to_undirected(self, reciprocal: bool = ..., as_view: bool = ...): ...
- def subgraph(self, nodes): ...
- def reverse(self, copy: bool = ...): ...
diff --git a/mypy-stubs/networkx/classes/multigraph.pyi b/mypy-stubs/networkx/classes/multigraph.pyi
deleted file mode 100644
index 18c4329da..000000000
--- a/mypy-stubs/networkx/classes/multigraph.pyi
+++ /dev/null
@@ -1,34 +0,0 @@
-# Stubs for networkx.classes.multigraph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.classes.graph import Graph
-
-class MultiGraph(Graph):
- edge_key_dict_factory: Any = ...
- def __init__(self, incoming_graph_data: Optional[Any] = ..., **attr) -> None: ...
- @property
- def adj(self): ...
- def new_edge_key(self, u, v): ...
- def add_edge(self, u_for_edge, v_for_edge, key: Optional[Any] = ..., **attr): ...
- def add_edges_from(self, ebunch_to_add, **attr): ...
- def remove_edge(self, u, v, key: Optional[Any] = ...): ...
- def remove_edges_from(self, ebunch): ...
- def has_edge(self, u, v, key: Optional[Any] = ...): ...
- @property
- def edges(self): ...
- def get_edge_data(
- self, u, v, key: Optional[Any] = ..., default: Optional[Any] = ...
- ): ...
- @property
- def degree(self): ...
- def is_multigraph(self): ...
- def is_directed(self): ...
- def fresh_copy(self): ...
- def copy(self, as_view: bool = ...): ...
- def to_directed(self, as_view: bool = ...): ...
- def to_undirected(self, as_view: bool = ...): ...
- def subgraph(self, nodes): ...
- def number_of_edges(self, u: Optional[Any] = ..., v: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/classes/ordered.pyi b/mypy-stubs/networkx/classes/ordered.pyi
deleted file mode 100644
index af305388a..000000000
--- a/mypy-stubs/networkx/classes/ordered.pyi
+++ /dev/null
@@ -1,40 +0,0 @@
-# Stubs for networkx.classes.ordered (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-from .digraph import DiGraph
-from .graph import Graph
-from .multidigraph import MultiDiGraph
-from .multigraph import MultiGraph
-
-class OrderedGraph(Graph):
- node_dict_factory: Any = ...
- adjlist_outer_dict_factory: Any = ...
- adjlist_inner_dict_factory: Any = ...
- edge_attr_dict_factory: Any = ...
- def fresh_copy(self): ...
-
-class OrderedDiGraph(DiGraph):
- node_dict_factory: Any = ...
- adjlist_outer_dict_factory: Any = ...
- adjlist_inner_dict_factory: Any = ...
- edge_attr_dict_factory: Any = ...
- def fresh_copy(self): ...
-
-class OrderedMultiGraph(MultiGraph):
- node_dict_factory: Any = ...
- adjlist_outer_dict_factory: Any = ...
- adjlist_inner_dict_factory: Any = ...
- edge_key_dict_factory: Any = ...
- edge_attr_dict_factory: Any = ...
- def fresh_copy(self): ...
-
-class OrderedMultiDiGraph(MultiDiGraph):
- node_dict_factory: Any = ...
- adjlist_outer_dict_factory: Any = ...
- adjlist_inner_dict_factory: Any = ...
- edge_key_dict_factory: Any = ...
- edge_attr_dict_factory: Any = ...
- def fresh_copy(self): ...
diff --git a/mypy-stubs/networkx/classes/reportviews.pyi b/mypy-stubs/networkx/classes/reportviews.pyi
deleted file mode 100644
index 41d409d77..000000000
--- a/mypy-stubs/networkx/classes/reportviews.pyi
+++ /dev/null
@@ -1,170 +0,0 @@
-# Stubs for networkx.classes.reportviews (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from collections import Mapping, Set
-from typing import Any, Optional
-
-class NodeView(Mapping, Set):
- def __init__(self, graph) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __getitem__(self, n): ...
- def __contains__(self, n): ...
- def __call__(self, data: bool = ..., default: Optional[Any] = ...): ...
- def data(self, data: bool = ..., default: Optional[Any] = ...): ...
-
-class NodeDataView(Set):
- def __init__(
- self, nodedict, data: bool = ..., default: Optional[Any] = ...
- ) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, n): ...
- def __getitem__(self, n): ...
-
-class DiDegreeView:
- def __init__(
- self, G, nbunch: Optional[Any] = ..., weight: Optional[Any] = ...
- ) -> None: ...
- def __call__(self, nbunch: Optional[Any] = ..., weight: Optional[Any] = ...): ...
- def __getitem__(self, n): ...
- def __iter__(self): ...
- def __len__(self): ...
-
-class DegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class OutDegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class InDegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class MultiDegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class DiMultiDegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class InMultiDegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class OutMultiDegreeView(DiDegreeView):
- def __getitem__(self, n): ...
- def __iter__(self): ...
-
-class OutEdgeDataView:
- def __init__(
- self,
- viewer,
- nbunch: Optional[Any] = ...,
- data: bool = ...,
- default: Optional[Any] = ...,
- ) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class EdgeDataView(OutEdgeDataView):
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class InEdgeDataView(OutEdgeDataView):
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class OutMultiEdgeDataView(OutEdgeDataView):
- keys: Any = ...
- def __init__(
- self,
- viewer,
- nbunch: Optional[Any] = ...,
- data: bool = ...,
- keys: bool = ...,
- default: Optional[Any] = ...,
- ) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class MultiEdgeDataView(OutMultiEdgeDataView):
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class InMultiEdgeDataView(OutMultiEdgeDataView):
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class OutEdgeView(Set, Mapping):
- dataview: Any = ...
- def __init__(self, G) -> None: ...
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, e): ...
- def __getitem__(self, e): ...
- def __call__(
- self,
- nbunch: Optional[Any] = ...,
- data: bool = ...,
- default: Optional[Any] = ...,
- ): ...
- def data(
- self,
- data: bool = ...,
- default: Optional[Any] = ...,
- nbunch: Optional[Any] = ...,
- ): ...
-
-class EdgeView(OutEdgeView):
- dataview: Any = ...
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, e): ...
-
-class InEdgeView(OutEdgeView):
- dataview: Any = ...
- def __init__(self, G) -> None: ...
- def __iter__(self): ...
- def __contains__(self, e): ...
- def __getitem__(self, e): ...
-
-class OutMultiEdgeView(OutEdgeView):
- dataview: Any = ...
- def __len__(self): ...
- def __iter__(self): ...
- def __contains__(self, e): ...
- def __getitem__(self, e): ...
- def __call__(
- self,
- nbunch: Optional[Any] = ...,
- data: bool = ...,
- keys: bool = ...,
- default: Optional[Any] = ...,
- ): ...
- def data(
- self,
- data: bool = ...,
- keys: bool = ...,
- default: Optional[Any] = ...,
- nbunch: Optional[Any] = ...,
- ): ...
-
-class MultiEdgeView(OutMultiEdgeView):
- dataview: Any = ...
- def __len__(self): ...
- def __iter__(self): ...
-
-class InMultiEdgeView(OutMultiEdgeView):
- dataview: Any = ...
- def __init__(self, G) -> None: ...
- def __iter__(self): ...
- def __contains__(self, e): ...
- def __getitem__(self, e): ...
diff --git a/mypy-stubs/networkx/convert.pyi b/mypy-stubs/networkx/convert.pyi
deleted file mode 100644
index ac3868c1b..000000000
--- a/mypy-stubs/networkx/convert.pyi
+++ /dev/null
@@ -1,19 +0,0 @@
-# Stubs for networkx.convert (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def to_networkx_graph(
- data, create_using: Optional[Any] = ..., multigraph_input: bool = ...
-): ...
-def to_dict_of_lists(G, nodelist: Optional[Any] = ...): ...
-def from_dict_of_lists(d, create_using: Optional[Any] = ...): ...
-def to_dict_of_dicts(
- G, nodelist: Optional[Any] = ..., edge_data: Optional[Any] = ...
-): ...
-def from_dict_of_dicts(
- d, create_using: Optional[Any] = ..., multigraph_input: bool = ...
-): ...
-def to_edgelist(G, nodelist: Optional[Any] = ...): ...
-def from_edgelist(edgelist, create_using: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/convert_matrix.pyi b/mypy-stubs/networkx/convert_matrix.pyi
deleted file mode 100644
index c991bddb6..000000000
--- a/mypy-stubs/networkx/convert_matrix.pyi
+++ /dev/null
@@ -1,74 +0,0 @@
-# Stubs for networkx.convert_matrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def to_pandas_adjacency(
- G,
- nodelist: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- order: Optional[Any] = ...,
- multigraph_weight: Any = ...,
- weight: str = ...,
- nonedge: float = ...,
-): ...
-def from_pandas_adjacency(df, create_using: Optional[Any] = ...): ...
-def to_pandas_edgelist(
- G,
- source: str = ...,
- target: str = ...,
- nodelist: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- order: Optional[Any] = ...,
-): ...
-def from_pandas_edgelist(
- df,
- source: str = ...,
- target: str = ...,
- edge_attr: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
-): ...
-def to_numpy_matrix(
- G,
- nodelist: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- order: Optional[Any] = ...,
- multigraph_weight: Any = ...,
- weight: str = ...,
- nonedge: float = ...,
-): ...
-def from_numpy_matrix(
- A, parallel_edges: bool = ..., create_using: Optional[Any] = ...
-): ...
-def to_numpy_recarray(
- G,
- nodelist: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- order: Optional[Any] = ...,
-): ...
-def to_scipy_sparse_matrix(
- G,
- nodelist: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- weight: str = ...,
- format: str = ...,
-): ...
-def from_scipy_sparse_matrix(
- A,
- parallel_edges: bool = ...,
- create_using: Optional[Any] = ...,
- edge_attribute: str = ...,
-): ...
-def to_numpy_array(
- G,
- nodelist: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- order: Optional[Any] = ...,
- multigraph_weight: Any = ...,
- weight: str = ...,
- nonedge: float = ...,
-): ...
-def from_numpy_array(
- A, parallel_edges: bool = ..., create_using: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/drawing/__init__.pyi b/mypy-stubs/networkx/drawing/__init__.pyi
deleted file mode 100644
index 44d3f16ba..000000000
--- a/mypy-stubs/networkx/drawing/__init__.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.drawing (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from .layout import *
-from .nx_pylab import *
diff --git a/mypy-stubs/networkx/drawing/layout.pyi b/mypy-stubs/networkx/drawing/layout.pyi
deleted file mode 100644
index 63863ff00..000000000
--- a/mypy-stubs/networkx/drawing/layout.pyi
+++ /dev/null
@@ -1,48 +0,0 @@
-# Stubs for networkx.drawing.layout (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def random_layout(
- G, center: Optional[Any] = ..., dim: int = ..., random_state: Optional[Any] = ...
-): ...
-def circular_layout(
- G, scale: int = ..., center: Optional[Any] = ..., dim: int = ...
-): ...
-def shell_layout(
- G,
- nlist: Optional[Any] = ...,
- scale: int = ...,
- center: Optional[Any] = ...,
- dim: int = ...,
-): ...
-def fruchterman_reingold_layout(
- G,
- k: Optional[Any] = ...,
- pos: Optional[Any] = ...,
- fixed: Optional[Any] = ...,
- iterations: int = ...,
- threshold: float = ...,
- weight: str = ...,
- scale: int = ...,
- center: Optional[Any] = ...,
- dim: int = ...,
- random_state: Optional[Any] = ...,
-): ...
-
-spring_layout = fruchterman_reingold_layout
-
-def kamada_kawai_layout(
- G,
- dist: Optional[Any] = ...,
- pos: Optional[Any] = ...,
- weight: str = ...,
- scale: int = ...,
- center: Optional[Any] = ...,
- dim: int = ...,
-): ...
-def spectral_layout(
- G, weight: str = ..., scale: int = ..., center: Optional[Any] = ..., dim: int = ...
-): ...
-def rescale_layout(pos, scale: int = ...): ...
diff --git a/mypy-stubs/networkx/drawing/nx_agraph.pyi b/mypy-stubs/networkx/drawing/nx_agraph.pyi
deleted file mode 100644
index 2e9900dd1..000000000
--- a/mypy-stubs/networkx/drawing/nx_agraph.pyi
+++ /dev/null
@@ -1,22 +0,0 @@
-# Stubs for networkx.drawing.nx_agraph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def from_agraph(A, create_using: Optional[Any] = ...): ...
-def to_agraph(N): ...
-def write_dot(G, path): ...
-def read_dot(path): ...
-def graphviz_layout(G, prog: str = ..., root: Optional[Any] = ..., args: str = ...): ...
-def pygraphviz_layout(
- G, prog: str = ..., root: Optional[Any] = ..., args: str = ...
-): ...
-def view_pygraphviz(
- G,
- edgelabel: Optional[Any] = ...,
- prog: str = ...,
- args: str = ...,
- suffix: str = ...,
- path: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/drawing/nx_pydot.pyi b/mypy-stubs/networkx/drawing/nx_pydot.pyi
deleted file mode 100644
index 198ff1195..000000000
--- a/mypy-stubs/networkx/drawing/nx_pydot.pyi
+++ /dev/null
@@ -1,15 +0,0 @@
-# Stubs for networkx.drawing.nx_pydot (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-basestring = str
-unicode = str
-
-def write_dot(G, path): ...
-def read_dot(path): ...
-def from_pydot(P): ...
-def to_pydot(N): ...
-def graphviz_layout(G, prog: str = ..., root: Optional[Any] = ..., **kwds): ...
-def pydot_layout(G, prog: str = ..., root: Optional[Any] = ..., **kwds): ...
diff --git a/mypy-stubs/networkx/drawing/nx_pylab.pyi b/mypy-stubs/networkx/drawing/nx_pylab.pyi
deleted file mode 100644
index 72bf69163..000000000
--- a/mypy-stubs/networkx/drawing/nx_pylab.pyi
+++ /dev/null
@@ -1,82 +0,0 @@
-# Stubs for networkx.drawing.nx_pylab (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def draw(G, pos: Optional[Any] = ..., ax: Optional[Any] = ..., **kwds): ...
-def draw_networkx(
- G, pos: Optional[Any] = ..., arrows: bool = ..., with_labels: bool = ..., **kwds
-): ...
-def draw_networkx_nodes(
- G,
- pos,
- nodelist: Optional[Any] = ...,
- node_size: int = ...,
- node_color: str = ...,
- node_shape: str = ...,
- alpha: float = ...,
- cmap: Optional[Any] = ...,
- vmin: Optional[Any] = ...,
- vmax: Optional[Any] = ...,
- ax: Optional[Any] = ...,
- linewidths: Optional[Any] = ...,
- edgecolors: Optional[Any] = ...,
- label: Optional[Any] = ...,
- **kwds
-): ...
-def draw_networkx_edges(
- G,
- pos,
- edgelist: Optional[Any] = ...,
- width: float = ...,
- edge_color: str = ...,
- style: str = ...,
- alpha: float = ...,
- arrowstyle: str = ...,
- arrowsize: int = ...,
- edge_cmap: Optional[Any] = ...,
- edge_vmin: Optional[Any] = ...,
- edge_vmax: Optional[Any] = ...,
- ax: Optional[Any] = ...,
- arrows: bool = ...,
- label: Optional[Any] = ...,
- node_size: int = ...,
- nodelist: Optional[Any] = ...,
- node_shape: str = ...,
- **kwds
-): ...
-def draw_networkx_labels(
- G,
- pos,
- labels: Optional[Any] = ...,
- font_size: int = ...,
- font_color: str = ...,
- font_family: str = ...,
- font_weight: str = ...,
- alpha: float = ...,
- bbox: Optional[Any] = ...,
- ax: Optional[Any] = ...,
- **kwds
-): ...
-def draw_networkx_edge_labels(
- G,
- pos,
- edge_labels: Optional[Any] = ...,
- label_pos: float = ...,
- font_size: int = ...,
- font_color: str = ...,
- font_family: str = ...,
- font_weight: str = ...,
- alpha: float = ...,
- bbox: Optional[Any] = ...,
- ax: Optional[Any] = ...,
- rotate: bool = ...,
- **kwds
-): ...
-def draw_circular(G, **kwargs): ...
-def draw_kamada_kawai(G, **kwargs): ...
-def draw_random(G, **kwargs): ...
-def draw_spectral(G, **kwargs): ...
-def draw_spring(G, **kwargs): ...
-def draw_shell(G, **kwargs): ...
diff --git a/mypy-stubs/networkx/exception.pyi b/mypy-stubs/networkx/exception.pyi
deleted file mode 100644
index 5ebcfec4b..000000000
--- a/mypy-stubs/networkx/exception.pyi
+++ /dev/null
@@ -1,20 +0,0 @@
-# Stubs for networkx.exception (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-class NetworkXException(Exception): ...
-class NetworkXError(NetworkXException): ...
-class NetworkXPointlessConcept(NetworkXException): ...
-class NetworkXAlgorithmError(NetworkXException): ...
-class NetworkXUnfeasible(NetworkXAlgorithmError): ...
-class NetworkXNoPath(NetworkXUnfeasible): ...
-class NetworkXNoCycle(NetworkXUnfeasible): ...
-class HasACycle(NetworkXException): ...
-class NetworkXUnbounded(NetworkXAlgorithmError): ...
-class NetworkXNotImplemented(NetworkXException): ...
-class NodeNotFound(NetworkXException): ...
-class AmbiguousSolution(NetworkXException): ...
-class ExceededMaxIterations(NetworkXException): ...
-
-class PowerIterationFailedConvergence(ExceededMaxIterations):
- def __init__(self, num_iterations, *args, **kw) -> None: ...
diff --git a/mypy-stubs/networkx/generators/atlas.pyi b/mypy-stubs/networkx/generators/atlas.pyi
deleted file mode 100644
index dd46cd773..000000000
--- a/mypy-stubs/networkx/generators/atlas.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.generators.atlas (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def graph_atlas(i): ...
-def graph_atlas_g(): ...
diff --git a/mypy-stubs/networkx/generators/classic.pyi b/mypy-stubs/networkx/generators/classic.pyi
deleted file mode 100644
index a0d50f2c4..000000000
--- a/mypy-stubs/networkx/generators/classic.pyi
+++ /dev/null
@@ -1,24 +0,0 @@
-# Stubs for networkx.generators.classic (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def full_rary_tree(r, n, create_using: Optional[Any] = ...): ...
-def balanced_tree(r, h, create_using: Optional[Any] = ...): ...
-def barbell_graph(m1, m2, create_using: Optional[Any] = ...): ...
-def complete_graph(n, create_using: Optional[Any] = ...): ...
-def circular_ladder_graph(n, create_using: Optional[Any] = ...): ...
-def circulant_graph(n, offsets, create_using: Optional[Any] = ...): ...
-def cycle_graph(n, create_using: Optional[Any] = ...): ...
-def dorogovtsev_goltsev_mendes_graph(n, create_using: Optional[Any] = ...): ...
-def empty_graph(n: int = ..., create_using: Optional[Any] = ...): ...
-def ladder_graph(n, create_using: Optional[Any] = ...): ...
-def lollipop_graph(m, n, create_using: Optional[Any] = ...): ...
-def null_graph(create_using: Optional[Any] = ...): ...
-def path_graph(n, create_using: Optional[Any] = ...): ...
-def star_graph(n, create_using: Optional[Any] = ...): ...
-def trivial_graph(create_using: Optional[Any] = ...): ...
-def turan_graph(n, r): ...
-def wheel_graph(n, create_using: Optional[Any] = ...): ...
-def complete_multipartite_graph(*subset_sizes): ...
diff --git a/mypy-stubs/networkx/generators/community.pyi b/mypy-stubs/networkx/generators/community.pyi
deleted file mode 100644
index 71dd2e603..000000000
--- a/mypy-stubs/networkx/generators/community.pyi
+++ /dev/null
@@ -1,20 +0,0 @@
-# Stubs for networkx.generators.community (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def caveman_graph(l, k): ...
-def connected_caveman_graph(l, k): ...
-def relaxed_caveman_graph(l, k, p, seed: Optional[Any] = ...): ...
-def random_partition_graph(
- sizes, p_in, p_out, seed: Optional[Any] = ..., directed: bool = ...
-): ...
-def planted_partition_graph(
- l, k, p_in, p_out, seed: Optional[Any] = ..., directed: bool = ...
-): ...
-def gaussian_random_partition_graph(
- n, s, v, p_in, p_out, directed: bool = ..., seed: Optional[Any] = ...
-): ...
-def ring_of_cliques(num_cliques, clique_size): ...
-def windmill_graph(n, k): ...
diff --git a/mypy-stubs/networkx/generators/degree_seq.pyi b/mypy-stubs/networkx/generators/degree_seq.pyi
deleted file mode 100644
index 85d846470..000000000
--- a/mypy-stubs/networkx/generators/degree_seq.pyi
+++ /dev/null
@@ -1,40 +0,0 @@
-# Stubs for networkx.generators.degree_seq (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def configuration_model(
- deg_sequence, create_using: Optional[Any] = ..., seed: Optional[Any] = ...
-): ...
-def directed_configuration_model(
- in_degree_sequence,
- out_degree_sequence,
- create_using: Optional[Any] = ...,
- seed: Optional[Any] = ...,
-): ...
-def expected_degree_graph(w, seed: Optional[Any] = ..., selfloops: bool = ...): ...
-def havel_hakimi_graph(deg_sequence, create_using: Optional[Any] = ...): ...
-def directed_havel_hakimi_graph(
- in_deg_sequence, out_deg_sequence, create_using: Optional[Any] = ...
-): ...
-def degree_sequence_tree(deg_sequence, create_using: Optional[Any] = ...): ...
-def random_degree_sequence_graph(
- sequence, seed: Optional[Any] = ..., tries: int = ...
-): ...
-
-class DegreeSequenceRandomGraph:
- degree: Any = ...
- m: Any = ...
- dmax: Any = ...
- def __init__(self, degree, seed: Optional[Any] = ...) -> None: ...
- remaining_degree: Any = ...
- graph: Any = ...
- def generate(self): ...
- def update_remaining(self, u, v, aux_graph: Optional[Any] = ...): ...
- def p(self, u, v): ...
- def q(self, u, v): ...
- def suitable_edge(self): ...
- def phase1(self): ...
- def phase2(self): ...
- def phase3(self): ...
diff --git a/mypy-stubs/networkx/generators/directed.pyi b/mypy-stubs/networkx/generators/directed.pyi
deleted file mode 100644
index 41015ef79..000000000
--- a/mypy-stubs/networkx/generators/directed.pyi
+++ /dev/null
@@ -1,27 +0,0 @@
-# Stubs for networkx.generators.directed (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def gn_graph(
- n,
- kernel: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- seed: Optional[Any] = ...,
-): ...
-def gnr_graph(n, p, create_using: Optional[Any] = ..., seed: Optional[Any] = ...): ...
-def gnc_graph(n, create_using: Optional[Any] = ..., seed: Optional[Any] = ...): ...
-def scale_free_graph(
- n,
- alpha: float = ...,
- beta: float = ...,
- gamma: float = ...,
- delta_in: float = ...,
- delta_out: int = ...,
- create_using: Optional[Any] = ...,
- seed: Optional[Any] = ...,
-): ...
-def random_k_out_graph(
- n, k, alpha, self_loops: bool = ..., seed: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/generators/duplication.pyi b/mypy-stubs/networkx/generators/duplication.pyi
deleted file mode 100644
index d999f1b8f..000000000
--- a/mypy-stubs/networkx/generators/duplication.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.generators.duplication (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def partial_duplication_graph(N, n, p, q, seed: Optional[Any] = ...): ...
-def duplication_divergence_graph(n, p, seed: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/generators/ego.pyi b/mypy-stubs/networkx/generators/ego.pyi
deleted file mode 100644
index 3322b0848..000000000
--- a/mypy-stubs/networkx/generators/ego.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.generators.ego (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def ego_graph(
- G,
- n,
- radius: int = ...,
- center: bool = ...,
- undirected: bool = ...,
- distance: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/generators/expanders.pyi b/mypy-stubs/networkx/generators/expanders.pyi
deleted file mode 100644
index 13bb9d6f2..000000000
--- a/mypy-stubs/networkx/generators/expanders.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.generators.expanders (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def margulis_gabber_galil_graph(n, create_using: Optional[Any] = ...): ...
-def chordal_cycle_graph(p, create_using: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/generators/geometric.pyi b/mypy-stubs/networkx/generators/geometric.pyi
deleted file mode 100644
index 051b8e5be..000000000
--- a/mypy-stubs/networkx/generators/geometric.pyi
+++ /dev/null
@@ -1,51 +0,0 @@
-# Stubs for networkx.generators.geometric (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def random_geometric_graph(
- n, radius, dim: int = ..., pos: Optional[Any] = ..., p: int = ...
-): ...
-def soft_random_geometric_graph(
- n,
- radius,
- dim: int = ...,
- pos: Optional[Any] = ...,
- p: int = ...,
- p_dist: Optional[Any] = ...,
-): ...
-def geographical_threshold_graph(
- n,
- theta,
- dim: int = ...,
- pos: Optional[Any] = ...,
- weight: Optional[Any] = ...,
- metric: Optional[Any] = ...,
- p_dist: Optional[Any] = ...,
-): ...
-def waxman_graph(
- n,
- beta: float = ...,
- alpha: float = ...,
- L: Optional[Any] = ...,
- domain: Any = ...,
- metric: Optional[Any] = ...,
-): ...
-def navigable_small_world_graph(
- n,
- p: int = ...,
- q: int = ...,
- r: int = ...,
- dim: int = ...,
- seed: Optional[Any] = ...,
-): ...
-def thresholded_random_geometric_graph(
- n,
- radius,
- theta,
- dim: int = ...,
- pos: Optional[Any] = ...,
- weight: Optional[Any] = ...,
- p: int = ...,
-): ...
diff --git a/mypy-stubs/networkx/generators/intersection.pyi b/mypy-stubs/networkx/generators/intersection.pyi
deleted file mode 100644
index 3806fcaf0..000000000
--- a/mypy-stubs/networkx/generators/intersection.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.generators.intersection (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def uniform_random_intersection_graph(n, m, p, seed: Optional[Any] = ...): ...
-def k_random_intersection_graph(n, m, k): ...
-def general_random_intersection_graph(n, m, p): ...
diff --git a/mypy-stubs/networkx/generators/joint_degree_seq.pyi b/mypy-stubs/networkx/generators/joint_degree_seq.pyi
deleted file mode 100644
index e21cdfa03..000000000
--- a/mypy-stubs/networkx/generators/joint_degree_seq.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.generators.joint_degree_seq (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def is_valid_joint_degree(joint_degrees): ...
-def joint_degree_graph(joint_degrees, seed: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/generators/lattice.pyi b/mypy-stubs/networkx/generators/lattice.pyi
deleted file mode 100644
index 25d726a95..000000000
--- a/mypy-stubs/networkx/generators/lattice.pyi
+++ /dev/null
@@ -1,23 +0,0 @@
-# Stubs for networkx.generators.lattice (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def grid_2d_graph(m, n, periodic: bool = ..., create_using: Optional[Any] = ...): ...
-def grid_graph(dim, periodic: bool = ...): ...
-def hypercube_graph(n): ...
-def triangular_lattice_graph(
- m,
- n,
- periodic: bool = ...,
- with_positions: bool = ...,
- create_using: Optional[Any] = ...,
-): ...
-def hexagonal_lattice_graph(
- m,
- n,
- periodic: bool = ...,
- with_positions: bool = ...,
- create_using: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/generators/line.pyi b/mypy-stubs/networkx/generators/line.pyi
deleted file mode 100644
index 339d8682c..000000000
--- a/mypy-stubs/networkx/generators/line.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.generators.line (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-from networkx.utils.decorators import *
-
-def line_graph(G, create_using: Optional[Any] = ...): ...
-def inverse_line_graph(G): ...
diff --git a/mypy-stubs/networkx/generators/mycielski.pyi b/mypy-stubs/networkx/generators/mycielski.pyi
deleted file mode 100644
index c5b8bf6e0..000000000
--- a/mypy-stubs/networkx/generators/mycielski.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.generators.mycielski (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def mycielskian(G, iterations: int = ...): ...
-def mycielski_graph(n): ...
diff --git a/mypy-stubs/networkx/generators/nonisomorphic_trees.pyi b/mypy-stubs/networkx/generators/nonisomorphic_trees.pyi
deleted file mode 100644
index 92c60c35b..000000000
--- a/mypy-stubs/networkx/generators/nonisomorphic_trees.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.generators.nonisomorphic_trees (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def nonisomorphic_trees(order, create: str = ...): ...
-def number_of_nonisomorphic_trees(order): ...
diff --git a/mypy-stubs/networkx/generators/random_clustered.pyi b/mypy-stubs/networkx/generators/random_clustered.pyi
deleted file mode 100644
index ae7bf5c3d..000000000
--- a/mypy-stubs/networkx/generators/random_clustered.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.generators.random_clustered (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def random_clustered_graph(
- joint_degree_sequence, create_using: Optional[Any] = ..., seed: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/generators/random_graphs.pyi b/mypy-stubs/networkx/generators/random_graphs.pyi
deleted file mode 100644
index 16fc313df..000000000
--- a/mypy-stubs/networkx/generators/random_graphs.pyi
+++ /dev/null
@@ -1,34 +0,0 @@
-# Stubs for networkx.generators.random_graphs (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def fast_gnp_random_graph(n, p, seed: Optional[Any] = ..., directed: bool = ...): ...
-def gnp_random_graph(n, p, seed: Optional[Any] = ..., directed: bool = ...): ...
-
-binomial_graph = gnp_random_graph
-erdos_renyi_graph = gnp_random_graph
-
-def dense_gnm_random_graph(n, m, seed: Optional[Any] = ...): ...
-def gnm_random_graph(n, m, seed: Optional[Any] = ..., directed: bool = ...): ...
-def newman_watts_strogatz_graph(n, k, p, seed: Optional[Any] = ...): ...
-def watts_strogatz_graph(n, k, p, seed: Optional[Any] = ...): ...
-def connected_watts_strogatz_graph(
- n, k, p, tries: int = ..., seed: Optional[Any] = ...
-): ...
-def random_regular_graph(d, n, seed: Optional[Any] = ...): ...
-def barabasi_albert_graph(n, m, seed: Optional[Any] = ...): ...
-def extended_barabasi_albert_graph(n, m, p, q, seed: Optional[Any] = ...): ...
-def powerlaw_cluster_graph(n, m, p, seed: Optional[Any] = ...): ...
-def random_lobster(n, p1, p2, seed: Optional[Any] = ...): ...
-def random_shell_graph(constructor, seed: Optional[Any] = ...): ...
-def random_powerlaw_tree(
- n, gamma: int = ..., seed: Optional[Any] = ..., tries: int = ...
-): ...
-def random_powerlaw_tree_sequence(
- n, gamma: int = ..., seed: Optional[Any] = ..., tries: int = ...
-): ...
-def random_kernel_graph(
- n, kernel_integral, kernel_root: Optional[Any] = ..., seed: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/generators/small.pyi b/mypy-stubs/networkx/generators/small.pyi
deleted file mode 100644
index 63c549a7b..000000000
--- a/mypy-stubs/networkx/generators/small.pyi
+++ /dev/null
@@ -1,30 +0,0 @@
-# Stubs for networkx.generators.small (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def make_small_graph(graph_description, create_using: Optional[Any] = ...): ...
-def LCF_graph(n, shift_list, repeats, create_using: Optional[Any] = ...): ...
-def bull_graph(create_using: Optional[Any] = ...): ...
-def chvatal_graph(create_using: Optional[Any] = ...): ...
-def cubical_graph(create_using: Optional[Any] = ...): ...
-def desargues_graph(create_using: Optional[Any] = ...): ...
-def diamond_graph(create_using: Optional[Any] = ...): ...
-def dodecahedral_graph(create_using: Optional[Any] = ...): ...
-def frucht_graph(create_using: Optional[Any] = ...): ...
-def heawood_graph(create_using: Optional[Any] = ...): ...
-def hoffman_singleton_graph(): ...
-def house_graph(create_using: Optional[Any] = ...): ...
-def house_x_graph(create_using: Optional[Any] = ...): ...
-def icosahedral_graph(create_using: Optional[Any] = ...): ...
-def krackhardt_kite_graph(create_using: Optional[Any] = ...): ...
-def moebius_kantor_graph(create_using: Optional[Any] = ...): ...
-def octahedral_graph(create_using: Optional[Any] = ...): ...
-def pappus_graph(): ...
-def petersen_graph(create_using: Optional[Any] = ...): ...
-def sedgewick_maze_graph(create_using: Optional[Any] = ...): ...
-def tetrahedral_graph(create_using: Optional[Any] = ...): ...
-def truncated_cube_graph(create_using: Optional[Any] = ...): ...
-def truncated_tetrahedron_graph(create_using: Optional[Any] = ...): ...
-def tutte_graph(create_using: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/generators/social.pyi b/mypy-stubs/networkx/generators/social.pyi
deleted file mode 100644
index 5c5f56136..000000000
--- a/mypy-stubs/networkx/generators/social.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.generators.social (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def karate_club_graph(): ...
-def davis_southern_women_graph(): ...
-def florentine_families_graph(): ...
diff --git a/mypy-stubs/networkx/generators/stochastic.pyi b/mypy-stubs/networkx/generators/stochastic.pyi
deleted file mode 100644
index 05a87cd32..000000000
--- a/mypy-stubs/networkx/generators/stochastic.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.generators.stochastic (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def stochastic_graph(G, copy: bool = ..., weight: str = ...): ...
diff --git a/mypy-stubs/networkx/generators/trees.pyi b/mypy-stubs/networkx/generators/trees.pyi
deleted file mode 100644
index 1a458af89..000000000
--- a/mypy-stubs/networkx/generators/trees.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.generators.trees (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def prefix_tree(paths): ...
-def random_tree(n, seed: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/generators/triads.pyi b/mypy-stubs/networkx/generators/triads.pyi
deleted file mode 100644
index 9a5f76147..000000000
--- a/mypy-stubs/networkx/generators/triads.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.generators.triads (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def triad_graph(triad_name): ...
diff --git a/mypy-stubs/networkx/linalg/__init__.pyi b/mypy-stubs/networkx/linalg/__init__.pyi
deleted file mode 100644
index f0e2c3064..000000000
--- a/mypy-stubs/networkx/linalg/__init__.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.linalg (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.linalg.algebraicconnectivity import *
-from networkx.linalg.attrmatrix import *
-from networkx.linalg.graphmatrix import *
-from networkx.linalg.laplacianmatrix import *
-from networkx.linalg.modularitymatrix import *
-from networkx.linalg.spectrum import *
diff --git a/mypy-stubs/networkx/linalg/algebraicconnectivity.pyi b/mypy-stubs/networkx/linalg/algebraicconnectivity.pyi
deleted file mode 100644
index 12b5a7ffa..000000000
--- a/mypy-stubs/networkx/linalg/algebraicconnectivity.pyi
+++ /dev/null
@@ -1,28 +0,0 @@
-# Stubs for networkx.linalg.algebraicconnectivity (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-class _PCGSolver:
- def __init__(self, A, M) -> None: ...
- def solve(self, B, tol): ...
-
-class _CholeskySolver:
- def __init__(self, A) -> None: ...
- def solve(self, B, tol: Optional[Any] = ...): ...
-
-class _LUSolver:
- def __init__(self, A) -> None: ...
- def solve(self, B, tol: Optional[Any] = ...): ...
-
-def algebraic_connectivity(
- G, weight: str = ..., normalized: bool = ..., tol: float = ..., method: str = ...
-): ...
-def fiedler_vector(
- G, weight: str = ..., normalized: bool = ..., tol: float = ..., method: str = ...
-): ...
-def spectral_ordering(
- G, weight: str = ..., normalized: bool = ..., tol: float = ..., method: str = ...
-): ...
-def setup_module(module): ...
diff --git a/mypy-stubs/networkx/linalg/attrmatrix.pyi b/mypy-stubs/networkx/linalg/attrmatrix.pyi
deleted file mode 100644
index e4a404015..000000000
--- a/mypy-stubs/networkx/linalg/attrmatrix.pyi
+++ /dev/null
@@ -1,23 +0,0 @@
-# Stubs for networkx.linalg.attrmatrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def attr_matrix(
- G,
- edge_attr: Optional[Any] = ...,
- node_attr: Optional[Any] = ...,
- normalized: bool = ...,
- rc_order: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
- order: Optional[Any] = ...,
-): ...
-def attr_sparse_matrix(
- G,
- edge_attr: Optional[Any] = ...,
- node_attr: Optional[Any] = ...,
- normalized: bool = ...,
- rc_order: Optional[Any] = ...,
- dtype: Optional[Any] = ...,
-): ...
diff --git a/mypy-stubs/networkx/linalg/graphmatrix.pyi b/mypy-stubs/networkx/linalg/graphmatrix.pyi
deleted file mode 100644
index 2081fb906..000000000
--- a/mypy-stubs/networkx/linalg/graphmatrix.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-# Stubs for networkx.linalg.graphmatrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def incidence_matrix(
- G,
- nodelist: Optional[Any] = ...,
- edgelist: Optional[Any] = ...,
- oriented: bool = ...,
- weight: Optional[Any] = ...,
-): ...
-def adjacency_matrix(G, nodelist: Optional[Any] = ..., weight: str = ...): ...
-
-adj_matrix = adjacency_matrix
diff --git a/mypy-stubs/networkx/linalg/laplacianmatrix.pyi b/mypy-stubs/networkx/linalg/laplacianmatrix.pyi
deleted file mode 100644
index 70b758f3b..000000000
--- a/mypy-stubs/networkx/linalg/laplacianmatrix.pyi
+++ /dev/null
@@ -1,17 +0,0 @@
-# Stubs for networkx.linalg.laplacianmatrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def laplacian_matrix(G, nodelist: Optional[Any] = ..., weight: str = ...): ...
-def normalized_laplacian_matrix(
- G, nodelist: Optional[Any] = ..., weight: str = ...
-): ...
-def directed_laplacian_matrix(
- G,
- nodelist: Optional[Any] = ...,
- weight: str = ...,
- walk_type: Optional[Any] = ...,
- alpha: float = ...,
-): ...
diff --git a/mypy-stubs/networkx/linalg/modularitymatrix.pyi b/mypy-stubs/networkx/linalg/modularitymatrix.pyi
deleted file mode 100644
index 5e91a0993..000000000
--- a/mypy-stubs/networkx/linalg/modularitymatrix.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.linalg.modularitymatrix (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def modularity_matrix(
- G, nodelist: Optional[Any] = ..., weight: Optional[Any] = ...
-): ...
-def directed_modularity_matrix(
- G, nodelist: Optional[Any] = ..., weight: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/linalg/spectrum.pyi b/mypy-stubs/networkx/linalg/spectrum.pyi
deleted file mode 100644
index b0f0fd047..000000000
--- a/mypy-stubs/networkx/linalg/spectrum.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.linalg.spectrum (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def laplacian_spectrum(G, weight: str = ...): ...
-def adjacency_spectrum(G, weight: str = ...): ...
-def modularity_spectrum(G): ...
diff --git a/mypy-stubs/networkx/readwrite/__init__.pyi b/mypy-stubs/networkx/readwrite/__init__.pyi
deleted file mode 100644
index d16702b25..000000000
--- a/mypy-stubs/networkx/readwrite/__init__.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-# Stubs for networkx.readwrite (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.readwrite.adjlist import *
-from networkx.readwrite.edgelist import *
-from networkx.readwrite.gexf import *
-from networkx.readwrite.gml import *
-from networkx.readwrite.gpickle import *
-from networkx.readwrite.graph6 import *
-from networkx.readwrite.graphml import *
-from networkx.readwrite.json_graph import *
-from networkx.readwrite.leda import *
-from networkx.readwrite.multiline_adjlist import *
-from networkx.readwrite.nx_shp import *
-from networkx.readwrite.nx_yaml import *
-from networkx.readwrite.pajek import *
-from networkx.readwrite.sparse6 import *
diff --git a/mypy-stubs/networkx/readwrite/adjlist.pyi b/mypy-stubs/networkx/readwrite/adjlist.pyi
deleted file mode 100644
index eb9cb374e..000000000
--- a/mypy-stubs/networkx/readwrite/adjlist.pyi
+++ /dev/null
@@ -1,25 +0,0 @@
-# Stubs for networkx.readwrite.adjlist (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def generate_adjlist(G, delimiter: str = ...): ...
-def write_adjlist(
- G, path, comments: str = ..., delimiter: str = ..., encoding: str = ...
-): ...
-def parse_adjlist(
- lines,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
-): ...
-def read_adjlist(
- path,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- encoding: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/readwrite/edgelist.pyi b/mypy-stubs/networkx/readwrite/edgelist.pyi
deleted file mode 100644
index 2b50c0dd2..000000000
--- a/mypy-stubs/networkx/readwrite/edgelist.pyi
+++ /dev/null
@@ -1,44 +0,0 @@
-# Stubs for networkx.readwrite.edgelist (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def generate_edgelist(G, delimiter: str = ..., data: bool = ...): ...
-def write_edgelist(
- G,
- path,
- comments: str = ...,
- delimiter: str = ...,
- data: bool = ...,
- encoding: str = ...,
-): ...
-def parse_edgelist(
- lines,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- data: bool = ...,
-): ...
-def read_edgelist(
- path,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- data: bool = ...,
- edgetype: Optional[Any] = ...,
- encoding: str = ...,
-): ...
-def write_weighted_edgelist(
- G, path, comments: str = ..., delimiter: str = ..., encoding: str = ...
-): ...
-def read_weighted_edgelist(
- path,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- encoding: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/readwrite/gexf.pyi b/mypy-stubs/networkx/readwrite/gexf.pyi
deleted file mode 100644
index ee02cb295..000000000
--- a/mypy-stubs/networkx/readwrite/gexf.pyi
+++ /dev/null
@@ -1,80 +0,0 @@
-# Stubs for networkx.readwrite.gexf (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def write_gexf(
- G, path, encoding: str = ..., prettyprint: bool = ..., version: str = ...
-): ...
-def generate_gexf(
- G, encoding: str = ..., prettyprint: bool = ..., version: str = ...
-): ...
-def read_gexf(
- path, node_type: Optional[Any] = ..., relabel: bool = ..., version: str = ...
-): ...
-
-class GEXF:
- versions: Any = ...
- d: Any = ...
- types: Any = ...
- blurb: Any = ...
- xml_type: Any = ...
- python_type: Any = ...
- convert_bool: Any = ...
- NS_GEXF: Any = ...
- NS_VIZ: Any = ...
- NS_XSI: Any = ...
- SCHEMALOCATION: Any = ...
- VERSION: Any = ...
- version: Any = ...
- def set_version(self, version): ...
-
-class GEXFWriter(GEXF):
- prettyprint: Any = ...
- encoding: Any = ...
- xml: Any = ...
- edge_id: Any = ...
- attr_id: Any = ...
- attr: Any = ...
- def __init__(
- self,
- graph: Optional[Any] = ...,
- encoding: str = ...,
- prettyprint: bool = ...,
- version: str = ...,
- ) -> None: ...
- graph_element: Any = ...
- def add_graph(self, G): ...
- def add_meta(self, G, graph_element): ...
- def add_nodes(self, G, graph_element): ...
- def add_edges(self, G, graph_element): ...
- def add_attributes(self, node_or_edge, xml_obj, data, default): ...
- def get_attr_id(self, title, attr_type, edge_or_node, default, mode): ...
- def add_viz(self, element, node_data): ...
- def add_parents(self, node_element, node_data): ...
- def add_slices(self, node_or_edge_element, node_or_edge_data): ...
- def add_spells(self, node_or_edge_element, node_or_edge_data): ...
- def alter_graph_mode_timeformat(self, start_or_end): ...
- def write(self, fh): ...
- def indent(self, elem, level: int = ...): ...
-
-class GEXFReader(GEXF):
- node_type: Any = ...
- simple_graph: bool = ...
- def __init__(self, node_type: Optional[Any] = ..., version: str = ...) -> None: ...
- xml: Any = ...
- def __call__(self, stream): ...
- timeformat: Any = ...
- def make_graph(self, graph_xml): ...
- def add_node(self, G, node_xml, node_attr, node_pid: Optional[Any] = ...): ...
- def add_start_end(self, data, xml): ...
- def add_viz(self, data, node_xml): ...
- def add_parents(self, data, node_xml): ...
- def add_slices(self, data, node_or_edge_xml): ...
- def add_spells(self, data, node_or_edge_xml): ...
- def add_edge(self, G, edge_element, edge_attr): ...
- def decode_attr_elements(self, gexf_keys, obj_xml): ...
- def find_gexf_attributes(self, attributes_element): ...
-
-def relabel_gexf_graph(G): ...
diff --git a/mypy-stubs/networkx/readwrite/gml.pyi b/mypy-stubs/networkx/readwrite/gml.pyi
deleted file mode 100644
index c4f3fff5b..000000000
--- a/mypy-stubs/networkx/readwrite/gml.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.readwrite.gml (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-long = int
-unicode = str
-unichr = chr
-
-def read_gml(path, label: str = ..., destringizer: Optional[Any] = ...): ...
-def parse_gml(lines, label: str = ..., destringizer: Optional[Any] = ...): ...
-def generate_gml(G, stringizer: Optional[Any] = ...): ...
-def write_gml(G, path, stringizer: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/readwrite/gpickle.pyi b/mypy-stubs/networkx/readwrite/gpickle.pyi
deleted file mode 100644
index 31e6a873f..000000000
--- a/mypy-stubs/networkx/readwrite/gpickle.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.readwrite.gpickle (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def write_gpickle(G, path, protocol: Any = ...): ...
-def read_gpickle(path): ...
diff --git a/mypy-stubs/networkx/readwrite/graph6.pyi b/mypy-stubs/networkx/readwrite/graph6.pyi
deleted file mode 100644
index 29d87748e..000000000
--- a/mypy-stubs/networkx/readwrite/graph6.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.readwrite.graph6 (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def from_graph6_bytes(string): ...
-def to_graph6_bytes(G, nodes: Optional[Any] = ..., header: bool = ...): ...
-def read_graph6(path): ...
-def write_graph6(G, path, nodes: Optional[Any] = ..., header: bool = ...): ...
diff --git a/mypy-stubs/networkx/readwrite/graphml.pyi b/mypy-stubs/networkx/readwrite/graphml.pyi
deleted file mode 100644
index b227aa864..000000000
--- a/mypy-stubs/networkx/readwrite/graphml.pyi
+++ /dev/null
@@ -1,105 +0,0 @@
-# Stubs for networkx.readwrite.graphml (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def write_graphml_xml(
- G,
- path,
- encoding: str = ...,
- prettyprint: bool = ...,
- infer_numeric_types: bool = ...,
-): ...
-def write_graphml_lxml(
- G,
- path,
- encoding: str = ...,
- prettyprint: bool = ...,
- infer_numeric_types: bool = ...,
-): ...
-def generate_graphml(G, encoding: str = ..., prettyprint: bool = ...): ...
-def read_graphml(path, node_type: Any = ..., edge_key_type: Any = ...): ...
-def parse_graphml(graphml_string, node_type: Any = ...): ...
-
-class GraphML:
- NS_GRAPHML: str = ...
- NS_XSI: str = ...
- NS_Y: str = ...
- SCHEMALOCATION: Any = ...
- unicode: Any = ...
- long: Any = ...
- types: Any = ...
- xml_type: Any = ...
- python_type: Any = ...
- convert_bool: Any = ...
-
-class GraphMLWriter(GraphML):
- myElement: Any = ...
- infer_numeric_types: Any = ...
- prettyprint: Any = ...
- encoding: Any = ...
- xml: Any = ...
- keys: Any = ...
- attributes: Any = ...
- attribute_types: Any = ...
- def __init__(
- self,
- graph: Optional[Any] = ...,
- encoding: str = ...,
- prettyprint: bool = ...,
- infer_numeric_types: bool = ...,
- ) -> None: ...
- def attr_type(self, name, scope, value): ...
- def get_key(self, name, attr_type, scope, default): ...
- def add_data(
- self, name, element_type, value, scope: str = ..., default: Optional[Any] = ...
- ): ...
- def add_attributes(self, scope, xml_obj, data, default): ...
- def add_nodes(self, G, graph_element): ...
- def add_edges(self, G, graph_element): ...
- def add_graph_element(self, G): ...
- def add_graphs(self, graph_list): ...
- def dump(self, stream): ...
- def indent(self, elem, level: int = ...): ...
-
-class IncrementalElement:
- xml: Any = ...
- prettyprint: Any = ...
- def __init__(self, xml, prettyprint) -> None: ...
- def append(self, element): ...
-
-class GraphMLWriterLxml(GraphMLWriter):
- myElement: Any = ...
- infer_numeric_types: Any = ...
- xml: Any = ...
- keys: Any = ...
- attribute_types: Any = ...
- def __init__(
- self,
- path,
- graph: Optional[Any] = ...,
- encoding: str = ...,
- prettyprint: bool = ...,
- infer_numeric_types: bool = ...,
- ) -> None: ...
- def add_graph_element(self, G): ...
- def add_attributes(self, scope, xml_obj, data, default): ...
- def dump(self): ...
-
-write_graphml = write_graphml_xml
-write_graphml = write_graphml_lxml
-
-class GraphMLReader(GraphML):
- node_type: Any = ...
- edge_key_type: Any = ...
- multigraph: bool = ...
- edge_ids: Any = ...
- def __init__(self, node_type: Any = ..., edge_key_type: Any = ...) -> None: ...
- xml: Any = ...
- def __call__(self, path: Optional[Any] = ..., string: Optional[Any] = ...): ...
- def make_graph(self, graph_xml, graphml_keys, defaults, G: Optional[Any] = ...): ...
- def add_node(self, G, node_xml, graphml_keys, defaults): ...
- def add_edge(self, G, edge_element, graphml_keys): ...
- def decode_data_elements(self, graphml_keys, obj_xml): ...
- def find_graphml_keys(self, graph_element): ...
diff --git a/mypy-stubs/networkx/readwrite/json_graph/__init__.pyi b/mypy-stubs/networkx/readwrite/json_graph/__init__.pyi
deleted file mode 100644
index ddb956c03..000000000
--- a/mypy-stubs/networkx/readwrite/json_graph/__init__.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.readwrite.json_graph (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.readwrite.json_graph.adjacency import *
-from networkx.readwrite.json_graph.cytoscape import *
-from networkx.readwrite.json_graph.jit import *
-from networkx.readwrite.json_graph.node_link import *
-from networkx.readwrite.json_graph.tree import *
diff --git a/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi b/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi
deleted file mode 100644
index 82a05102e..000000000
--- a/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.readwrite.json_graph.adjacency (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def adjacency_data(G, attrs: Any = ...): ...
-def adjacency_graph(
- data, directed: bool = ..., multigraph: bool = ..., attrs: Any = ...
-): ...
diff --git a/mypy-stubs/networkx/readwrite/json_graph/cytoscape.pyi b/mypy-stubs/networkx/readwrite/json_graph/cytoscape.pyi
deleted file mode 100644
index e58ca5c8f..000000000
--- a/mypy-stubs/networkx/readwrite/json_graph/cytoscape.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.readwrite.json_graph.cytoscape (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def cytoscape_data(G, attrs: Optional[Any] = ...): ...
-def cytoscape_graph(data, attrs: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/readwrite/json_graph/jit.pyi b/mypy-stubs/networkx/readwrite/json_graph/jit.pyi
deleted file mode 100644
index 8207cc982..000000000
--- a/mypy-stubs/networkx/readwrite/json_graph/jit.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.readwrite.json_graph.jit (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def jit_graph(data, create_using: Optional[Any] = ...): ...
-def jit_data(G, indent: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/readwrite/json_graph/node_link.pyi b/mypy-stubs/networkx/readwrite/json_graph/node_link.pyi
deleted file mode 100644
index 4024f94c3..000000000
--- a/mypy-stubs/networkx/readwrite/json_graph/node_link.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.readwrite.json_graph.node_link (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def node_link_data(G, attrs: Optional[Any] = ...): ...
-def node_link_graph(
- data, directed: bool = ..., multigraph: bool = ..., attrs: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/readwrite/json_graph/tree.pyi b/mypy-stubs/networkx/readwrite/json_graph/tree.pyi
deleted file mode 100644
index a6cf9d9bf..000000000
--- a/mypy-stubs/networkx/readwrite/json_graph/tree.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.readwrite.json_graph.tree (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-def tree_data(G, root, attrs: Any = ...): ...
-def tree_graph(data, attrs: Any = ...): ...
diff --git a/mypy-stubs/networkx/readwrite/leda.pyi b/mypy-stubs/networkx/readwrite/leda.pyi
deleted file mode 100644
index 5aace9c61..000000000
--- a/mypy-stubs/networkx/readwrite/leda.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.readwrite.leda (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def read_leda(path, encoding: str = ...): ...
-def parse_leda(lines): ...
diff --git a/mypy-stubs/networkx/readwrite/multiline_adjlist.pyi b/mypy-stubs/networkx/readwrite/multiline_adjlist.pyi
deleted file mode 100644
index c228feb2a..000000000
--- a/mypy-stubs/networkx/readwrite/multiline_adjlist.pyi
+++ /dev/null
@@ -1,27 +0,0 @@
-# Stubs for networkx.readwrite.multiline_adjlist (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def generate_multiline_adjlist(G, delimiter: str = ...): ...
-def write_multiline_adjlist(
- G, path, delimiter: str = ..., comments: str = ..., encoding: str = ...
-): ...
-def parse_multiline_adjlist(
- lines,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- edgetype: Optional[Any] = ...,
-): ...
-def read_multiline_adjlist(
- path,
- comments: str = ...,
- delimiter: Optional[Any] = ...,
- create_using: Optional[Any] = ...,
- nodetype: Optional[Any] = ...,
- edgetype: Optional[Any] = ...,
- encoding: str = ...,
-): ...
diff --git a/mypy-stubs/networkx/readwrite/nx_shp.pyi b/mypy-stubs/networkx/readwrite/nx_shp.pyi
deleted file mode 100644
index a9109b51c..000000000
--- a/mypy-stubs/networkx/readwrite/nx_shp.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.readwrite.nx_shp (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def read_shp(
- path, simplify: bool = ..., geom_attrs: bool = ..., strict: bool = ...
-): ...
-def write_shp(G, outdir): ...
diff --git a/mypy-stubs/networkx/readwrite/nx_yaml.pyi b/mypy-stubs/networkx/readwrite/nx_yaml.pyi
deleted file mode 100644
index d30936a9d..000000000
--- a/mypy-stubs/networkx/readwrite/nx_yaml.pyi
+++ /dev/null
@@ -1,6 +0,0 @@
-# Stubs for networkx.readwrite.nx_yaml (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def write_yaml(G_to_be_yaml, path_for_yaml_output, **kwds): ...
-def read_yaml(path): ...
diff --git a/mypy-stubs/networkx/readwrite/p2g.pyi b/mypy-stubs/networkx/readwrite/p2g.pyi
deleted file mode 100644
index 7711dcf72..000000000
--- a/mypy-stubs/networkx/readwrite/p2g.pyi
+++ /dev/null
@@ -1,7 +0,0 @@
-# Stubs for networkx.readwrite.p2g (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def write_p2g(G, path, encoding: str = ...): ...
-def read_p2g(path, encoding: str = ...): ...
-def parse_p2g(lines): ...
diff --git a/mypy-stubs/networkx/readwrite/pajek.pyi b/mypy-stubs/networkx/readwrite/pajek.pyi
deleted file mode 100644
index f11fa880e..000000000
--- a/mypy-stubs/networkx/readwrite/pajek.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.readwrite.pajek (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def generate_pajek(G): ...
-def write_pajek(G, path, encoding: str = ...): ...
-def read_pajek(path, encoding: str = ...): ...
-def parse_pajek(lines): ...
diff --git a/mypy-stubs/networkx/readwrite/sparse6.pyi b/mypy-stubs/networkx/readwrite/sparse6.pyi
deleted file mode 100644
index 86ae69187..000000000
--- a/mypy-stubs/networkx/readwrite/sparse6.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.readwrite.sparse6 (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def from_sparse6_bytes(string): ...
-def to_sparse6_bytes(G, nodes: Optional[Any] = ..., header: bool = ...): ...
-def read_sparse6(path): ...
-def write_sparse6(G, path, nodes: Optional[Any] = ..., header: bool = ...): ...
diff --git a/mypy-stubs/networkx/relabel.pyi b/mypy-stubs/networkx/relabel.pyi
deleted file mode 100644
index eeb7ebf13..000000000
--- a/mypy-stubs/networkx/relabel.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Stubs for networkx.relabel (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def relabel_nodes(G, mapping, copy: bool = ...): ...
-def convert_node_labels_to_integers(
- G, first_label: int = ..., ordering: str = ..., label_attribute: Optional[Any] = ...
-): ...
diff --git a/mypy-stubs/networkx/release.pyi b/mypy-stubs/networkx/release.pyi
deleted file mode 100644
index 388c441de..000000000
--- a/mypy-stubs/networkx/release.pyi
+++ /dev/null
@@ -1,32 +0,0 @@
-# Stubs for networkx.release (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-basedir: Any
-
-def write_versionfile(): ...
-def get_revision(): ...
-def get_info(dynamic: bool = ...): ...
-
-name: str
-major: str
-minor: str
-dev: bool
-description: str
-long_description: str
-license: str
-authors: Any
-maintainer: str
-maintainer_email: str
-url: str
-download_url: str
-platforms: Any
-keywords: Any
-classifiers: Any
-date: Any
-date_info: Any
-version: Any
-version_info: Any
-vcs_info: Any
diff --git a/mypy-stubs/networkx/tests/__init__.pyi b/mypy-stubs/networkx/tests/__init__.pyi
deleted file mode 100644
index 11a6b20ca..000000000
--- a/mypy-stubs/networkx/tests/__init__.pyi
+++ /dev/null
@@ -1,3 +0,0 @@
-# Stubs for networkx.tests (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
diff --git a/mypy-stubs/networkx/tests/test.pyi b/mypy-stubs/networkx/tests/test.pyi
deleted file mode 100644
index 9f4979fba..000000000
--- a/mypy-stubs/networkx/tests/test.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.tests.test (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def run(verbosity: int = ..., doctest: bool = ..., numpy: bool = ...): ...
diff --git a/mypy-stubs/networkx/utils/__init__.pyi b/mypy-stubs/networkx/utils/__init__.pyi
deleted file mode 100644
index 04da9c71c..000000000
--- a/mypy-stubs/networkx/utils/__init__.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-# Stubs for networkx.utils (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from networkx.utils.contextmanagers import *
-from networkx.utils.decorators import *
-from networkx.utils.heaps import *
-from networkx.utils.misc import *
-from networkx.utils.random_sequence import *
-from networkx.utils.rcm import *
-from networkx.utils.union_find import *
diff --git a/mypy-stubs/networkx/utils/contextmanagers.pyi b/mypy-stubs/networkx/utils/contextmanagers.pyi
deleted file mode 100644
index cbe85f7e3..000000000
--- a/mypy-stubs/networkx/utils/contextmanagers.pyi
+++ /dev/null
@@ -1,5 +0,0 @@
-# Stubs for networkx.utils.contextmanagers (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def reversed(G): ...
diff --git a/mypy-stubs/networkx/utils/decorators.pyi b/mypy-stubs/networkx/utils/decorators.pyi
deleted file mode 100644
index fbb4899d3..000000000
--- a/mypy-stubs/networkx/utils/decorators.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-# Stubs for networkx.utils.decorators (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-def not_implemented_for(*graph_types): ...
-def open_file(path_arg, mode: str = ...): ...
-def nodes_or_number(which_args): ...
-def preserve_random_state(func): ...
-def random_state(random_state_index): ...
diff --git a/mypy-stubs/networkx/utils/heaps.pyi b/mypy-stubs/networkx/utils/heaps.pyi
deleted file mode 100644
index 4e53dddb1..000000000
--- a/mypy-stubs/networkx/utils/heaps.pyi
+++ /dev/null
@@ -1,42 +0,0 @@
-# Stubs for networkx.utils.heaps (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-class MinHeap:
- class _Item:
- key: Any = ...
- value: Any = ...
- def __init__(self, key, value) -> None: ...
-
- def __init__(self) -> None: ...
- def min(self): ...
- def pop(self): ...
- def get(self, key, default: Optional[Any] = ...): ...
- def insert(self, key, value, allow_increase: bool = ...): ...
- def __nonzero__(self): ...
- def __bool__(self): ...
- def __len__(self): ...
- def __contains__(self, key): ...
-
-class PairingHeap(MinHeap):
- class _Node(MinHeap._Item):
- left: Any = ...
- next: Any = ...
- prev: Any = ...
- parent: Any = ...
- def __init__(self, key, value) -> None: ...
-
- def __init__(self) -> None: ...
- def min(self): ...
- def pop(self): ...
- def get(self, key, default: Optional[Any] = ...): ...
- def insert(self, key, value, allow_increase: bool = ...): ...
-
-class BinaryHeap(MinHeap):
- def __init__(self) -> None: ...
- def min(self): ...
- def pop(self): ...
- def get(self, key, default: Optional[Any] = ...): ...
- def insert(self, key, value, allow_increase: bool = ...): ...
diff --git a/mypy-stubs/networkx/utils/misc.pyi b/mypy-stubs/networkx/utils/misc.pyi
deleted file mode 100644
index 5417f7642..000000000
--- a/mypy-stubs/networkx/utils/misc.pyi
+++ /dev/null
@@ -1,29 +0,0 @@
-# Stubs for networkx.utils.misc (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-basestring = str
-unicode = str
-
-def is_string_like(obj): ...
-def iterable(obj): ...
-def flatten(obj, result: Optional[Any] = ...): ...
-def is_list_of_ints(intlist): ...
-
-PY2: Any
-
-def make_str(x): ...
-def generate_unique_node(): ...
-def default_opener(filename): ...
-def dict_to_numpy_array(d, mapping: Optional[Any] = ...): ...
-def dict_to_numpy_array2(d, mapping: Optional[Any] = ...): ...
-def dict_to_numpy_array1(d, mapping: Optional[Any] = ...): ...
-def is_iterator(obj): ...
-def arbitrary_element(iterable): ...
-def consume(iterator): ...
-def pairwise(iterable, cyclic: bool = ...): ...
-def groups(many_to_one): ...
-def to_tuple(x): ...
-def create_random_state(random_state: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/utils/random_sequence.pyi b/mypy-stubs/networkx/utils/random_sequence.pyi
deleted file mode 100644
index 7ff56e9ee..000000000
--- a/mypy-stubs/networkx/utils/random_sequence.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.utils.random_sequence (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def powerlaw_sequence(n, exponent: float = ...): ...
-def zipf_rv(alpha, xmin: int = ..., seed: Optional[Any] = ...): ...
-def cumulative_distribution(distribution): ...
-def discrete_sequence(
- n, distribution: Optional[Any] = ..., cdistribution: Optional[Any] = ...
-): ...
-def random_weighted_sample(mapping, k): ...
-def weighted_choice(mapping): ...
diff --git a/mypy-stubs/networkx/utils/rcm.pyi b/mypy-stubs/networkx/utils/rcm.pyi
deleted file mode 100644
index 00d9fe3fd..000000000
--- a/mypy-stubs/networkx/utils/rcm.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for networkx.utils.rcm (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-def cuthill_mckee_ordering(G, heuristic: Optional[Any] = ...): ...
-def reverse_cuthill_mckee_ordering(G, heuristic: Optional[Any] = ...): ...
diff --git a/mypy-stubs/networkx/utils/union_find.pyi b/mypy-stubs/networkx/utils/union_find.pyi
deleted file mode 100644
index 92d26c367..000000000
--- a/mypy-stubs/networkx/utils/union_find.pyi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Stubs for networkx.utils.union_find (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any, Optional
-
-class UnionFind:
- parents: Any = ...
- weights: Any = ...
- def __init__(self, elements: Optional[Any] = ...) -> None: ...
- def __getitem__(self, object): ...
- def __iter__(self): ...
- def to_sets(self): ...
- def union(self, *objects): ...
diff --git a/mypy-stubs/networkx/version.pyi b/mypy-stubs/networkx/version.pyi
deleted file mode 100644
index 7125b5349..000000000
--- a/mypy-stubs/networkx/version.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-# Stubs for networkx.version (Python 3.5)
-#
-# NOTE: This dynamically typed stub was automatically generated by stubgen.
-
-from typing import Any
-
-version: str
-date: str
-dev: bool
-version_info: Any
-date_info: Any
-vcs_info: Any
diff --git a/mypy-stubs/prov/constants.py b/mypy-stubs/prov/constants.py
index f711d00dd..314224a63 100644
--- a/mypy-stubs/prov/constants.py
+++ b/mypy-stubs/prov/constants.py
@@ -170,9 +170,7 @@
PROV_ATTRIBUTES = PROV_ATTRIBUTE_QNAMES | PROV_ATTRIBUTE_LITERALS
PROV_RECORD_ATTRIBUTES = list((attr, str(attr)) for attr in PROV_ATTRIBUTES)
-PROV_RECORD_IDS_MAP = dict(
- (PROV_N_MAP[rec_type_id], rec_type_id) for rec_type_id in PROV_N_MAP
-)
+PROV_RECORD_IDS_MAP = dict((PROV_N_MAP[rec_type_id], rec_type_id) for rec_type_id in PROV_N_MAP)
PROV_ID_ATTRIBUTES_MAP = dict(
(prov_id, attribute) for (prov_id, attribute) in PROV_RECORD_ATTRIBUTES
)
diff --git a/mypy-stubs/prov/model.pyi b/mypy-stubs/prov/model.pyi
index 57ac5cb1e..ee2688a4d 100644
--- a/mypy-stubs/prov/model.pyi
+++ b/mypy-stubs/prov/model.pyi
@@ -209,7 +209,7 @@ class ProvBundle:
@property
def document(self) -> ProvDocument | None: ...
@property
- def identifier(self) -> str | None: ...
+ def identifier(self) -> str | None | QualifiedName: ...
@property
def records(self) -> List[ProvRecord]: ...
def set_default_namespace(self, uri: Namespace) -> None: ...
@@ -228,9 +228,7 @@ class ProvBundle:
| Tuple[type | type[int | str] | Tuple[Any, ...], ...]
| None = ...,
) -> List[ProvRecord]: ...
- def get_record(
- self, identifier: Identifier | None
- ) -> ProvRecord | List[ProvRecord] | None: ...
+ def get_record(self, identifier: Identifier | None) -> ProvRecord | List[ProvRecord] | None: ...
def is_document(self) -> bool: ...
def is_bundle(self) -> bool: ...
def has_bundles(self) -> bool: ...
@@ -400,9 +398,7 @@ class ProvBundle:
identifier: str,
other_attributes: _attributes_type | None,
) -> ProvRecord: ...
- def membership(
- self, collection: ProvRecord, entity: ProvEntity | str
- ) -> ProvRecord: ...
+ def membership(self, collection: ProvRecord, entity: ProvEntity | str) -> ProvRecord: ...
def plot(
self,
filename: str | None = ...,
@@ -444,9 +440,7 @@ class ProvDocument(ProvBundle):
def flattened(self) -> ProvDocument: ...
def unified(self) -> ProvDocument: ...
def update(self, other: ProvDocument | ProvBundle) -> None: ...
- def add_bundle(
- self, bundle: ProvBundle, identifier: Incomplete | None = ...
- ) -> None: ...
+ def add_bundle(self, bundle: ProvBundle, identifier: Incomplete | None = ...) -> None: ...
def bundle(self, identifier: Identifier) -> ProvBundle: ...
def serialize(
self, destination: IO[Any] | None = ..., format: str = ..., **args: Any
@@ -456,7 +450,7 @@ class ProvDocument(ProvBundle):
source: IO[Any] | str | None = ...,
content: str | None = ...,
format: str = ...,
- **args: Any
+ **args: Any,
) -> ProvDocument: ...
def sorted_attributes(element: ProvElement, attributes: List[str]) -> List[str]: ...
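The `identifier` widening above matches how the prov library actually behaves: a bundle created through the document API reports a `QualifiedName`, not a plain `str`. A minimal sketch (namespace prefix, URI, and bundle name are illustrative):

```python
from prov.model import ProvDocument

doc = ProvDocument()
doc.add_namespace("ex", "https://example.org/")  # illustrative namespace
bundle = doc.bundle("ex:bundle1")
# identifier is a prov.identifier.QualifiedName here, hence the broadened stub type
print(type(bundle.identifier))
```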
diff --git a/mypy-stubs/pydot.pyi b/mypy-stubs/pydot.pyi
index 93464c1d4..bd0ab3147 100644
--- a/mypy-stubs/pydot.pyi
+++ b/mypy-stubs/pydot.pyi
@@ -33,9 +33,7 @@ class InvocationException(Exception):
class Node(Common):
obj_dict: Any
- def __init__(
- self, name: str = ..., obj_dict: Any | None = ..., **attrs: str
- ) -> None: ...
+ def __init__(self, name: str = ..., obj_dict: Any | None = ..., **attrs: str) -> None: ...
def set_name(self, node_name: str) -> None: ...
def get_name(self) -> str: ...
def get_port(self) -> str: ...
@@ -49,7 +47,7 @@ class Edge(Common):
src: str = ...,
dst: str = ...,
obj_dict: Any | None = ...,
- **attrs: Dict[str, str]
+ **attrs: Dict[str, str],
) -> None: ...
def get_source(self) -> str: ...
def get_destination(self) -> str: ...
@@ -68,7 +66,7 @@ class Graph(Common):
strict: bool = ...,
suppress_disconnected: bool = ...,
simplify: bool = ...,
- **attrs: Dict[str, str]
+ **attrs: Dict[str, str],
) -> None: ...
def get_graph_type(self) -> str: ...
def get_top_graph_type(self) -> str: ...
@@ -120,7 +118,7 @@ class Subgraph(Graph):
obj_dict: Any | Dict[str, str] = ...,
suppress_disconnected: bool = ...,
simplify: bool = ...,
- **attrs: Dict[str, str]
+ **attrs: Dict[str, str],
) -> None: ...
class Cluster(Graph):
@@ -130,7 +128,7 @@ class Cluster(Graph):
obj_dict: Any | Dict[str, str] = ...,
suppress_disconnected: bool = ...,
simplify: bool = ...,
- **attrs: Dict[str, str]
+ **attrs: Dict[str, str],
) -> None: ...
class Dot(Graph):
diff --git a/mypy-stubs/rdflib/graph.pyi b/mypy-stubs/rdflib/graph.pyi
index 23c2e6e1f..d3e6f2f54 100644
--- a/mypy-stubs/rdflib/graph.pyi
+++ b/mypy-stubs/rdflib/graph.pyi
@@ -172,9 +172,7 @@ class Graph(Node):
data: Optional[Any] = ...,
**args: Any,
) -> "Graph": ...
- def load(
- self, source: Any, publicID: Optional[Any] = ..., format: str = ...
- ) -> "Graph": ...
+ def load(self, source: Any, publicID: Optional[Any] = ..., format: str = ...) -> "Graph": ...
def query(
self,
query_object: Any,
diff --git a/mypy-stubs/rdflib/query.pyi b/mypy-stubs/rdflib/query.pyi
index 73f4008b3..981fe12d2 100644
--- a/mypy-stubs/rdflib/query.pyi
+++ b/mypy-stubs/rdflib/query.pyi
@@ -31,12 +31,12 @@ class Result:
source: IO[Any] | None = ...,
format: str | None = ...,
content_type: str | None = ...,
- **kwargs: Any
+ **kwargs: Any,
) -> Result: ...
def serialize(
self,
destination: str | IO[Any] | None = ...,
encoding: str = ...,
format: str = ...,
- **args: Any
+ **args: Any,
) -> Optional[bytes]: ...
diff --git a/mypy-stubs/rdflib/resource.pyi b/mypy-stubs/rdflib/resource.pyi
index e520dbe66..0dd3b988e 100644
--- a/mypy-stubs/rdflib/resource.pyi
+++ b/mypy-stubs/rdflib/resource.pyi
@@ -23,11 +23,7 @@ class Resource:
def label(self) -> Any: ...
def comment(self) -> Any: ...
def items(self) -> Iterator[Any]: ...
- def transitive_objects(
- self, predicate: Node, remember: Any | None = ...
- ) -> Iterator[Any]: ...
- def transitive_subjects(
- self, predicate: Node, remember: Any | None = ...
- ) -> Iterator[Any]: ...
+ def transitive_objects(self, predicate: Node, remember: Any | None = ...) -> Iterator[Any]: ...
+ def transitive_subjects(self, predicate: Node, remember: Any | None = ...) -> Iterator[Any]: ...
def seq(self) -> Seq | None: ...
def qname(self) -> Any: ...
diff --git a/mypy-stubs/subprocess.pyi b/mypy-stubs/subprocess.pyi
deleted file mode 100644
index 880ab4736..000000000
--- a/mypy-stubs/subprocess.pyi
+++ /dev/null
@@ -1,1098 +0,0 @@
-import sys
-from types import TracebackType
-from typing import (
- IO,
- Any,
- AnyStr,
- Callable,
- Generic,
- Iterable,
- Mapping,
- Sequence,
- Tuple,
- Type,
- TypeVar,
- Union,
- overload,
-)
-
-from _typeshed import Self, StrOrBytesPath
-from typing_extensions import Literal
-
-if sys.version_info >= (3, 9):
- from types import GenericAlias
-
-# We prefer to annotate inputs to methods (eg subprocess.check_call) with these
-# union types.
-# For outputs we use laborious literal based overloads to try to determine
-# which specific return types to use, and prefer to fall back to Any when
-# this does not work, so the caller does not have to use an assertion to confirm
-# which type.
-#
-# For example:
-#
-# try:
-# x = subprocess.check_output(["ls", "-l"])
-# reveal_type(x) # bytes, based on the overloads
-# except TimeoutError as e:
-# reveal_type(e.cmd) # Any, but morally is _CMD
-_FILE = Union[None, int, IO[Any]]
-_TXT = Union[bytes, str]
-if sys.version_info >= (3, 8):
- _CMD = Union[StrOrBytesPath, Sequence[StrOrBytesPath]]
-else:
- # Python 3.6 doesn't support _CMD being a single PathLike.
- # See: https://bugs.python.org/issue31961
- _CMD = Union[_TXT, Sequence[StrOrBytesPath]]
-if sys.platform == "win32":
- _ENV = Mapping[str, str]
-else:
- _ENV = Union[Mapping[bytes, StrOrBytesPath], Mapping[str, StrOrBytesPath]]
-
-_T = TypeVar("_T")
-
-class CompletedProcess(Generic[_T]):
- # morally: _CMD
- args: Any
- returncode: int | None # this optional is REQUIRED for mypyc
- # These can both be None, but requiring checks for None would be tedious
- # and writing all the overloads would be horrific.
- stdout: _T
- stderr: _T
- def __init__(
- self,
- args: _CMD,
- returncode: int,
- stdout: _T | None = ...,
- stderr: _T | None = ...,
- ) -> None: ...
- def check_returncode(self) -> None: ...
- if sys.version_info >= (3, 9):
- def __class_getitem__(cls, item: Any) -> GenericAlias: ...
-
-if sys.version_info >= (3, 7):
- # Nearly the same args as for 3.6, except for capture_output and text
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- input: str | None = ...,
- text: Literal[True],
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: str,
- errors: str | None = ...,
- input: str | None = ...,
- text: bool | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: str | None = ...,
- errors: str,
- input: str | None = ...,
- text: bool | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- capture_output: bool = ...,
- check: bool = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- input: str | None = ...,
- text: bool | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: None = ...,
- errors: None = ...,
- input: bytes | None = ...,
- text: Literal[None, False] = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[bytes]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- input: _TXT | None = ...,
- text: bool | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[Any]: ...
-
-else:
- # Nearly same args as Popen.__init__ except for timeout, input, and check
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: str,
- errors: str | None = ...,
- input: str | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: str | None = ...,
- errors: str,
- input: str | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- check: bool = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- input: str | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: None = ...,
- errors: None = ...,
- input: bytes | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[bytes]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- input: _TXT | None = ...,
- timeout: float | None = ...,
- ) -> CompletedProcess[Any]: ...
-
-# Same args as Popen.__init__
-def call(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
-) -> int: ...
-
-# Same args as Popen.__init__
-def check_call(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- timeout: float | None = ...,
-) -> int: ...
-
-if sys.version_info >= (3, 7):
- # 3.7 added text
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- text: Literal[True],
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str,
- errors: str | None = ...,
- text: bool | None = ...,
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str,
- text: bool | None = ...,
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the real keyword only ones start
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- text: bool | None = ...,
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: None = ...,
- errors: None = ...,
- text: Literal[None, False] = ...,
- ) -> bytes: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- text: bool | None = ...,
- ) -> Any: ... # morally: -> _TXT
-
-else:
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str,
- errors: str | None = ...,
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str,
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- universal_newlines: Literal[True],
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> str: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: None = ...,
- errors: None = ...,
- ) -> bytes: ...
- @overload
- def check_output(
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: float | None = ...,
- input: _TXT | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> Any: ... # morally: -> _TXT
-
-PIPE: int
-STDOUT: int
-DEVNULL: int
-
-class SubprocessError(Exception): ...
-
-class TimeoutExpired(SubprocessError):
- def __init__(
- self,
- cmd: _CMD,
- timeout: float,
- output: _TXT | None = ...,
- stderr: _TXT | None = ...,
- ) -> None: ...
- # morally: _CMD
- cmd: Any
- timeout: float
- # morally: _TXT | None
- output: Any
- stdout: Any
- stderr: Any
-
-class CalledProcessError(SubprocessError):
- returncode: int | None # this optional is REQUIRED for mypyc
- # morally: _CMD
- cmd: Any
- # morally: _TXT | None
- output: Any
-
- # morally: _TXT | None
- stdout: Any
- stderr: Any
- def __init__(
- self,
- returncode: int,
- cmd: _CMD,
- output: _TXT | None = ...,
- stderr: _TXT | None = ...,
- ) -> None: ...
-
-class Popen(Generic[AnyStr]):
- args: _CMD
- stdin: IO[AnyStr] | None
- stdout: IO[AnyStr] | None
- stderr: IO[AnyStr] | None
- pid: int
- returncode: int | None # this optional is REQUIRED for mypyc
- universal_newlines: bool
-
- # Technically it is wrong that Popen provides __new__ instead of __init__
- # but this shouldn't come up hopefully?
-
- if sys.version_info >= (3, 7):
- # text is added in 3.7
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: bool | None = ...,
- encoding: str,
- errors: str | None = ...,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: bool | None = ...,
- encoding: str | None = ...,
- errors: str,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- text: bool | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Literal[True],
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Literal[None, False] = ...,
- encoding: None = ...,
- errors: None = ...,
- ) -> Popen[bytes]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: bool | None = ...,
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> Popen[Any]: ...
- else:
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: str,
- errors: str | None = ...,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: str | None = ...,
- errors: str,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> Popen[str]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: None = ...,
- errors: None = ...,
- ) -> Popen[bytes]: ...
- @overload
- def __new__(
- cls,
- args: _CMD,
- bufsize: int = ...,
- executable: StrOrBytesPath | None = ...,
- stdin: _FILE | None = ...,
- stdout: _FILE | None = ...,
- stderr: _FILE | None = ...,
- preexec_fn: Callable[[], Any] | None = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: StrOrBytesPath | None = ...,
- env: _ENV | None = ...,
- universal_newlines: bool = ...,
- startupinfo: Any | None = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: str | None = ...,
- errors: str | None = ...,
- ) -> Popen[Any]: ...
-
- def poll(self) -> int | None: ...
- if sys.version_info >= (3, 7):
- def wait(self, timeout: float | None = ...) -> int: ...
- else:
- def wait(
- self, timeout: float | None = ..., endtime: float | None = ...
- ) -> int: ...
- # Return str/bytes
- def communicate(
- self,
- input: AnyStr | None = ...,
- timeout: float | None = ...,
- # morally this should be optional
- ) -> Tuple[AnyStr, AnyStr]: ...
- def send_signal(self, sig: int) -> None: ...
- def terminate(self) -> None: ...
- def kill(self) -> None: ...
- def __enter__(self: Self) -> Self: ...
- def __exit__(
- self,
- type: Type[BaseException] | None,
- value: BaseException | None,
- traceback: TracebackType | None,
- ) -> None: ...
- if sys.version_info >= (3, 9):
- def __class_getitem__(cls, item: Any) -> GenericAlias: ...
-
-# The result really is always a str.
-def getstatusoutput(cmd: _TXT) -> Tuple[int, str]: ...
-def getoutput(cmd: _TXT) -> str: ...
-def list2cmdline(seq: Iterable[str]) -> str: ... # undocumented
-
-if sys.platform == "win32":
- class STARTUPINFO:
- if sys.version_info >= (3, 7):
- def __init__(
- self,
- *,
- dwFlags: int = ...,
- hStdInput: Any | None = ...,
- hStdOutput: Any | None = ...,
- hStdError: Any | None = ...,
- wShowWindow: int = ...,
- lpAttributeList: Mapping[str, Any] | None = ...,
- ) -> None: ...
- dwFlags: int
- hStdInput: Any | None
- hStdOutput: Any | None
- hStdError: Any | None
- wShowWindow: int
- if sys.version_info >= (3, 7):
- lpAttributeList: Mapping[str, Any]
- STD_INPUT_HANDLE: Any
- STD_OUTPUT_HANDLE: Any
- STD_ERROR_HANDLE: Any
- SW_HIDE: int
- STARTF_USESTDHANDLES: int
- STARTF_USESHOWWINDOW: int
- CREATE_NEW_CONSOLE: int
- CREATE_NEW_PROCESS_GROUP: int
- if sys.version_info >= (3, 7):
- ABOVE_NORMAL_PRIORITY_CLASS: int
- BELOW_NORMAL_PRIORITY_CLASS: int
- HIGH_PRIORITY_CLASS: int
- IDLE_PRIORITY_CLASS: int
- NORMAL_PRIORITY_CLASS: int
- REALTIME_PRIORITY_CLASS: int
- CREATE_NO_WINDOW: int
- DETACHED_PROCESS: int
- CREATE_DEFAULT_ERROR_MODE: int
- CREATE_BREAKAWAY_FROM_JOB: int
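Deleting this vendored `subprocess.pyi` is reasonable once its Python 3.6/3.7 branches are dead weight: the typeshed bundled with current mypy provides the same overloads. The deleted header comment's example is still easy to reproduce; a sketch of what the bundled stubs infer (nothing here needs to run, the comments are the mypy-reported types):

```python
import subprocess

out_bytes = subprocess.check_output(["ls", "-l"])            # inferred as bytes
out_text = subprocess.check_output(["ls", "-l"], text=True)  # inferred as str
```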
diff --git a/mypy-stubs/urllib/__init__.py b/mypy-stubs/urllib/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/mypy-stubs/urllib/parse.pyi b/mypy-stubs/urllib/parse.pyi
deleted file mode 100644
index 3e14b3ef6..000000000
--- a/mypy-stubs/urllib/parse.pyi
+++ /dev/null
@@ -1,204 +0,0 @@
-# Stubs for urllib.parse
-import sys
-from typing import (
- Any,
- AnyStr,
- Callable,
- Dict,
- Generic,
- Iterator,
- List,
- Mapping,
- NamedTuple,
- Optional,
- Sequence,
- Tuple,
- Union,
- overload,
-)
-
-_Str = Union[bytes, str]
-
-uses_relative: List[str]
-uses_netloc: List[str]
-uses_params: List[str]
-non_hierarchical: List[str]
-uses_query: List[str]
-uses_fragment: List[str]
-scheme_chars: str
-MAX_CACHE_SIZE = 0
-
-class _ResultMixinBase(Generic[AnyStr]):
- def geturl(self) -> AnyStr: ...
-
-class _ResultMixinStr(_ResultMixinBase[str]):
- def encode(self, encoding: str = ..., errors: str = ...) -> _ResultMixinBytes: ...
-
-class _ResultMixinBytes(_ResultMixinBase[str]):
- def decode(self, encoding: str = ..., errors: str = ...) -> _ResultMixinStr: ...
-
-class _NetlocResultMixinBase(Generic[AnyStr]):
- username: AnyStr
- password: AnyStr
- hostname: AnyStr
- port: int
-
-class _NetlocResultMixinStr(_NetlocResultMixinBase[str], _ResultMixinStr): ...
-class _NetlocResultMixinBytes(_NetlocResultMixinBase[bytes], _ResultMixinBytes): ...
-
-class _DefragResultBase(Generic[AnyStr]):
- url: AnyStr
- fragment: AnyStr
- @overload
- def __getitem__(self, x: slice) -> AnyStr: ...
- @overload
- def __getitem__(self, x: int) -> AnyStr: ...
- def __iter__(self) -> Iterator[AnyStr]: ...
-
-_SplitResultBase = NamedTuple(
- "_SplitResultBase",
- [
- ("scheme", str),
- ("netloc", str),
- ("path", str),
- ("query", str),
- ("fragment", str),
- ],
-)
-_SplitResultBytesBase = NamedTuple(
- "_SplitResultBytesBase",
- [
- ("scheme", bytes),
- ("netloc", bytes),
- ("path", bytes),
- ("query", bytes),
- ("fragment", bytes),
- ],
-)
-
-_ParseResultBase = NamedTuple(
- "_ParseResultBase",
- [
- ("scheme", str),
- ("netloc", str),
- ("path", str),
- ("params", str),
- ("query", str),
- ("fragment", str),
- ],
-)
-_ParseResultBytesBase = NamedTuple(
- "_ParseResultBytesBase",
- [
- ("scheme", bytes),
- ("netloc", bytes),
- ("path", bytes),
- ("params", bytes),
- ("query", bytes),
- ("fragment", bytes),
- ],
-)
-
-# Structured result objects for string data
-class DefragResult(_DefragResultBase[str], _ResultMixinStr): ...
-class SplitResult(_SplitResultBase, _NetlocResultMixinStr): ...
-class ParseResult(_ParseResultBase, _NetlocResultMixinStr): ...
-
-# Structured result objects for bytes data
-class DefragResultBytes(_DefragResultBase[bytes], _ResultMixinBytes): ...
-class SplitResultBytes(_SplitResultBytesBase, _NetlocResultMixinBytes): ...
-class ParseResultBytes(_ParseResultBytesBase, _NetlocResultMixinBytes): ...
-
-def parse_qs(
- qs: AnyStr,
- keep_blank_values: bool = ...,
- strict_parsing: bool = ...,
- encoding: str = ...,
- errors: str = ...,
-) -> Dict[AnyStr, List[AnyStr]]: ...
-def parse_qsl(
- qs: AnyStr,
- keep_blank_values: bool = ...,
- strict_parsing: bool = ...,
- encoding: str = ...,
- errors: str = ...,
-) -> List[Tuple[AnyStr, AnyStr]]: ...
-@overload
-def quote(
- string: str, safe: _Str = ..., encoding: str = ..., errors: str = ...
-) -> str: ...
-@overload
-def quote(string: bytes, safe: _Str = ...) -> str: ...
-def quote_from_bytes(bs: bytes, safe: _Str = ...) -> str: ...
-@overload
-def quote_plus(
- string: str, safe: _Str = ..., encoding: str = ..., errors: str = ...
-) -> str: ...
-@overload
-def quote_plus(string: bytes, safe: _Str = ...) -> str: ...
-def unquote(string: str, encoding: str = ..., errors: str = ...) -> str: ...
-def unquote_to_bytes(string: _Str) -> bytes: ...
-def unquote_plus(string: str, encoding: str = ..., errors: str = ...) -> str: ...
-@overload
-def urldefrag(url: str) -> DefragResult: ...
-@overload
-def urldefrag(url: bytes) -> DefragResultBytes: ...
-
-if sys.version_info >= (3, 5):
- def urlencode(
- query: Union[
- Mapping[Any, Any],
- Mapping[Any, Sequence[Any]],
- Sequence[Tuple[Any, Any]],
- Sequence[Tuple[Any, Sequence[Any]]],
- ],
- doseq: bool = ...,
- safe: AnyStr = ...,
- encoding: str = ...,
- errors: str = ...,
- quote_via: Callable[[str, AnyStr, str, str], str] = ...,
- ) -> str: ...
-
-else:
- def urlencode(
- query: Union[
- Mapping[Any, Any],
- Mapping[Any, Sequence[Any]],
- Sequence[Tuple[Any, Any]],
- Sequence[Tuple[Any, Sequence[Any]]],
- ],
- doseq: bool = ...,
- safe: AnyStr = ...,
- encoding: str = ...,
- errors: str = ...,
- ) -> str: ...
-
-def urljoin(
- base: Optional[AnyStr], url: Optional[AnyStr], allow_fragments: bool = ...
-) -> AnyStr: ...
-@overload
-def urlparse(
- url: str, scheme: str = ..., allow_fragments: bool = ...
-) -> ParseResult: ...
-@overload
-def urlparse(
- url: bytes, scheme: bytes = ..., allow_fragments: bool = ...
-) -> ParseResultBytes: ...
-@overload
-def urlsplit(
- url: Optional[str], scheme: str = ..., allow_fragments: bool = ...
-) -> SplitResult: ...
-@overload
-def urlsplit(
- url: bytes, scheme: bytes = ..., allow_fragments: bool = ...
-) -> SplitResultBytes: ...
-@overload
-def urlunparse(
- components: Tuple[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]
-) -> AnyStr: ...
-@overload
-def urlunparse(components: Sequence[AnyStr]) -> AnyStr: ...
-@overload
-def urlunsplit(components: Tuple[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]) -> AnyStr: ...
-@overload
-def urlunsplit(components: Sequence[AnyStr]) -> AnyStr: ...
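The same reasoning applies to the `urllib.parse` stub: the bundled typeshed already carries the str/bytes overloads it duplicated, so the result type tracks the argument type with no local stub. A quick illustration of what the deleted overloads encoded:

```python
from urllib.parse import urlparse

r_str = urlparse("https://example.org/a?b=c")     # ParseResult (str fields)
r_bytes = urlparse(b"https://example.org/a?b=c")  # ParseResultBytes (bytes fields)
```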
diff --git a/mypy.ini b/mypy.ini
index bac992869..b99f97b82 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -5,6 +5,7 @@ show_column_numbers = true
show_error_codes = true
pretty = true
warn_unreachable = True
+new_type_inference = True
[mypy-galaxy.tool_util.*]
ignore_missing_imports = True
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..fef570ea2
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,23 @@
+[build-system]
+requires = [
+ "setuptools>=45",
+ "setuptools_scm[toml]>=8.0.4,<9",
+ "mypy==1.6.0", # also update mypy-requirements.txt
+ "types-requests",
+ "types-psutil",
+ "importlib_resources>=1.4", # equivalent to Python 3.9
+ "ruamel.yaml>=0.16.0,<0.18",
+ "schema-salad>=8.4.20230426093816,<9",
+ "cwl-utils>=0.19",
+ "galaxy-tool-util>=22.1.2,<23.2,!=23.0.1,!=23.0.2,!=23.0.3,!=23.0.4,!=23.0.5",
+ "toml",
+ "argcomplete>=1.12.0",
+]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools_scm]
+write_to = "cwltool/_version.py"
+
+[tool.black]
+line-length = 100
+target-version = [ "py38" ]
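With `[tool.setuptools_scm]` writing `cwltool/_version.py`, the package version is derived from git metadata at build time instead of the hardcoded `version="3.1"` that the setup.py hunk below removes. A sketch of how the resulting version can be read back (the exact string depends on the repository's tags):

```python
from importlib.metadata import version

print(version("cwltool"))  # e.g. "3.1.2023...", derived from the latest git tag

# the module named in write_to also exposes it directly
from cwltool._version import __version__

print(__version__)
```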
diff --git a/release-test.sh b/release-test.sh
index 2ef2fda50..254be6271 100755
--- a/release-test.sh
+++ b/release-test.sh
@@ -25,8 +25,8 @@ run_tests() {
"${test_prefix}"bin/py.test "--ignore=${mod_loc}/schemas/" \
--pyargs -x ${module} -n auto --dist=loadfile
}
-pipver=20.3.3 # minimum required version of pip for Python 3.10
-setuptoolsver=50.0.0 # required for Python 3.10
+pipver=23.1 # minimum required version of pip for Python 3.12
+setuptoolsver=67.6.1 # required for Python 3.12
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
rm -Rf testenv? || /bin/true
@@ -43,20 +43,19 @@ then
&& pip install --force-reinstall -U pip==${pipver} \
&& pip install setuptools==${setuptoolsver} wheel
pip install -rtest-requirements.txt ".${extras}"
- make test
+ #make test
pip uninstall -y ${package} || true; pip uninstall -y ${package} || true; make install
- mkdir testenv1/not-${module}
+ # mkdir testenv1/not-${module}
# if there is a subdir named '${module}' py.test will execute tests
# there instead of the installed module's tests
- pushd testenv1/not-${module}
- # shellcheck disable=SC2086
- test_prefix=../ run_tests; popd
+ # pushd testenv1/not-${module}
+ # #shellcheck disable=SC2086
+ # test_prefix=../ run_tests; popd
fi
python3 -m venv testenv2
python3 -m venv testenv3
-python3 -m venv testenv4
-rm -Rf testenv[234]/local
+rm -Rf testenv[23]/local
# Secondly we test via pip
@@ -69,14 +68,14 @@ rm -f lib/python-wheels/setuptools* \
# The following can fail if you haven't pushed your commits to ${repo}
pip install -e "git+${repo}@${HEAD}#egg=${package}${extras}"
pushd src/${package}
-pip install -rtest-requirements.txt
+pip install -rtest-requirements.txt build
make dist
-make test
+#make test
cp dist/${package}*tar.gz ../../../testenv3/
pip uninstall -y ${package} || true; pip uninstall -y ${package} || true; make install
popd # ../.. no subdir named ${proj} here, safe for py.testing the installed module
# shellcheck disable=SC2086
-run_tests
+#run_tests
popd
# Is the source distribution in testenv2 complete enough to build another
@@ -89,15 +88,16 @@ rm -f lib/python-wheels/setuptools* \
&& pip install --force-reinstall -U pip==${pipver} \
&& pip install setuptools==${setuptoolsver} wheel
package_tar=$(find . -name "${package}*tar.gz")
-pip install "-r${DIR}/test-requirements.txt"
+pip install "-r${DIR}/test-requirements.txt" udocker build
pip install "${package_tar}${extras}"
+udocker install
mkdir out
tar --extract --directory=out -z -f ${package}*.tar.gz
pushd out/${package}*
make dist
make test
pip install "-r${DIR}/mypy-requirements.txt"
-make mypy
+make mypyc
pip uninstall -y ${package} || true; pip uninstall -y ${package} || true; make install
mkdir ../not-${module}
pushd ../not-${module}
diff --git a/requirements.txt b/requirements.txt
index f485c88f1..ca4675508 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,18 +1,14 @@
-requests>=2.4.3
-ruamel.yaml>=0.16.0,<0.17.22;python_version>='3.10'
-ruamel.yaml>=0.15,<0.17.22
-rdflib>=4.2.2,<6.3
-rdflib>= 4.2.2, < 6.0.0;python_version<='3.6'
+requests>=2.6.1
+ruamel.yaml>=0.16.0,<0.19
+rdflib>=4.2.2,<7.1
shellescape>=3.4.1,<3.9
-schema-salad>=8.2.20211104054942,<9
+schema-salad>=8.4.20230426093816,<9
prov==1.5.1
-bagit==1.8.1
mypy-extensions
psutil>=5.6.6
-typing-extensions
+importlib_resources>=1.4 # backport of the Python 3.9 importlib.resources API
coloredlogs
pydot>=1.4.1
argcomplete>=1.12.0
-pyparsing != 3.0.2 # breaks --print-dot (pydot) https://github.com/pyparsing/pyparsing/issues/319
-pyparsing < 3;python_version<='3.6' # breaks --print-dot
-cwl-utils>=0.19
+pyparsing!=3.0.2 # breaks --print-dot (pydot) https://github.com/pyparsing/pyparsing/issues/319
+cwl-utils>=0.22
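On the `importlib_resources` pin: the backport tracks the standard library, and `>=1.4` corresponds to the `importlib.resources` API as of Python 3.9 (notably `files()`). The usual compatibility pattern looks like the following sketch; the data-file name is illustrative, not a claim about cwltool's internals:

```python
import sys

if sys.version_info >= (3, 9):
    from importlib.resources import files
else:
    from importlib_resources import files  # backport with the same API

# files() returns a Traversable rooted at the package's data
text = files("cwltool").joinpath("extensions.yml").read_text()  # file name illustrative
```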
diff --git a/setup.cfg b/setup.cfg
index 42f8e1822..7d28a8ed6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,3 @@
-[flake8]
-ignore = E124,E128,E129,E201,E202,E225,E226,E231,E265,E271,E302,E303,F401,E402,E501,W503,E731,F811,F821,F841,B023
-exclude = cwltool/schemas
-max-line-length = 88
-extend-ignore = E203, W503
-
[aliases]
test=pytest
@@ -15,4 +9,4 @@ use_parentheses = True
line_length = 88
[codespell]
-ignore-words-list=ORE,ore,RO,ro
+ignore-words-list=ORE,ore,RO,ro,recuse
diff --git a/setup.py b/setup.py
index 49fb9d68a..634145e6b 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,6 @@
import sys
import warnings
-import setuptools.command.egg_info as egg_info_cmd
from setuptools import setup
if os.name == "nt":
@@ -17,19 +16,13 @@
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
- "for your CWL document processing needs."
+ "for your CWL document processing needs.",
+ stacklevel=1,
)
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, "README.rst")
-try:
- import gittaggers
-
- Tagger = gittaggers.EggInfoFromGit
-except ImportError:
- Tagger = egg_info_cmd.egg_info
-
NEEDS_PYTEST = {"pytest", "test", "ptr"}.intersection(sys.argv)
PYTEST_RUNNER = ["pytest-runner", "pytest-cov"] if NEEDS_PYTEST else []
USE_MYPYC = False
@@ -63,10 +56,14 @@
"cwltool/main.py",
"cwltool/mutation.py",
"cwltool/pack.py",
- # "cwltool/pathmapper.py", # class PathMapper needs to be subclassable
+ "cwltool/pathmapper.py",
"cwltool/process.py",
"cwltool/procgenerator.py",
- # "cwltool/provenance.py", # WritableBag is having issues
+ # "cwltool/cwlprov/__init__.py",
+ "cwltool/cwlprov/provenance_constants.py",
+ "cwltool/cwlprov/provenance_profile.py",
+ "cwltool/cwlprov/ro.py",
+ # "cwltool/cwlprov/writablebagfile.py", # WritableBag is having issues
"cwltool/resolver.py",
"cwltool/secrets.py",
"cwltool/singularity.py",
@@ -79,7 +76,7 @@
"cwltool/workflow.py",
]
- from mypyc.build import mypycify
+ from mypyc.build import mypycify # type: ignore[import-untyped]
opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
ext_modules = mypycify(mypyc_targets, opt_level=opt_level)
@@ -88,7 +85,6 @@
setup(
name="cwltool",
- version="3.1",
description="Common workflow language reference implementation",
long_description=open(README).read(),
long_description_content_type="text/x-rst",
@@ -99,47 +95,47 @@
ext_modules=ext_modules,
# platforms='', # empty as is conveyed by the classifier below
# license='', # empty as is conveyed by the classifier below
- packages=["cwltool", "cwltool.tests"],
+ packages=["cwltool", "cwltool.tests", "cwltool.cwlprov"],
package_dir={"cwltool.tests": "tests"},
include_package_data=True,
install_requires=[
"setuptools",
"requests >= 2.6.1", # >= 2.6.1 to workaround
# https://github.com/ionrock/cachecontrol/issues/137
- "ruamel.yaml >= 0.15, < 0.17.22",
- "rdflib >= 4.2.2, < 6.3.0",
- "rdflib >= 4.2.2, < 6.0.0;python_version<='3.6'",
+ "ruamel.yaml >= 0.16, < 0.19",
+ "rdflib >= 4.2.2, < 7.1.0",
"shellescape >= 3.4.1, < 3.9",
- "schema-salad >= 8.2.20211104054942, < 9",
+ "schema-salad >= 8.4.20230426093816, < 9",
+ "prov == 1.5.1",
"mypy-extensions",
"psutil >= 5.6.6",
- "prov == 1.5.1",
- "bagit >= 1.6.4",
- "typing-extensions",
+ "importlib_resources>=1.4",
"coloredlogs",
"pydot >= 1.4.1",
- "pyparsing != 3.0.2", # breaks --print-dot (pydot) https://github.com/pyparsing/pyparsing/issues/319
- "pyparsing < 3 ;python_version<='3.6'", # breaks --print-dot (pydot)
"argcomplete",
- "cwl-utils >= 0.19",
+ "pyparsing != 3.0.2", # breaks --print-dot (pydot) https://github.com/pyparsing/pyparsing/issues/319
+ "cwl-utils >= 0.22",
],
extras_require={
- "deps": ["galaxy-tool-util >= 22.1.2, <23"],
+ "deps": [
+ "galaxy-tool-util>=22.1.2,<23.2,!=23.0.1,!=23.0.2,!=23.0.3,!=23.0.4,!=23.0.5",
+ "galaxy-util <23.2",
+ ],
},
- python_requires=">=3.6, <4",
- setup_requires=PYTEST_RUNNER,
+ python_requires=">=3.8, <4",
+ use_scm_version=True,
+ setup_requires=PYTEST_RUNNER + ["setuptools_scm>=8.0.4,<9"],
test_suite="tests",
tests_require=[
- "pytest >= 6.2, < 7.3",
+ "bagit >= 1.6.4, < 1.9",
+ "pytest >= 6.2, < 7.5",
"mock >= 2.0.0",
"pytest-mock >= 1.10.0",
"pytest-httpserver",
"arcp >= 0.2.0",
- "rdflib-jsonld>=0.4.0, <= 0.6.1;python_version<='3.6'",
],
entry_points={"console_scripts": ["cwltool=cwltool.main:run"]},
zip_safe=True,
- cmdclass={"egg_info": Tagger},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
@@ -152,12 +148,11 @@
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Astronomy",
diff --git a/test-requirements.txt b/test-requirements.txt
index 3563c134f..9bdecc1cb 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,10 +1,11 @@
-pytest >= 6.2, < 7.3
+bagit>=1.6.4,<1.9
+pytest>=6.2,<7.5
pytest-xdist
pytest-httpserver
-mock >= 2.0.0
-pytest-mock >= 1.10.0
+mock>=2.0.0
+pytest-mock>=1.10.0
pytest-cov
-arcp >= 0.2.0
-rdflib-jsonld>=0.4.0, <= 0.6.1;python_version<='3.6'
+arcp>=0.2.0
-rrequirements.txt
-galaxy-tool-util >= 22.1.2, < 23
+galaxy-tool-util>=22.1.2,<23.2,!=23.0.1,!=23.0.2,!=23.0.3,!=23.0.4,!=23.0.5
+galaxy-util<23.2
diff --git a/tests/cwl-conformance/cwltool-conftest.py b/tests/cwl-conformance/cwltool-conftest.py
new file mode 100644
index 000000000..e846b2706
--- /dev/null
+++ b/tests/cwl-conformance/cwltool-conftest.py
@@ -0,0 +1,35 @@
+"""
+Example configuration for pytest + cwltest plugin using cwltool directly.
+
+Calls cwltool via Python, instead of a subprocess via `--cwl-runner cwltool`.
+"""
+import json
+from io import StringIO
+from typing import Any, Dict, List, Optional, Tuple
+
+from cwltest import utils
+
+
+def pytest_cwl_execute_test(
+ config: utils.CWLTestConfig, processfile: str, jobfile: Optional[str]
+) -> Tuple[int, Optional[Dict[str, Any]]]:
+ """Use the CWL reference runner (cwltool) to execute tests."""
+ from cwltool import main
+ from cwltool.errors import WorkflowException
+
+ stdout = StringIO()
+ argsl: List[str] = [f"--outdir={config.outdir}"]
+ if config.runner_quiet:
+ argsl.append("--quiet")
+ elif config.verbose:
+ argsl.append("--debug")
+ argsl.extend(config.args)
+ argsl.append(processfile)
+ if jobfile:
+ argsl.append(jobfile)
+ try:
+ result = main.main(argsl=argsl, stdout=stdout)
+ except WorkflowException:
+ return 1, {}
+ out = stdout.getvalue()
+ return result, json.loads(out) if out else {}
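A note on the hook above: it drives cwltool in-process through `main.main` instead of spawning a subprocess. Below is a minimal standalone sketch of the same pattern, assuming `tests/echo.cwl` and its `--inp` parameter (both appear elsewhere in this repository) and a placeholder output directory; it is not part of the patch.

import json
from io import StringIO

from cwltool import main

stdout = StringIO()
# main.main() returns the exit code and writes the output object,
# as JSON, to the stream passed in as stdout.
exit_code = main.main(
    argsl=["--outdir", "/tmp/out", "tests/echo.cwl", "--inp", "hello"],
    stdout=stdout,
)
out = stdout.getvalue()
print(exit_code, json.loads(out) if out else {})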
diff --git a/tests/nested-array.cwl b/tests/nested-array.cwl
new file mode 100644
index 000000000..8272614fc
--- /dev/null
+++ b/tests/nested-array.cwl
@@ -0,0 +1,11 @@
+cwlVersion: v1.2
+class: CommandLineTool
+baseCommand: echo
+inputs:
+ letters:
+ type: string[][]
+ inputBinding:
+ position: 1
+stdout: echo.txt
+outputs:
+ echo: stdout
diff --git a/tests/reloc/dir1/foo b/tests/reloc/dir1/foo
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tests/reloc/dir2 b/tests/reloc/dir2
deleted file mode 120000
index df490f837..000000000
--- a/tests/reloc/dir2
+++ /dev/null
@@ -1 +0,0 @@
-dir1
\ No newline at end of file
diff --git a/tests/secondary-files-required-container.cwl b/tests/secondary-files-required-container.cwl
new file mode 100644
index 000000000..254470b4f
--- /dev/null
+++ b/tests/secondary-files-required-container.cwl
@@ -0,0 +1,20 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.2
+class: CommandLineTool
+
+hints:
+ DockerRequirement:
+ dockerPull: docker.io/alpine:latest
+
+inputs: []
+
+baseCommand: [ touch, file.ext1, file.ext2 ]
+
+outputs:
+ output:
+ type: File
+ secondaryFiles:
+ - pattern: ^.ext2
+ required: true
+ outputBinding:
+ glob: file.ext1
diff --git a/tests/secondary-files-required-missing-container.cwl b/tests/secondary-files-required-missing-container.cwl
new file mode 100644
index 000000000..931658ae9
--- /dev/null
+++ b/tests/secondary-files-required-missing-container.cwl
@@ -0,0 +1,20 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.2
+class: CommandLineTool
+
+hints:
+ DockerRequirement:
+ dockerPull: docker.io/alpine:latest
+
+inputs: []
+
+baseCommand: [ touch, file.ext1, file.ext2 ]
+
+outputs:
+ output:
+ type: File
+ secondaryFiles:
+ - pattern: ^.ext3
+ required: true
+ outputBinding:
+ glob: file.ext1
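The two tools above differ only in their caret patterns (`^.ext2` versus `^.ext3`). In CWL, each leading `^` in a secondaryFiles pattern strips one extension from the primary file's path before the remainder is appended, so for the primary output `file.ext1` the first tool resolves to the existing `file.ext2` while the second demands the absent `file.ext3`. A rough sketch of that resolution rule (the helper name is illustrative, not cwltool API):

def resolve_caret_pattern(primary: str, pattern: str) -> str:
    """Resolve a CWL secondaryFiles caret pattern against a primary basename."""
    while pattern.startswith("^"):
        primary = primary.rsplit(".", 1)[0]  # each '^' drops one extension
        pattern = pattern[1:]
    return primary + pattern

assert resolve_caret_pattern("file.ext1", "^.ext2") == "file.ext2"  # created by touch
assert resolve_caret_pattern("file.ext1", "^.ext3") == "file.ext3"  # missing, job fails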
diff --git a/tests/test_anon_types.py b/tests/test_anon_types.py
index 6909368ab..19a52cb83 100644
--- a/tests/test_anon_types.py
+++ b/tests/test_anon_types.py
@@ -1,11 +1,11 @@
from typing import cast
import pytest
+from ruamel.yaml.comments import CommentedMap
from schema_salad.sourceline import cmap
from cwltool.command_line_tool import CommandLineTool
from cwltool.context import LoadingContext
-from ruamel.yaml.comments import CommentedMap
snippet = cast(
CommentedMap,
@@ -76,7 +76,8 @@
],
},
],
- "name": "anon_enum_inside_array_inside_schemadef.cwl#vcf2maf_params/ncbi_build",
+ "name": "anon_enum_inside_array_inside_"
+ "schemadef.cwl#vcf2maf_params/ncbi_build",
},
{
"type": [
@@ -89,7 +90,8 @@
],
},
],
- "name": "anon_enum_inside_array_inside_schemadef.cwl#vcf2maf_params/species",
+ "name": "anon_enum_inside_array_inside_"
+ "schemadef.cwl#vcf2maf_params/species",
},
],
}
diff --git a/tests/test_content_type.py b/tests/test_content_type.py
index 9f11880c7..0c3662f0d 100644
--- a/tests/test_content_type.py
+++ b/tests/test_content_type.py
@@ -13,10 +13,7 @@ def test_content_types(caplog: LogCaptureFixture) -> None:
found = False
for record in caplog.records:
- if (
- record.name == "salad"
- and "got content-type of 'text/html'" in record.message
- ):
+ if record.name == "salad" and "got content-type of 'text/html'" in record.message:
found = True
break
assert found
diff --git a/tests/test_cuda.py b/tests/test_cuda.py
index 3ebe476fb..e8de7cd63 100644
--- a/tests/test_cuda.py
+++ b/tests/test_cuda.py
@@ -1,17 +1,17 @@
+from unittest import mock
from unittest.mock import MagicMock
-import mock
import pytest
from schema_salad.avro import schema
from cwltool.builder import Builder
+from cwltool.command_line_tool import CommandLineTool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.cuda import cuda_version_and_device_count
from cwltool.errors import WorkflowException
from cwltool.job import CommandLineJob
from cwltool.load_tool import load_tool
from cwltool.main import main
-from cwltool.pathmapper import PathMapper
from cwltool.process import use_custom_schema
from cwltool.stdfsaccess import StdFsAccess
from cwltool.update import INTERNAL_VERSION
@@ -23,9 +23,7 @@
@needs_docker
-@pytest.mark.skipif(
- cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected"
-)
+@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected")
def test_cuda_docker() -> None:
params = [
"--enable-ext",
@@ -35,9 +33,7 @@ def test_cuda_docker() -> None:
@needs_singularity_3_or_newer
-@pytest.mark.skipif(
- cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected"
-)
+@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected")
def test_cuda_singularity() -> None:
params = [
"--enable-ext",
@@ -47,9 +43,7 @@ def test_cuda_singularity() -> None:
assert main(params) == 0
-@pytest.mark.skipif(
- cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected"
-)
+@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected")
def test_cuda_no_container() -> None:
params = [
"--enable-ext",
@@ -58,9 +52,7 @@ def test_cuda_no_container() -> None:
assert main(params) == 0
-@pytest.mark.skipif(
- cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected"
-)
+@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected")
def test_cuda_cc_list() -> None:
params = [
"--enable-ext",
@@ -88,7 +80,7 @@ def _makebuilder(cudaReq: CWLObjectType) -> Builder:
False,
False,
False,
- "",
+ "no_listing",
"",
"",
"",
@@ -100,7 +92,6 @@ def _makebuilder(cudaReq: CWLObjectType) -> Builder:
@mock.patch("subprocess.check_output")
@mock.patch("os.makedirs")
def test_cuda_job_setup_check(makedirs: MagicMock, check_output: MagicMock) -> None:
-
runtime_context = RuntimeContext({})
cudaReq: CWLObjectType = {
@@ -117,14 +108,13 @@ def test_cuda_job_setup_check(makedirs: MagicMock, check_output: MagicMock) -> N
"""
- jb = CommandLineJob(builder, {}, PathMapper, [], [], "")
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
jb._setup(runtime_context)
@mock.patch("subprocess.check_output")
@mock.patch("os.makedirs")
def test_cuda_job_setup_check_err(makedirs: MagicMock, check_output: MagicMock) -> None:
-
runtime_context = RuntimeContext({})
cudaReq: CWLObjectType = {
@@ -140,11 +130,159 @@ def test_cuda_job_setup_check_err(makedirs: MagicMock, check_output: MagicMock)
<cuda_version>1.0</cuda_version>
</nvidia>
"""
- jb = CommandLineJob(builder, {}, PathMapper, [], [], "")
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
with pytest.raises(WorkflowException):
jb._setup(runtime_context)
+@mock.patch("subprocess.check_output")
+@mock.patch("os.makedirs")
+def test_cuda_job_setup_check_err_empty_attached_gpus(
+ makedirs: MagicMock, check_output: MagicMock, caplog: pytest.LogCaptureFixture
+) -> None:
+ runtime_context = RuntimeContext({})
+
+ cudaReq: CWLObjectType = {
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "1.0",
+ "cudaComputeCapability": "1.0",
+ }
+ builder = _makebuilder(cudaReq)
+
+ check_output.return_value = """
+
+
+1.0
+
+"""
+
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
+ with pytest.raises(WorkflowException):
+ jb._setup(runtime_context)
+ assert (
+ "Error checking CUDA version with nvidia-smi. Missing 'attached_gpus' or it is empty."
+ in caplog.text
+ )
+
+
+@mock.patch("subprocess.check_output")
+@mock.patch("os.makedirs")
+def test_cuda_job_setup_check_err_empty_missing_attached_gpus(
+ makedirs: MagicMock, check_output: MagicMock, caplog: pytest.LogCaptureFixture
+) -> None:
+ runtime_context = RuntimeContext({})
+
+ cudaReq: CWLObjectType = {
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "1.0",
+ "cudaComputeCapability": "1.0",
+ }
+ builder = _makebuilder(cudaReq)
+
+ check_output.return_value = """
+
+1.0
+
+"""
+
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
+ with pytest.raises(WorkflowException):
+ jb._setup(runtime_context)
+ assert (
+ "Error checking CUDA version with nvidia-smi. Missing 'attached_gpus' or it is empty."
+ in caplog.text
+ )
+
+
+@mock.patch("subprocess.check_output")
+@mock.patch("os.makedirs")
+def test_cuda_job_setup_check_err_empty_cuda_version(
+ makedirs: MagicMock, check_output: MagicMock, caplog: pytest.LogCaptureFixture
+) -> None:
+ runtime_context = RuntimeContext({})
+
+ cudaReq: CWLObjectType = {
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "1.0",
+ "cudaComputeCapability": "1.0",
+ }
+ builder = _makebuilder(cudaReq)
+
+ check_output.return_value = """
+
+1
+
+
+"""
+
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
+ with pytest.raises(WorkflowException):
+ jb._setup(runtime_context)
+ assert (
+ "Error checking CUDA version with nvidia-smi. Missing 'cuda_version' or it is empty."
+ in caplog.text
+ )
+
+
+@mock.patch("subprocess.check_output")
+@mock.patch("os.makedirs")
+def test_cuda_job_setup_check_err_missing_cuda_version(
+ makedirs: MagicMock, check_output: MagicMock, caplog: pytest.LogCaptureFixture
+) -> None:
+ runtime_context = RuntimeContext({})
+
+ cudaReq: CWLObjectType = {
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "1.0",
+ "cudaComputeCapability": "1.0",
+ }
+ builder = _makebuilder(cudaReq)
+
+ check_output.return_value = """
+
+1
+
+"""
+
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
+ with pytest.raises(WorkflowException):
+ jb._setup(runtime_context)
+ assert (
+ "Error checking CUDA version with nvidia-smi. Missing 'cuda_version' or it is empty."
+ in caplog.text
+ )
+
+
+@mock.patch("subprocess.check_output")
+@mock.patch("os.makedirs")
+def test_cuda_job_setup_check_err_wrong_type_cuda_version(
+ makedirs: MagicMock, check_output: MagicMock, caplog: pytest.LogCaptureFixture
+) -> None:
+ runtime_context = RuntimeContext({})
+
+ cudaReq: CWLObjectType = {
+ "class": "http://commonwl.org/cwltool#CUDARequirement",
+ "cudaVersionMin": "1.0",
+ "cudaComputeCapability": "1.0",
+ }
+ builder = _makebuilder(cudaReq)
+
+ check_output.return_value = """
+
+1
+
+
+"""
+
+ jb = CommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
+ with pytest.raises(WorkflowException):
+ jb._setup(runtime_context)
+ assert (
+ "Error checking CUDA version with nvidia-smi. "
+ "Either 'attached_gpus' or 'cuda_version' was not a text node" in caplog.text
+ )
+
+
def test_cuda_eval_resource_range() -> None:
with open(get_data("cwltool/extensions-v1.1.yml")) as res:
use_custom_schema("v1.2", "http://commonwl.org/cwltool", res.read())
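The mocked `check_output` values in the tests above stand in for `nvidia-smi -q -x` XML. Here is a sketch of the kind of inspection the asserted error messages imply, assuming only the standard library (cwltool's `cuda_version_and_device_count` does the real parsing):

import xml.dom.minidom

out = "<nvidia><attached_gpus>1</attached_gpus><cuda_version>11.4</cuda_version></nvidia>"
dm = xml.dom.minidom.parseString(out)
ag = dm.getElementsByTagName("attached_gpus")
cv = dm.getElementsByTagName("cuda_version")
# Missing or empty elements have no firstChild -> "Missing ... or it is empty."
if not ag or ag[0].firstChild is None or not cv or cv[0].firstChild is None:
    raise RuntimeError("Missing 'attached_gpus' or 'cuda_version' or it is empty.")
# A non-text child (e.g. a nested element) has no .data -> "was not a text node".
print(cv[0].firstChild.data, int(ag[0].firstChild.data))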
diff --git a/tests/test_default_path.py b/tests/test_default_path.py
index 005bcf0a2..a87e30331 100644
--- a/tests/test_default_path.py
+++ b/tests/test_default_path.py
@@ -5,12 +5,8 @@
def test_default_path() -> None:
"""Error is not raised when default path is not present."""
- loadingContext, workflowobj, uri = fetch_document(
- get_data("tests/wf/default_path.cwl")
- )
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/default_path.cwl"))
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
loader = loadingContext.loader
assert loader
processobj = loader.resolve_ref(uri)[0]
diff --git a/tests/test_dependencies.py b/tests/test_dependencies.py
index e49933fcd..c7964bf9d 100644
--- a/tests/test_dependencies.py
+++ b/tests/test_dependencies.py
@@ -1,9 +1,11 @@
"""Tests of satisfying SoftwareRequirement via dependencies."""
import os
+import tempfile
+from getpass import getuser
from pathlib import Path
from shutil import which
from types import ModuleType
-from typing import Optional
+from typing import Optional, Tuple
import pytest
@@ -13,9 +15,9 @@
from .util import get_data, get_main_output, get_tool_env, needs_docker
-deps = None # type: Optional[ModuleType]
+deps: Optional[ModuleType] = None
try:
- from galaxy.tool_util import deps # type: ignore[no-redef]
+ from galaxy.tool_util import deps
except ImportError:
pass
@@ -26,7 +28,15 @@ def test_biocontainers(tmp_path: Path) -> None:
wflow = get_data("tests/seqtk_seq.cwl")
job = get_data("tests/seqtk_seq_job.json")
error_code, _, _ = get_main_output(
- ["--outdir", str(tmp_path), "--beta-use-biocontainers", wflow, job]
+ [
+ "--outdir",
+ str(tmp_path / "out"),
+ "--beta-use-biocontainers",
+ "--beta-dependencies-directory",
+ str(tmp_path / "deps"),
+ wflow,
+ job,
+ ]
)
assert error_code == 0
@@ -38,34 +48,85 @@ def test_biocontainers_resolution(tmp_path: Path) -> None:
"""Confirm expected container name for --beta-use-biocontainers."""
tool = load_tool(get_data("tests/seqtk_seq.cwl"), LoadingContext())
assert (
- get_container_from_software_requirements(True, tool)
+ get_container_from_software_requirements(
+ True, tool, container_image_cache_path=str(tmp_path)
+ )
== "quay.io/biocontainers/seqtk:r93--0"
)
-@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
-def test_bioconda(tmp_path: Path) -> None:
+@pytest.fixture(scope="session")
+def bioconda_setup(request: pytest.FixtureRequest) -> Tuple[Optional[int], str]:
+ """
+ Caches the conda environment created for seqtk_seq.cwl.
+
+ Respects ``--basetemp`` via code copied from
+ :py:method:`pytest.TempPathFactory.getbasetemp`.
+ """
+
+ assert request.config.cache
+ deps_dir = request.config.cache.get("bioconda_deps", None)
+ if deps_dir is not None and not Path(deps_dir).exists():
+ # cache value set, but cache is gone :( ... recreate
+ deps_dir = None
+
+ if deps_dir is None:
+ given_basetemp = request.config.option.basetemp
+ if given_basetemp is not None:
+ basetemp = Path(os.path.abspath(str(given_basetemp))).resolve()
+ deps_dir = basetemp / "bioconda"
+ else:
+ from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
+ temproot = Path(from_env or tempfile.gettempdir()).resolve()
+ rootdir = temproot.joinpath(f"pytest-of-{getuser() or 'unknown'}")
+ try:
+ rootdir.mkdir(mode=0o700, exist_ok=True)
+ except OSError:
+ rootdir = temproot.joinpath("pytest-of-unknown")
+ rootdir.mkdir(mode=0o700, exist_ok=True)
+ deps_dir = rootdir / "bioconda"
+ request.config.cache.set("bioconda_deps", str(deps_dir))
+
+ deps_dirpath = Path(deps_dir)
+ deps_dirpath.mkdir(parents=True, exist_ok=True)
+
wflow = get_data("tests/seqtk_seq.cwl")
job = get_data("tests/seqtk_seq_job.json")
error_code, _, stderr = get_main_output(
- ["--outdir", str(tmp_path), "--beta-conda-dependencies", "--debug", wflow, job]
+ [
+ "--outdir",
+ str(deps_dirpath / "out"),
+ "--beta-conda-dependencies",
+ "--beta-dependencies-directory",
+ str(deps_dirpath / "deps"),
+ "--debug",
+ wflow,
+ job,
+ ]
)
+ return error_code, stderr
+
+@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
+def test_bioconda(bioconda_setup: Tuple[Optional[int], str]) -> None:
+ error_code, stderr = bioconda_setup
assert error_code == 0, stderr
@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
@pytest.mark.skipif(not which("modulecmd"), reason="modulecmd not installed")
-def test_modules(monkeypatch: pytest.MonkeyPatch) -> None:
+def test_modules(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
"""Do a basic smoke test using environment modules to satisfy a SoftwareRequirement."""
wflow = get_data("tests/random_lines.cwl")
job = get_data("tests/random_lines_job.json")
- monkeypatch.setenv(
- "MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles")
- )
+ monkeypatch.setenv("MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles"))
error_code, _, stderr = get_main_output(
[
+ "--outdir",
+ str(tmp_path / "out"),
"--beta-dependency-resolvers-configuration",
+ "--beta-dependencies-directory",
+ str(tmp_path / "deps"),
"tests/test_deps_env_modules_resolvers_conf.yml",
"--debug",
wflow,
@@ -84,9 +145,7 @@ def test_modules_environment(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) ->
Do so by by running `env` as the tool and parsing its output.
"""
- monkeypatch.setenv(
- "MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles")
- )
+ monkeypatch.setenv("MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles"))
tool_env = get_tool_env(
tmp_path,
[
diff --git a/tests/test_docker.py b/tests/test_docker.py
index d3d049270..abd7811cb 100644
--- a/tests/test_docker.py
+++ b/tests/test_docker.py
@@ -1,11 +1,20 @@
"""Tests for docker engine."""
+import json
import re
from pathlib import Path
from shutil import which
+import pytest
+
from cwltool.main import main
-from .util import get_data, get_main_output, needs_docker
+from .util import (
+ get_data,
+ get_main_output,
+ needs_docker,
+ needs_podman,
+ needs_singularity,
+)
@needs_docker
@@ -136,3 +145,121 @@ def test_docker_strict_memory_limit_warning(tmp_path: Path) -> None:
stderr = re.sub(r"\s\s+", " ", stderr)
assert result_code == 0
assert "Skipping Docker software container '--memory' limit" in stderr
+
+
+@needs_docker
+def test_docker_required_secfile(tmp_path: Path) -> None:
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--outdir",
+ str(tmp_path),
+ get_data("tests/secondary-files-required-container.cwl"),
+ ]
+ )
+ assert result_code == 0, stderr
+ assert (
+ json.loads(stdout)["output"]["secondaryFiles"][0]["checksum"]
+ == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
+ )
+
+
+@needs_podman
+def test_podman_required_secfile(tmp_path: Path) -> None:
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--podman",
+ "--outdir",
+ str(tmp_path),
+ get_data("tests/secondary-files-required-container.cwl"),
+ ]
+ )
+ assert result_code == 0, stderr
+ assert (
+ json.loads(stdout)["output"]["secondaryFiles"][0]["checksum"]
+ == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
+ )
+
+
+@needs_singularity
+def test_singularity_required_secfile(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+ singularity_dir = tmp_path / "singularity"
+ singularity_dir.mkdir()
+ monkeypatch.setenv("CWL_SINGULARITY_CACHE", str(singularity_dir))
+
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--singularity",
+ "--outdir",
+ str(tmp_path / "out"),
+ get_data("tests/secondary-files-required-container.cwl"),
+ ]
+ )
+ assert result_code == 0, stderr
+ assert (
+ json.loads(stdout)["output"]["secondaryFiles"][0]["checksum"]
+ == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
+ )
+
+
+@needs_docker
+def test_docker_required_missing_secfile(tmp_path: Path) -> None:
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--outdir",
+ str(tmp_path),
+ get_data("tests/secondary-files-required-missing-container.cwl"),
+ ]
+ )
+ assert result_code == 1, stderr
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Job error:" in stderr
+ assert "Error collecting output for parameter 'output'" in stderr
+ assert (
+ "tests/secondary-files-required-missing-container.cwl:16:5: Missing required secondary file"
+ in stderr
+ )
+ assert "file.ext3" in stderr
+
+
+@needs_podman
+def test_podman_required_missing_secfile(tmp_path: Path) -> None:
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--podman",
+ "--outdir",
+ str(tmp_path),
+ get_data("tests/secondary-files-required-missing-container.cwl"),
+ ]
+ )
+ assert result_code == 1, stderr
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Job error:" in stderr
+ assert "Error collecting output for parameter 'output'" in stderr
+ assert (
+ "tests/secondary-files-required-missing-container.cwl:16:5: Missing required secondary file"
+ in stderr
+ )
+ assert "file.ext3" in stderr
+
+
+@needs_singularity
+def test_singularity_required_missing_secfile(
+ tmp_path: Path, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ singularity_dir = tmp_path / "singularity"
+ singularity_dir.mkdir()
+ monkeypatch.setenv("CWL_SINGULARITY_CACHE", str(singularity_dir))
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--singularity",
+ "--outdir",
+ str(tmp_path),
+ get_data("tests/secondary-files-required-missing-container.cwl"),
+ ]
+ )
+ assert result_code == 1, stderr
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Job error:" in stderr
+ assert "Error collecting output for parameter 'output'" in stderr
+ assert (
+ "tests/secondary-files-required-missing-container.cwl:16:5: Missing required secondary file"
+ in stderr
+ )
+ assert "file.ext3" in stderr
diff --git a/tests/test_docker_paths_with_colons.py b/tests/test_docker_paths_with_colons.py
index c80a13a1d..ae56de50e 100644
--- a/tests/test_docker_paths_with_colons.py
+++ b/tests/test_docker_paths_with_colons.py
@@ -7,15 +7,10 @@ def test_docker_append_volume_read_only(mocker: Any) -> None:
mocker.patch("os.mkdir")
runtime = ["runtime"]
characters = ":,\"'"
- DockerCommandLineJob.append_volume(
- runtime, "/source" + characters, "/target" + characters
- )
+ DockerCommandLineJob.append_volume(runtime, "/source" + characters, "/target" + characters)
assert runtime == [
"runtime",
- "--mount=type=bind,"
- '"source=/source:,""\'",'
- '"target=/target:,""\'",'
- "readonly",
+ "--mount=type=bind," '"source=/source:,""\'",' '"target=/target:,""\'",' "readonly",
]
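The expected `--mount` value above uses Docker's CSV-style quoting: fields containing commas or quotes are wrapped in double quotes, and embedded quotes are doubled. A sketch, assuming the standard-library csv module reproduces that quoting (the helper is illustrative, not the `append_volume` implementation):

import csv
from io import StringIO

def mount_arg(source: str, target: str, readonly: bool = True) -> str:
    """Build a docker --mount flag with CSV quoting for awkward paths."""
    buf = StringIO()
    fields = ["type=bind", f"source={source}", f"target={target}"]
    if readonly:
        fields.append("readonly")
    csv.writer(buf).writerow(fields)
    return "--mount=" + buf.getvalue().strip()

characters = ":,\"'"
print(mount_arg("/source" + characters, "/target" + characters))
# --mount=type=bind,"source=/source:,""'","target=/target:,""'",readonly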
diff --git a/tests/test_environment.py b/tests/test_environment.py
index 9ed10ec2a..0fad65d7f 100644
--- a/tests/test_environment.py
+++ b/tests/test_environment.py
@@ -26,9 +26,9 @@ def assert_envvar_matches(check: CheckerTypes, k: str, env: Mapping[str, str]) -
else:
v = env[k]
if isinstance(check, str):
- assert v == check, f'Environment variable {k} == "{v}" != "{check}"'
+ assert v == check, f"Environment variable {k} == {v!r} != {check!r}"
else:
- assert check(v, env), f'Environment variable {k}="{v}" fails check'
+ assert check(v, env), f"Environment variable {k}={v!r} fails check"
def assert_env_matches(
@@ -42,7 +42,7 @@ def assert_env_matches(
e = dict(env)
for k, check in checks.items():
assert k in e
- v = e.pop(k)
+ e.pop(k)
assert_envvar_matches(check, k, env)
if not allow_unexpected:
@@ -210,9 +210,7 @@ def test_basic(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> Non
@CRT_PARAMS
-def test_preserve_single(
- crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any
-) -> None:
+def test_preserve_single(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> None:
"""Test that preserving a single env var works."""
tmp_prefix = str(tmp_path / "canary")
extra_env = {
@@ -236,9 +234,7 @@ def test_preserve_single(
@CRT_PARAMS
-def test_preserve_all(
- crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any
-) -> None:
+def test_preserve_all(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> None:
"""Test that preserving all works."""
tmp_prefix = str(tmp_path / "canary")
extra_env = {
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 99768917d..1e4e939c6 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -6,6 +6,7 @@
import stat
import subprocess
import sys
+import urllib.parse
from io import StringIO
from pathlib import Path
from typing import Any, Dict, List, Union, cast
@@ -15,6 +16,7 @@
import pytest
from cwl_utils.errors import JavascriptException
from cwl_utils.sandboxjs import param_re
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.exceptions import ValidationException
import cwltool.checker
@@ -28,7 +30,6 @@
from cwltool.main import main
from cwltool.process import CWL_IANA
from cwltool.utils import CWLObjectType, dedup
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
from .util import get_data, get_main_output, needs_docker, working_directory
@@ -195,9 +196,7 @@ def test_parameter_to_expression(pattern: str, expected: Any) -> None:
)
-@pytest.mark.parametrize(
- "pattern,expected,behavior", param_to_expr_interpolate_escapebehavior
-)
+@pytest.mark.parametrize("pattern,expected,behavior", param_to_expr_interpolate_escapebehavior)
def test_parameter_to_expression_interpolate_escapebehavior(
pattern: str, expected: str, behavior: int
) -> None:
@@ -250,9 +249,8 @@ def test_parameter_to_expression_interpolate_escapebehavior(
@pytest.mark.parametrize("pattern", interpolate_bad_parameters)
def test_expression_interpolate_failures(pattern: str) -> None:
- result = None
with pytest.raises(JavascriptException):
- result = expr.interpolate(pattern, interpolate_input)
+ expr.interpolate(pattern, interpolate_input)
interpolate_escapebehavior = (
@@ -280,14 +278,9 @@ def test_expression_interpolate_failures(pattern: str) -> None:
@pytest.mark.parametrize("pattern,expected,behavior", interpolate_escapebehavior)
-def test_expression_interpolate_escapebehavior(
- pattern: str, expected: str, behavior: int
-) -> None:
+def test_expression_interpolate_escapebehavior(pattern: str, expected: str, behavior: int) -> None:
"""Test escaping behavior in an interpolation context."""
- assert (
- expr.interpolate(pattern, interpolate_input, escaping_behavior=behavior)
- == expected
- )
+ assert expr.interpolate(pattern, interpolate_input, escaping_behavior=behavior) == expected
def test_factory() -> None:
@@ -331,13 +324,9 @@ def test_factory_partial_scatter() -> None:
result = err_info.value.out
assert isinstance(result, dict)
- assert (
- result["out"][0]["checksum"] == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e"
- )
+ assert result["out"][0]["checksum"] == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e"
assert result["out"][1] is None
- assert (
- result["out"][2]["checksum"] == "sha1$a3db5c13ff90a36963278c6a39e4ee3c22e2a436"
- )
+ assert result["out"][2]["checksum"] == "sha1$a3db5c13ff90a36963278c6a39e4ee3c22e2a436"
def test_factory_partial_output() -> None:
@@ -614,15 +603,18 @@ def test_dedupe() -> None:
"fields": [
{
"type": {"items": "string", "type": "array"},
- "name": "file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec/description",
+ "name": "file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/"
+ "run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec/description",
},
{
"type": {"items": "File", "type": "array"},
- "name": "file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec/vrn_file",
+ "name": "file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/"
+ "run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec/vrn_file",
},
],
"type": "record",
- "name": "file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec",
+ "name": "file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/"
+ "run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec",
}
source_to_sink = [
@@ -810,16 +802,12 @@ def test_compare_types_strict(
]
-@pytest.mark.parametrize(
- "src_type,sink_type,link_merge,value_from,expected_type", typechecks
-)
+@pytest.mark.parametrize("src_type,sink_type,link_merge,value_from,expected_type", typechecks)
def test_typechecking(
src_type: Any, sink_type: Any, link_merge: str, value_from: Any, expected_type: str
) -> None:
assert (
- cwltool.checker.check_types(
- src_type, sink_type, linkMerge=link_merge, valueFrom=value_from
- )
+ cwltool.checker.check_types(src_type, sink_type, linkMerge=link_merge, valueFrom=value_from)
== expected_type
)
@@ -829,8 +817,7 @@ def test_lifting() -> None:
# fails if the step 'out' doesn't match.
factory = cwltool.factory.Factory()
with pytest.raises(ValidationException):
- echo = factory.make(get_data("tests/test_bad_outputs_wf.cwl"))
- assert echo(inp="foo") == {"out": "foo\n"}
+ factory.make(get_data("tests/test_bad_outputs_wf.cwl"))
def test_malformed_outputs() -> None:
@@ -869,8 +856,8 @@ def test_format_expr_error() -> None:
stderr = re.sub(r"\s\s+", " ", stderr)
assert (
"An expression in the 'format' field must evaluate to a string, or list "
- "of strings. However a non-string item was received: '42' of "
- "type ''." in stderr
+ "of strings. However a non-string item was received: 42 of "
+ "type ." in stderr
)
@@ -1000,9 +987,7 @@ def test_var_spool_cwl_checker3() -> None:
factory = cwltool.factory.Factory()
try:
factory.make(get_data("tests/portable.cwl"))
- assert (
- "Non-portable reference to /var/spool/cwl detected" not in stream.getvalue()
- )
+ assert "Non-portable reference to /var/spool/cwl detected" not in stream.getvalue()
finally:
_logger.removeHandler(streamhandler)
@@ -1060,12 +1045,8 @@ def test_print_dot() -> None:
stdout = StringIO()
assert main(["--debug", "--print-dot", cwl_path], stdout=stdout) == 0
computed_dot = pydot.graph_from_dot_data(stdout.getvalue())[0]
- computed_edges = sorted(
- (source, target) for source, target in computed_dot.obj_dict["edges"]
- )
- expected_edges = sorted(
- (source, target) for source, target in expected_dot.obj_dict["edges"]
- )
+ computed_edges = sorted((source, target) for source, target in computed_dot.obj_dict["edges"])
+ expected_edges = sorted((source, target) for source, target in expected_dot.obj_dict["edges"])
assert computed_edges == expected_edges
# print CommandLineTool
@@ -1078,14 +1059,10 @@ def test_print_dot() -> None:
@pytest.mark.parametrize("factor", test_factors)
-def test_js_console_cmd_line_tool(
- factor: str, caplog: pytest.LogCaptureFixture
-) -> None:
+def test_js_console_cmd_line_tool(factor: str, caplog: pytest.LogCaptureFixture) -> None:
for test_file in ("js_output.cwl", "js_output_workflow.cwl"):
commands = factor.split()
- commands.extend(
- ["--js-console", "--no-container", get_data("tests/wf/" + test_file)]
- )
+ commands.extend(["--js-console", "--no-container", get_data("tests/wf/" + test_file)])
error_code, _, _ = get_main_output(commands)
logging_output = "\n".join([record.message for record in caplog.records])
assert "[log] Log message" in logging_output
@@ -1113,9 +1090,7 @@ def test_cid_file_dir(tmp_path: Path, factor: str) -> None:
test_file = "cache_test_workflow.cwl"
with working_directory(tmp_path):
commands = factor.split()
- commands.extend(
- ["--cidfile-dir", str(tmp_path), get_data("tests/wf/" + test_file)]
- )
+ commands.extend(["--cidfile-dir", str(tmp_path), get_data("tests/wf/" + test_file)])
error_code, stdout, stderr = get_main_output(commands)
stderr = re.sub(r"\s\s+", " ", stderr)
assert "completed success" in stderr
@@ -1133,9 +1108,7 @@ def test_cid_file_dir_arg_is_file_instead_of_dir(tmp_path: Path, factor: str) ->
bad_cidfile_dir = tmp_path / "cidfile-dir-actually-a-file"
bad_cidfile_dir.touch()
commands = factor.split()
- commands.extend(
- ["--cidfile-dir", str(bad_cidfile_dir), get_data("tests/wf/" + test_file)]
- )
+ commands.extend(["--cidfile-dir", str(bad_cidfile_dir), get_data("tests/wf/" + test_file)])
error_code, _, stderr = get_main_output(commands)
stderr = re.sub(r"\s\s+", " ", stderr)
assert "is not a directory, please check it first" in stderr, stderr
@@ -1318,6 +1291,8 @@ def test_cache_relative_paths(tmp_path: Path, factor: str) -> None:
commands = factor.split()
commands.extend(
[
+ "--out",
+ str(tmp_path / "out"),
"--cachedir",
cache_dir,
get_data(f"tests/{test_file}"),
@@ -1333,6 +1308,8 @@ def test_cache_relative_paths(tmp_path: Path, factor: str) -> None:
commands = factor.split()
commands.extend(
[
+ "--out",
+ str(tmp_path / "out2"),
"--cachedir",
cache_dir,
get_data(f"tests/{test_file}"),
@@ -1351,6 +1328,8 @@ def test_cache_relative_paths(tmp_path: Path, factor: str) -> None:
def test_write_summary(tmp_path: Path) -> None:
"""Test --write-summary."""
commands = [
+ "--out",
+ str(tmp_path / "out1"),
get_data("tests/wf/no-parameters-echo.cwl"),
]
error_code, stdout, stderr = get_main_output(commands)
@@ -1359,6 +1338,8 @@ def test_write_summary(tmp_path: Path) -> None:
final_output_path = str(tmp_path / "final-output.json")
commands_no = [
+ "--out",
+ str(tmp_path / "out2"),
"--write-summary",
final_output_path,
get_data("tests/wf/no-parameters-echo.cwl"),
@@ -1374,10 +1355,11 @@ def test_write_summary(tmp_path: Path) -> None:
@needs_docker
-def test_compute_checksum() -> None:
+def test_compute_checksum(tmp_path: Path) -> None:
runtime_context = RuntimeContext()
runtime_context.compute_checksum = True
runtime_context.use_container = False
+ runtime_context.outdir = str(tmp_path)
factory = cwltool.factory.Factory(runtime_context=runtime_context)
echo = factory.make(get_data("tests/wf/cat-tool.cwl"))
output = echo(
@@ -1402,7 +1384,7 @@ def test_bad_stdin_expr_error() -> None:
assert error_code == 1
stderr = re.sub(r"\s\s+", " ", stderr)
assert (
- "'stdin' expression must return a string or null. Got '1111' for '$(inputs.file1.size)'."
+ "'stdin' expression must return a string or null. Got 1111 for '$(inputs.file1.size)'."
in stderr
)
@@ -1419,8 +1401,7 @@ def test_bad_stderr_expr_error() -> None:
assert error_code == 1
stderr = re.sub(r"\s\s+", " ", stderr)
assert (
- "'stderr' expression must return a string. Got '1111' for '$(inputs.file1.size)'."
- in stderr
+ "'stderr' expression must return a string. Got 1111 for '$(inputs.file1.size)'." in stderr
)
@@ -1436,16 +1417,17 @@ def test_bad_stdout_expr_error() -> None:
assert error_code == 1
stderr = re.sub(r"\s\s+", " ", stderr)
assert (
- "'stdout' expression must return a string. Got '1111' for '$(inputs.file1.size)'."
- in stderr
+ "'stdout' expression must return a string. Got 1111 for '$(inputs.file1.size)'." in stderr
)
@needs_docker
-def test_stdin_with_id_preset() -> None:
+def test_stdin_with_id_preset(tmp_path: Path) -> None:
"""Confirm that a type: stdin with a preset id does not give an error."""
error_code, _, stderr = get_main_output(
[
+ "--out",
+ str(tmp_path),
get_data("tests/wf/1590.cwl"),
"--file1",
get_data("tests/wf/whale.txt"),
@@ -1492,7 +1474,9 @@ def test_bad_userspace_runtime(factor: str) -> None:
)
error_code, stdout, stderr = get_main_output(commands)
stderr = re.sub(r"\s\s+", " ", stderr)
- assert "or quaquioN is missing or broken" in stderr, stderr
+ assert ("or quaquioN is missing or broken" in stderr) or (
+ "No such file or directory: 'quaquioN'" in stderr
+ ), stderr
assert error_code == 1
@@ -1578,7 +1562,8 @@ def test_env_filtering(factor: str) -> None:
local trueExe nextTarget 2>/dev/null
trueExe=$(ps -o comm= $$) || return 1
[ "${trueExe#-}" = "$trueExe" ] || trueExe=${trueExe#-}
- [ "${trueExe#/}" != "$trueExe" ] || trueExe=$([ -n "$ZSH_VERSION" ] && which -p "$trueExe" || which "$trueExe")
+ [ "${trueExe#/}" != "$trueExe" ] || trueExe=$([ -n "$ZSH_VERSION" ] \
+ && which -p "$trueExe" || which "$trueExe")
while nextTarget=$(readlink "$trueExe"); do trueExe=$nextTarget; done
printf '%s\n' "$(basename "$trueExe")"
} ; getTrueShellExeName""",
@@ -1610,17 +1595,15 @@ def test_env_filtering(factor: str) -> None:
def test_v1_0_arg_empty_prefix_separate_false() -> None:
test_file = "tests/arg-empty-prefix-separate-false.cwl"
- error_code, stdout, stderr = get_main_output(
- ["--debug", get_data(test_file), "--echo"]
- )
+ error_code, stdout, stderr = get_main_output(["--debug", get_data(test_file), "--echo"])
stderr = re.sub(r"\s\s+", " ", stderr)
assert "completed success" in stderr
assert error_code == 0
def test_scatter_output_filenames(tmp_path: Path) -> None:
- """If a scatter step produces identically named output then confirm that the final output is renamed correctly."""
- cwd = Path.cwd()
+ """Confirm that the final output is renamed correctly from identically named scatter outputs."""
+ cwd = tmp_path
with working_directory(tmp_path):
rtc = RuntimeContext()
rtc.outdir = str(cwd)
@@ -1680,10 +1663,7 @@ def test_arguments_self() -> None:
outputs = cast(Dict[str, Any], check())
assert "self_review" in outputs
assert len(outputs) == 1
- assert (
- outputs["self_review"]["checksum"]
- == "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1"
- )
+ assert outputs["self_review"]["checksum"] == "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1"
def test_bad_timelimit_expr() -> None:
@@ -1711,7 +1691,7 @@ def test_bad_networkaccess_expr() -> None:
stderr = re.sub(r"\s\s+", " ", stderr)
assert (
"'networkAccess' expression must evaluate to a bool. "
- "Got '42' for expression '${return 42;}" in stderr
+ "Got 42 for expression '${return 42;}" in stderr
)
assert err_code == 1
@@ -1740,7 +1720,7 @@ def test_expression_tool_class() -> None:
factory = cwltool.factory.Factory()
tool_path = get_data("tests/wf/parseInt-tool.cwl")
expression_tool = factory.make(tool_path).t
- assert str(expression_tool) == f"ExpressionTool: file://{tool_path}"
+ assert urllib.parse.unquote(str(expression_tool)) == f"ExpressionTool: file://{tool_path}"
def test_operation_class() -> None:
@@ -1748,7 +1728,7 @@ def test_operation_class() -> None:
factory = cwltool.factory.Factory()
tool_path = get_data("tests/wf/operation/abstract-cosifer.cwl")
expression_tool = factory.make(tool_path).t
- assert str(expression_tool) == f"AbstractOperation: file://{tool_path}"
+ assert urllib.parse.unquote(str(expression_tool)) == f"AbstractOperation: file://{tool_path}"
def test_command_line_tool_class() -> None:
@@ -1756,13 +1736,13 @@ def test_command_line_tool_class() -> None:
factory = cwltool.factory.Factory()
tool_path = get_data("tests/echo.cwl")
expression_tool = factory.make(tool_path).t
- assert str(expression_tool) == f"CommandLineTool: file://{tool_path}"
+ assert urllib.parse.unquote(str(expression_tool)) == f"CommandLineTool: file://{tool_path}"
-def test_record_default_with_long() -> None:
+def test_record_default_with_long(tmp_path: Path) -> None:
"""Confirm that record defaults are respected."""
tool_path = get_data("tests/wf/paramref_arguments_roundtrip.cwl")
- err_code, stdout, stderr = get_main_output([tool_path])
+ err_code, stdout, stderr = get_main_output(["--outdir", str(tmp_path), tool_path])
assert err_code == 0
result = json.loads(stdout)["same_record"]
assert result["first"] == "y"
@@ -1773,9 +1753,7 @@ def test_record_default_with_long() -> None:
assert result["sixth"]["class"] == "File"
assert result["sixth"]["basename"] == "whale.txt"
assert result["sixth"]["size"] == 1111
- assert (
- result["sixth"]["checksum"] == "sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376"
- )
+ assert result["sixth"]["checksum"] == "sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376"
def test_record_outputeval(tmp_path: Path) -> None:
@@ -1787,21 +1765,12 @@ def test_record_outputeval(tmp_path: Path) -> None:
assert "genome_fa" in result
assert result["genome_fa"]["class"] == "File"
assert result["genome_fa"]["basename"] == "GRCm38.primary_assembly.genome.fa"
- assert (
- result["genome_fa"]["checksum"]
- == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
- )
+ assert result["genome_fa"]["checksum"] == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
assert result["genome_fa"]["size"] == 0
assert "annotation_gtf" in result
assert result["annotation_gtf"]["class"] == "File"
- assert (
- result["annotation_gtf"]["basename"]
- == "gencode.vM21.primary_assembly.annotation.gtf"
- )
- assert (
- result["annotation_gtf"]["checksum"]
- == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
- )
+ assert result["annotation_gtf"]["basename"] == "gencode.vM21.primary_assembly.annotation.gtf"
+ assert result["annotation_gtf"]["checksum"] == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
assert result["annotation_gtf"]["size"] == 0
@@ -1832,3 +1801,74 @@ def test_validate_optional_src_with_mandatory_sink() -> None:
stderr = re.sub(r"\s\s+", " ", stderr)
assert 'Source \'opt_file\' of type ["null", "File"] may be incompatible' in stderr
assert "with sink 'r' of type \"File\"" in stderr
+
+
+def test_res_req_expr_float_1_0() -> None:
+ """Confirm expected error when returning a float value from a ResourceRequirement expr in CWL v1.0."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ get_data("tests/wf/resreq_expr_float_v1_0.cwl"),
+ "--input_bam",
+ get_data("tests/wf/whale.txt"),
+ ]
+ )
+ assert exit_code == 1
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Floats are not valid in resource requirement expressions" in stderr
+ assert "prior to CWL v1.2" in stderr
+ assert "$((2 * inputs.input_bam.size) / 3.14159) returned" in stderr
+
+
+def test_res_req_expr_float_1_2() -> None:
+ """Confirm no error when returning a float value from a ResourceRequirement expr in CWL v1.0."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ get_data("tests/wf/resreq_expr_float_v1_2.cwl"),
+ "--input_bam",
+ get_data("tests/wf/whale.txt"),
+ ]
+ )
+ assert exit_code == 0, stderr
+ assert json.loads(stdout)["result"]["outdirSize"] >= 708
+ assert json.loads(stdout)["result"]["tmpdirSize"] >= 708
+
+
+def test_very_small_and_large_floats() -> None:
+ """Confirm that very small or large numbers are not transformed into scientific notation."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ get_data("tests/wf/floats_small_and_large.cwl"),
+ ]
+ )
+ assert exit_code == 0, stderr
+ assert json.loads(stdout)["result"] == "0.00001 0.0000123 123000 1230000"
+
+
+def test_invalid_nested_array() -> None:
+ """Test feature proposed for CWL v1.3 in a CWL v1.2 document."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ get_data("tests/nested-array.cwl"),
+ ]
+ )
+ assert exit_code == 1, stderr
+ stderr = re.sub(r"\n\s+", " ", stderr)
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Tool definition failed validation:" in stderr
+ assert (
+ "tests/nested-array.cwl:6:5: Field 'type' references unknown identifier 'string[][]'"
+ ) in stderr
+
+
+def test_input_named_id() -> None:
+ """Confirm that it is valid to have an input named "id"."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ "--debug",
+ get_data("tests/wf/input_named_id.cwl"),
+ get_data("tests/wf/input_named_id.yaml"),
+ ]
+ )
+ assert exit_code == 0, stderr
diff --git a/tests/test_ext.py b/tests/test_ext.py
index 1eb4091e5..0d2665ca1 100644
--- a/tests/test_ext.py
+++ b/tests/test_ext.py
@@ -8,16 +8,16 @@
import cwltool.process
from cwltool.main import main
-from .util import get_data, needs_docker
+from .util import get_data, get_main_output, needs_docker
@needs_docker
def test_missing_enable_ext() -> None:
- # Require that --enable-ext is provided.
- assert (
- main([get_data("tests/wf/listing_deep.cwl"), get_data("tests/listing-job.yml")])
- != 0
+ """Require that --enable-ext is provided."""
+ error_code, _, _ = get_main_output(
+ [get_data("tests/wf/listing_deep.cwl"), get_data("tests/listing-job.yml")]
)
+ assert error_code != 0
@needs_docker
@@ -67,20 +67,7 @@ def test_listing_none() -> None:
@needs_docker
def test_listing_v1_0() -> None:
# Default behavior in 1.0 is deep expansion.
- assert (
- main([get_data("tests/wf/listing_v1_0.cwl"), get_data("tests/listing-job.yml")])
- == 0
- )
-
-
-@pytest.mark.skip(reason="This is not the default behaviour yet")
-@needs_docker
-def test_listing_v1_1() -> None:
- # Default behavior in 1.1 will be no expansion
- assert (
- main([get_data("tests/wf/listing_v1_1.cwl"), get_data("tests/listing-job.yml")])
- != 0
- )
+ assert main([get_data("tests/wf/listing_v1_0.cwl"), get_data("tests/listing-job.yml")]) == 0
@needs_docker
@@ -156,10 +143,7 @@ def test_disable_dir_overwrite_without_ext(tmp_path: Path) -> None:
out = tmp_path / "outdir"
tmp.mkdir()
out.mkdir()
- assert (
- main(["--outdir", str(out), get_data("tests/wf/updatedir.cwl"), "-r", str(tmp)])
- == 0
- )
+ assert main(["--outdir", str(out), get_data("tests/wf/updatedir.cwl"), "-r", str(tmp)]) == 0
assert not os.listdir(tmp)
assert os.listdir(out)
@@ -226,9 +210,7 @@ def test_write_write_conflict(tmp_path: Path) -> None:
with open(tmp_name, "w") as f:
f.write(before_value)
- assert (
- main(["--enable-ext", get_data("tests/wf/mut.cwl"), "-a", str(tmp_name)]) != 0
- )
+ assert main(["--enable-ext", get_data("tests/wf/mut.cwl"), "-a", str(tmp_name)]) != 0
with open(tmp_name) as f:
tmp_value = f.read()
@@ -243,9 +225,7 @@ def test_read_write_conflict(tmp_path: Path) -> None:
with open(tmp_name, "w") as f:
f.write("1")
- assert (
- main(["--enable-ext", get_data("tests/wf/mut3.cwl"), "-a", str(tmp_name)]) != 0
- )
+ assert main(["--enable-ext", get_data("tests/wf/mut3.cwl"), "-a", str(tmp_name)]) != 0
@needs_docker
@@ -289,9 +269,19 @@ def test_warn_large_inputs() -> None:
stderr=stream,
)
- assert (
- "Recursive directory listing has resulted in a large number of File"
- in re.sub("\n *", " ", stream.getvalue())
+ assert "Recursive directory listing has resulted in a large number of File" in re.sub(
+ "\n *", " ", stream.getvalue()
)
finally:
cwltool.process.FILE_COUNT_WARNING = was
+
+
+def test_ext_validation_no_namespace_warning() -> None:
+ error_code, stdout, stderr = get_main_output(
+ ["--validate", "--enable-ext", get_data("tests/wf/mpi_env.cwl")]
+ )
+ assert error_code == 0
+ assert (
+ "URI prefix 'cwltool' of 'cwltool:loop' not recognized, are you "
+ "missing a $namespaces section?"
+ ) not in stderr
diff --git a/tests/test_fetch.py b/tests/test_fetch.py
index 2c06bbfc1..e55491d90 100644
--- a/tests/test_fetch.py
+++ b/tests/test_fetch.py
@@ -66,9 +66,7 @@ def test_resolver(d: Any, a: str) -> str:
load_tool("foo.cwl", loadingContext)
- assert (
- main(["--print-pre", "--debug", "foo.cwl"], loadingContext=loadingContext) == 0
- )
+ assert main(["--print-pre", "--debug", "foo.cwl"], loadingContext=loadingContext) == 0
root = Path(os.path.join(get_data("")))
diff --git a/tests/test_http_input.py b/tests/test_http_input.py
index 00f6e50af..6b4d9b479 100644
--- a/tests/test_http_input.py
+++ b/tests/test_http_input.py
@@ -12,12 +12,14 @@
def test_http_path_mapping(tmp_path: Path) -> None:
-
- input_file_path = "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta"
+ input_file_path = (
+ "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta"
+ )
base_file: List[CWLObjectType] = [
{
"class": "File",
- "location": "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta",
+ "location": "https://raw.githubusercontent.com/common-workflow-language/"
+ "cwltool/main/tests/2.fasta",
"basename": "chr20.fa",
}
]
diff --git a/tests/test_input_deps.py b/tests/test_input_deps.py
index 3b247e1b1..431c51914 100644
--- a/tests/test_input_deps.py
+++ b/tests/test_input_deps.py
@@ -109,7 +109,6 @@ def test_input_deps_secondary_files() -> None:
"""Affirm that secondaryFiles are also represented."""
stream = StringIO()
- data_path = get_data("tests/wf/whale.txt")
main(
[
"--print-input-deps",
diff --git a/tests/test_iwdr.py b/tests/test_iwdr.py
index 9eea8a8e1..5a370ae55 100644
--- a/tests/test_iwdr.py
+++ b/tests/test_iwdr.py
@@ -5,6 +5,8 @@
from stat import S_IWGRP, S_IWOTH, S_IWRITE
from typing import Any
+import pytest
+
from cwltool.factory import Factory
from cwltool.main import main
@@ -35,9 +37,7 @@ def test_passthrough_successive(tmp_path: Path) -> None:
]
)
assert err_code == 0
- children = sorted(
- tmp_path.glob("*")
- ) # This input directory should be left pristine.
+ children = sorted(tmp_path.glob("*")) # This input directory should be left pristine.
assert len(children) == 1
subdir = tmp_path / children[0]
assert len(sorted(subdir.glob("*"))) == 1
@@ -72,7 +72,7 @@ def test_bad_listing_expression(tmp_path: Path) -> None:
assert (
"Expression in a 'InitialWorkdirRequirement.listing' field must return "
"a list containing zero or more of: File or Directory objects; Dirent "
- "objects. Got '42' among the results" in stderr
+ "objects. Got 42 among the results" in stderr
)
assert err_code == 1
@@ -248,7 +248,9 @@ def test_iwdr_permutations_inplace(tmp_path_factory: Any) -> None:
@needs_singularity
-def test_iwdr_permutations_singularity(tmp_path_factory: Any) -> None:
+def test_iwdr_permutations_singularity(
+ tmp_path_factory: pytest.TempPathFactory, monkeypatch: pytest.MonkeyPatch
+) -> None:
misc = tmp_path_factory.mktemp("misc")
fifth = misc / "fifth"
fifth.mkdir()
@@ -271,6 +273,8 @@ def test_iwdr_permutations_singularity(tmp_path_factory: Any) -> None:
twelfth = misc / "twelfth"
twelfth.touch()
outdir = str(tmp_path_factory.mktemp("outdir"))
+ singularity_dir = str(tmp_path_factory.mktemp("singularity"))
+ monkeypatch.setenv("CWL_SINGULARITY_CACHE", singularity_dir)
err_code, stdout, _ = get_main_output(
[
"--outdir",
@@ -308,7 +312,9 @@ def test_iwdr_permutations_singularity(tmp_path_factory: Any) -> None:
@needs_singularity
-def test_iwdr_permutations_singularity_inplace(tmp_path_factory: Any) -> None:
+def test_iwdr_permutations_singularity_inplace(
+ tmp_path_factory: pytest.TempPathFactory, monkeypatch: pytest.MonkeyPatch
+) -> None:
"""IWDR tests using --singularity and a forced InplaceUpdateRequirement."""
misc = tmp_path_factory.mktemp("misc")
fifth = misc / "fifth"
@@ -332,6 +338,8 @@ def test_iwdr_permutations_singularity_inplace(tmp_path_factory: Any) -> None:
twelfth = misc / "twelfth"
twelfth.touch()
outdir = str(tmp_path_factory.mktemp("outdir"))
+ singularity_dir = str(tmp_path_factory.mktemp("singularity"))
+ monkeypatch.setenv("CWL_SINGULARITY_CACHE", singularity_dir)
assert (
main(
[
diff --git a/tests/test_js_sandbox.py b/tests/test_js_sandbox.py
index f27a8ada0..9739c77a7 100644
--- a/tests/test_js_sandbox.py
+++ b/tests/test_js_sandbox.py
@@ -63,6 +63,10 @@ def hide_nodejs(temp_dir: Path) -> str:
if entry not in ("nodejs", "node"):
os.symlink(os.path.join(dirname, entry), new_dir / entry)
paths.append(str(new_dir))
+ dirname_path = Path(dirname).resolve()
+ paths = [entry for entry in paths if Path(entry).resolve() != dirname_path]
return ":".join(paths)
@@ -94,12 +98,17 @@ def test_value_from_two_concatenated_expressions_singularity(
js_engine = sandboxjs.get_js_engine()
js_engine.have_node_slim = False # type: ignore[attr-defined]
js_engine.localdata = threading.local() # type: ignore[attr-defined]
- new_paths = hide_nodejs(tmp_path)
+ hide_base = tmp_path / "hide"
+ hide_base.mkdir()
+ new_paths = hide_nodejs(hide_base)
+ singularity_cache = tmp_path / "singularity"
+ singularity_cache.mkdir()
factory = Factory()
factory.loading_context.singularity = True
factory.loading_context.debug = True
factory.runtime_context.debug = True
with monkeypatch.context() as m:
+ m.setenv("CWL_SINGULARITY_CACHE", str(singularity_cache))
m.setenv("PATH", new_paths)
echo = factory.make(get_data("tests/wf/vf-concat.cwl"))
file = {"class": "File", "location": get_data("tests/wf/whale.txt")}
diff --git a/tests/test_load_tool.py b/tests/test_load_tool.py
index 26ba3f9df..cf2cb620b 100644
--- a/tests/test_load_tool.py
+++ b/tests/test_load_tool.py
@@ -1,8 +1,10 @@
"""Tests for cwltool.load_tool."""
import logging
+import urllib.parse
from pathlib import Path
import pytest
+from schema_salad.exceptions import ValidationException
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
@@ -24,7 +26,7 @@ def test_check_version() -> None:
Attempting to execute without updating to the internal version should raise an error.
"""
- joborder = {"inp": "abc"} # type: CWLObjectType
+ joborder: CWLObjectType = {"inp": "abc"}
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
for _ in tool.job(joborder, None, RuntimeContext()):
@@ -51,9 +53,7 @@ def test_use_metadata() -> None:
def test_checklink_outputSource() -> None:
"""Is outputSource resolved correctly independent of value of do_validate."""
- outsrc = (
- Path(get_data("tests/wf/1st-workflow.cwl")).as_uri() + "#argument/classfile"
- )
+ outsrc = Path(get_data("tests/wf/1st-workflow.cwl")).as_uri() + "#argument/classfile"
loadingContext = LoadingContext({"do_validate": True})
tool = load_tool(get_data("tests/wf/1st-workflow.cwl"), loadingContext)
@@ -122,11 +122,48 @@ def test_load_graph_fragment_from_packed() -> None:
# This was solved by making a shallow copy of the metadata
# dict to ensure that the updater did not modify the original
# document.
- uri2 = (
- Path(get_data("tests/wf/packed-with-loadlisting.cwl")).as_uri()
- + "#16169-step.cwl"
- )
- tool2 = load_tool(uri2, loadingContext)
+ uri2 = Path(get_data("tests/wf/packed-with-loadlisting.cwl")).as_uri() + "#16169-step.cwl"
+ load_tool(uri2, loadingContext)
finally:
use_standard_schema("v1.0")
+
+
+def test_import_tracked() -> None:
+ """Test that $import and $include are tracked in the index."""
+
+ loadingContext = LoadingContext({"fast_parser": True})
+ tool = load_tool(get_data("tests/wf/811-12.cwl"), loadingContext)
+ path = f"import:file://{get_data('tests/wf/schemadef-type.yml')}"
+ path2 = f"import:file://{urllib.parse.quote(get_data('tests/wf/schemadef-type.yml'))}"
+
+ assert tool.doc_loader is not None
+ assert path in tool.doc_loader.idx or path2 in tool.doc_loader.idx
+
+ loadingContext = LoadingContext({"fast_parser": False})
+ tool = load_tool(get_data("tests/wf/811.cwl"), loadingContext)
+
+ assert tool.doc_loader is not None
+ assert path in tool.doc_loader.idx or path2 in tool.doc_loader.idx
+
+
+def test_load_badhints() -> None:
+ """Check for expected error while update a bads hints list."""
+ loadingContext = LoadingContext()
+ uri = Path(get_data("tests/wf/hello-workflow-badhints.cwl")).as_uri()
+ with pytest.raises(
+ ValidationException,
+ match=r".*tests\/wf\/hello-workflow-badhints\.cwl\:18:4:\s*'hints'\s*entry\s*missing\s*required\s*key\s*'class'\.",
+ ):
+ load_tool(uri, loadingContext)
+
+
+def test_load_badhints_nodict() -> None:
+ """Check for expected error while update a hints list with a numerical entry."""
+ loadingContext = LoadingContext()
+ uri = Path(get_data("tests/wf/hello-workflow-badhints2.cwl")).as_uri()
+ with pytest.raises(
+ ValidationException,
+ match=r".*tests\/wf\/hello-workflow-badhints2\.cwl:41:5:\s*'hints'\s*entries\s*must\s*be\s*dictionaries:\s*\s*42\.",
+ ):
+ load_tool(uri, loadingContext)
diff --git a/tests/test_loop.py b/tests/test_loop.py
index a71fd1696..4d82e7e91 100644
--- a/tests/test_loop.py
+++ b/tests/test_loop.py
@@ -147,7 +147,7 @@ def test_loop_two_variables() -> None:
def test_loop_two_variables_single_backpropagation() -> None:
- """Test a loop case with two variables, but when only one of them is back-propagated between iterations."""
+ """Test loop with 2 variables, but when only one of them is back-propagated between iterations."""
stream = StringIO()
params = [
"--enable-ext",
@@ -199,7 +199,7 @@ def test_loop_value_from() -> None:
def test_loop_value_from_fail_no_requirement() -> None:
- """Test that a workflow loop fails if a valueFrom directive is specified without StepInputExpressionRequirement."""
+ """Test workflow loop fails for valueFrom without StepInputExpressionRequirement."""
params = [
"--enable-ext",
get_data("tests/loop/invalid-value-from-loop-no-requirement.cwl"),
diff --git a/tests/test_make_template.py b/tests/test_make_template.py
index f2bd2867d..609ede7e4 100644
--- a/tests/test_make_template.py
+++ b/tests/test_make_template.py
@@ -15,7 +15,7 @@ def test_union() -> None:
inputs = cmap(["string", "string[]"])
assert main.generate_example_input(inputs, None) == (
"a_string",
- 'one of type "string", type "string[]"',
+ "one of type 'string', type 'string[]'",
)
@@ -24,5 +24,5 @@ def test_optional_union() -> None:
inputs = cmap(["null", "string", "string[]"])
assert main.generate_example_input(inputs, None) == (
"a_string",
- 'one of type "string", type "string[]" (optional)',
+ "one of type 'string', type 'string[]' (optional)",
)
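For reference, a minimal sketch exercising the function under test directly, mirroring the assertions above (same input, same expected tuple):

from schema_salad.sourceline import cmap

from cwltool import main

value, comment = main.generate_example_input(cmap(["null", "string", "string[]"]), None)
assert value == "a_string"
assert comment == "one of type 'string', type 'string[]' (optional)"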
diff --git a/tests/test_misc_cli.py b/tests/test_misc_cli.py
index 13d6e6887..307153e16 100644
--- a/tests/test_misc_cli.py
+++ b/tests/test_misc_cli.py
@@ -28,18 +28,14 @@ def test_empty_cmdling() -> None:
def test_tool_help() -> None:
"""Test --tool-help."""
- return_code, stdout, stderr = get_main_output(
- ["--tool-help", get_data("tests/echo.cwl")]
- )
+ return_code, stdout, stderr = get_main_output(["--tool-help", get_data("tests/echo.cwl")])
assert return_code == 0
assert "job_order Job input json file" in stdout
def test_basic_pack() -> None:
"""Basic test of --pack. See test_pack.py for detailed testing."""
- return_code, stdout, stderr = get_main_output(
- ["--pack", get_data("tests/wf/revsort.cwl")]
- )
+ return_code, stdout, stderr = get_main_output(["--pack", get_data("tests/wf/revsort.cwl")])
assert return_code == 0
assert "$graph" in stdout
@@ -67,3 +63,22 @@ def test_error_graph_with_no_default() -> None:
"Tool file contains graph of multiple objects, must specify one of #echo, #cat, #collision"
in stderr
)
+
+
+def test_skip_schemas_external_step() -> None:
+ """Test that --skip-schemas works even for bad schemas in external docs."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--print-rdf",
+ "--skip-schemas",
+ get_data("tests/wf/revsort_step_bad_schema.cwl"),
+ ]
+ )
+ assert exit_code == 0
+ assert (
+ "Repeat node-elements inside property elements: " "http://www.w3.org/1999/xhtmlmeta"
+ ) not in stderr
+ assert (
+ "Could not load extension schema https://bad.example.com/missing.ttl: "
+ "Error fetching https://bad.example.com/missing.ttl"
+ ) not in stderr
diff --git a/tests/test_mpi.py b/tests/test_mpi.py
index 0fbedbcdf..e90d5d642 100644
--- a/tests/test_mpi.py
+++ b/tests/test_mpi.py
@@ -6,8 +6,9 @@
from pathlib import Path
from typing import Any, Generator, List, MutableMapping, Optional, Tuple
-import pkg_resources
import pytest
+from importlib_resources import files
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import Names
from schema_salad.utils import yaml_no_ts
@@ -18,7 +19,6 @@
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.main import main
from cwltool.mpi import MpiConfig, MPIRequirementName
-from ruamel.yaml.comments import CommentedMap, CommentedSeq
from .util import get_data, working_directory
@@ -140,8 +140,7 @@ def test_simple_mpi_tool(self, fake_mpi_conf: str, tmp_path: Path) -> None:
stderr = StringIO()
with working_directory(tmp_path):
rc = main(
- argsl=cwltool_args(fake_mpi_conf)
- + [get_data("tests/wf/mpi_simple.cwl")],
+ argsl=cwltool_args(fake_mpi_conf) + [get_data("tests/wf/mpi_simple.cwl")],
stdout=stdout,
stderr=stderr,
)
@@ -282,19 +281,16 @@ def test_env_passing(monkeypatch: pytest.MonkeyPatch) -> None:
# Reading the schema is super slow - cache for the session
@pytest.fixture(scope="session")
def schema_ext11() -> Generator[Names, None, None]:
- with pkg_resources.resource_stream("cwltool", "extensions-v1.1.yml") as res:
- ext11 = res.read().decode("utf-8")
- cwltool.process.use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
- schema = cwltool.process.get_schema("v1.1")[1]
- assert isinstance(schema, Names)
- yield schema
+ ext11 = files("cwltool").joinpath("extensions-v1.1.yml").read_text("utf-8")
+ cwltool.process.use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
+ schema = cwltool.process.get_schema("v1.1")[1]
+ assert isinstance(schema, Names)
+ yield schema
mpiReq = CommentedMap({"class": MPIRequirementName, "processes": 1})
containerReq = CommentedMap({"class": "DockerRequirement"})
-basetool = CommentedMap(
- {"cwlVersion": "v1.1", "inputs": CommentedSeq(), "outputs": CommentedSeq()}
-)
+basetool = CommentedMap({"cwlVersion": "v1.1", "inputs": CommentedSeq(), "outputs": CommentedSeq()})
def mk_tool(
@@ -350,15 +346,15 @@ def test_docker_required(schema_ext11: Names) -> None:
def test_docker_mpi_both_required(schema_ext11: Names) -> None:
# Both required - error
+ lc, rc, tool = mk_tool(schema_ext11, [], reqs=[mpiReq, containerReq])
+ clt = CommandLineTool(tool, lc)
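+    # constructing the tool must succeed; only selecting a job runner should raise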
with pytest.raises(cwltool.errors.UnsupportedRequirement):
- lc, rc, tool = mk_tool(schema_ext11, [], reqs=[mpiReq, containerReq])
- clt = CommandLineTool(tool, lc)
- jr = clt.make_job_runner(rc)
+ clt.make_job_runner(rc)
def test_docker_mpi_both_hinted(schema_ext11: Names) -> None:
# Both hinted - error
+ lc, rc, tool = mk_tool(schema_ext11, [], hints=[mpiReq, containerReq])
+ clt = CommandLineTool(tool, lc)
with pytest.raises(cwltool.errors.UnsupportedRequirement):
- lc, rc, tool = mk_tool(schema_ext11, [], hints=[mpiReq, containerReq])
- clt = CommandLineTool(tool, lc)
- jr = clt.make_job_runner(rc)
+ clt.make_job_runner(rc)
diff --git a/tests/test_override.py b/tests/test_override.py
index afacd3972..980c853bb 100644
--- a/tests/test_override.py
+++ b/tests/test_override.py
@@ -103,11 +103,7 @@ def test_overrides(parameters: List[str], result: Dict[str, str]) -> None:
"enable support for development and deprecated versions.",
),
(
- [
- get_data(
- "tests/override/env-tool_cwl-requirement_override_default_wrongver.yaml"
- )
- ],
+ [get_data("tests/override/env-tool_cwl-requirement_override_default_wrongver.yaml")],
"`cwl:requirements` in the input object is not part of CWL v1.0. You can "
"adjust to use `cwltool:overrides` instead; or you can set the cwlVersion to "
"v1.1 or greater.",
diff --git a/tests/test_pack.py b/tests/test_pack.py
index 40797b8d8..1d38e35e8 100644
--- a/tests/test_pack.py
+++ b/tests/test_pack.py
@@ -44,9 +44,7 @@ def test_packing(unpacked: str, expected: str) -> None:
"""Compare expected version reality with various workflows and --pack."""
loadingContext, workflowobj, uri = fetch_document(get_data(unpacked))
loadingContext.do_update = False
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
packed = json.loads(print_pack(loadingContext, uri))
context_dir = os.path.abspath(os.path.dirname(get_data(unpacked)))
@@ -68,13 +66,9 @@ def test_packing(unpacked: str, expected: str) -> None:
def test_pack_single_tool() -> None:
- loadingContext, workflowobj, uri = fetch_document(
- get_data("tests/wf/formattest.cwl")
- )
+ loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/formattest.cwl"))
loadingContext.do_update = False
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
@@ -91,9 +85,7 @@ def test_pack_fragment() -> None:
loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/scatter2.cwl"))
packed = cwltool.pack.pack(loadingContext, uri + "#scatterstep/mysub")
- adjustFileObjs(
- packed, partial(make_relative, os.path.abspath(get_data("tests/wf")))
- )
+ adjustFileObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
packed_result = json.dumps(packed, sort_keys=True, indent=2)
@@ -103,15 +95,11 @@ def test_pack_fragment() -> None:
def test_pack_rewrites() -> None:
- rewrites = {} # type: Dict[str, str]
+ rewrites: Dict[str, str] = {}
- loadingContext, workflowobj, uri = fetch_document(
- get_data("tests/wf/default-wf5.cwl")
- )
+ loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/default-wf5.cwl"))
loadingContext.do_update = False
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
@@ -133,13 +121,11 @@ def test_pack_rewrites() -> None:
@pytest.mark.parametrize("cwl_path", cwl_missing_version_paths)
def test_pack_missing_cwlVersion(cwl_path: str) -> None:
- """Ensure the generated pack output is not missing the `cwlVersion` in case of single tool workflow and single step workflow."""
+ """Test generated pack output for `cwlVersion` in case of 1 tool workflow and 1 step workflow."""
# Testing single tool workflow
loadingContext, workflowobj, uri = fetch_document(get_data(cwl_path))
loadingContext.do_update = False
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
@@ -163,9 +149,7 @@ def test_pack_idempotence_workflow(tmp_path: Path) -> None:
def _pack_idempotently(document: str, tmp_path: Path) -> None:
loadingContext, workflowobj, uri = fetch_document(get_data(document))
loadingContext.do_update = False
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
@@ -182,9 +166,7 @@ def _pack_idempotently(document: str, tmp_path: Path) -> None:
loadingContext, workflowobj, uri2 = fetch_document(tmp.name)
loadingContext.do_update = False
- loadingContext, uri2 = resolve_and_validate_document(
- loadingContext, workflowobj, uri2
- )
+ loadingContext, uri2 = resolve_and_validate_document(loadingContext, workflowobj, uri2)
loader2 = loadingContext.loader
assert loader2
loader2.resolve_ref(uri2)[0]
@@ -212,9 +194,7 @@ def test_packed_workflow_execution(
loadingContext.resolver = tool_resolver
loadingContext, workflowobj, uri = fetch_document(get_data(wf_path), loadingContext)
loadingContext.do_update = False
- loadingContext, uri = resolve_and_validate_document(
- loadingContext, workflowobj, uri
- )
+ loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri)
loader = loadingContext.loader
assert loader
loader.resolve_ref(uri)[0]
diff --git a/tests/test_path_checks.py b/tests/test_path_checks.py
index 0e8cb1214..018a92120 100644
--- a/tests/test_path_checks.py
+++ b/tests/test_path_checks.py
@@ -4,6 +4,7 @@
from typing import IO, Any, List, cast
import pytest
+from ruamel.yaml.comments import CommentedMap
from schema_salad.sourceline import cmap
from cwltool.command_line_tool import CommandLineTool
@@ -12,7 +13,6 @@
from cwltool.stdfsaccess import StdFsAccess
from cwltool.update import INTERNAL_VERSION
from cwltool.utils import CWLObjectType
-from ruamel.yaml.comments import CommentedMap
from .util import needs_docker
@@ -107,7 +107,7 @@ def test_unicode_in_output_files(tmp_path: Path, filename: str) -> None:
assert main(params) == 0
-class TestFsAccess(StdFsAccess):
+class StubFsAccess(StdFsAccess):
"""Stub fs access object that doesn't rely on the filesystem."""
def glob(self, pattern: str) -> List[str]:
@@ -167,9 +167,7 @@ def test_clt_returns_specialchar_names(tmp_path: Path) -> None:
# Mock an "output" file with the above special characters in its name
special = "".join(reserved)
- output_schema = cast(
- CWLObjectType, {"type": "File", "outputBinding": {"glob": special}}
- )
+ output_schema = cast(CWLObjectType, {"type": "File", "outputBinding": {"glob": special}})
mock_output = tmp_path / special
mock_output.touch()
@@ -197,7 +195,7 @@ def test_clt_returns_specialchar_names(tmp_path: Path) -> None:
builder.files, builder.stagedir, RuntimeContext(), True
)
builder.outdir = "/var/spool/cwl"
- fs_access = TestFsAccess("")
+ fs_access = StubFsAccess("")
result = cast(
CWLObjectType,
diff --git a/tests/test_pathmapper.py b/tests/test_pathmapper.py
index fae1cb5d8..b7cf2f6a1 100644
--- a/tests/test_pathmapper.py
+++ b/tests/test_pathmapper.py
@@ -65,9 +65,7 @@ def __init__(
@pytest.mark.parametrize("name,file_dir,expected", normalization_parameters)
-def test_normalizeFilesDirs(
- name: str, file_dir: CWLObjectType, expected: CWLObjectType
-) -> None:
+def test_normalizeFilesDirs(name: str, file_dir: CWLObjectType, expected: CWLObjectType) -> None:
normalizeFilesDirs(file_dir)
assert file_dir == expected, name
diff --git a/tests/test_provenance.py b/tests/test_provenance.py
index a801d2eeb..5dbe27d7c 100644
--- a/tests/test_provenance.py
+++ b/tests/test_provenance.py
@@ -13,9 +13,11 @@
from rdflib.namespace import DC, DCTERMS, RDF
from rdflib.term import Literal
-from cwltool import provenance, provenance_constants
+import cwltool.cwlprov as provenance
+from cwltool.cwlprov import provenance_constants
+from cwltool.cwlprov.ro import ResearchObject
+from cwltool.cwlprov.writablebagfile import close_ro, write_bag_file
from cwltool.main import main
-from cwltool.provenance import ResearchObject
from cwltool.stdfsaccess import StdFsAccess
from .util import get_data, needs_docker, working_directory
@@ -124,6 +126,19 @@ def test_advanced_prov_annotations(tmp_path: Path) -> None:
# )
+@needs_docker
+def test_revsort_workflow_shortcut(tmp_path: Path) -> None:
+ """Confirm that using 'cwl:tool' shortcut still snapshots the CWL files."""
+ folder = cwltool(
+ tmp_path,
+ get_data("tests/wf/revsort-job-shortcut.json"),
+ )
+ check_output_object(folder)
+ check_provenance(folder)
+ assert not (folder / "snapshot" / "revsort-job-shortcut.json").exists()
+ assert len(list((folder / "snapshot").iterdir())) == 4
+
+
@needs_docker
def test_nested_workflow(tmp_path: Path) -> None:
check_provenance(cwltool(tmp_path, get_data("tests/wf/nested.cwl")), nested=True)
@@ -229,10 +244,10 @@ def test_directory_workflow(tmp_path: Path) -> None:
# Input files should be captured by hash value,
# even if they were inside a class: Directory
- for (l, l_hash) in sha1.items():
+ for letter, l_hash in sha1.items():
prefix = l_hash[:2] # first 2 letters
p = folder / "data" / prefix / l_hash
- assert p.is_file(), f"Could not find {l} as {p}"
+ assert p.is_file(), f"Could not find {letter} as {p}"
@needs_docker
@@ -426,9 +441,7 @@ def check_ro(base_path: Path, nested: bool = False) -> None:
packed = urllib.parse.urljoin(arcp_root, "/workflow/packed.cwl")
primary_job = urllib.parse.urljoin(arcp_root, "/workflow/primary-job.json")
- primary_prov_nt = urllib.parse.urljoin(
- arcp_root, "/metadata/provenance/primary.cwlprov.nt"
- )
+ primary_prov_nt = urllib.parse.urljoin(arcp_root, "/metadata/provenance/primary.cwlprov.nt")
uuid = arcp.parse_arcp(arcp_root).uuid
highlights = set(g.subjects(OA.motivatedBy, OA.highlighting))
@@ -584,6 +597,7 @@ def check_prov(
assert (d, RDF.type, PROV.Dictionary) in g
assert (d, RDF.type, PROV.Collection) in g
assert (d, RDF.type, PROV.Entity) in g
+ assert len(list(g.objects(d, CWLPROV.basename))) == 1
files = set()
for entry in g.objects(d, PROV.hadDictionaryMember):
@@ -635,24 +649,24 @@ def check_prov(
@pytest.fixture
-def research_object() -> Generator[ResearchObject, None, None]:
- re_ob = ResearchObject(StdFsAccess(""))
+def research_object(tmp_path: Path) -> Generator[ResearchObject, None, None]:
+ re_ob = ResearchObject(StdFsAccess(str(tmp_path / "ro")), temp_prefix_ro=str(tmp_path / "tmp"))
yield re_ob
- re_ob.close()
+ close_ro(re_ob)
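+    # bag writing/closing now live as module-level helpers in cwltool.cwlprov.writablebagfile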
def test_absolute_path_fails(research_object: ResearchObject) -> None:
with pytest.raises(ValueError):
- research_object.write_bag_file("/absolute/path/fails")
+ write_bag_file(research_object, "/absolute/path/fails")
def test_climboutfails(research_object: ResearchObject) -> None:
with pytest.raises(ValueError):
- research_object.write_bag_file("../../outside-ro")
+ write_bag_file(research_object, "../../outside-ro")
def test_writable_string(research_object: ResearchObject) -> None:
- with research_object.write_bag_file("file.txt") as fh:
+ with write_bag_file(research_object, "file.txt") as fh:
assert fh.writable()
fh.write("Hello\n")
@@ -684,19 +698,19 @@ def test_writable_string(research_object: ResearchObject) -> None:
def test_writable_unicode_string(research_object: ResearchObject) -> None:
- with research_object.write_bag_file("file.txt") as fh:
+ with write_bag_file(research_object, "file.txt") as fh:
assert fh.writable()
fh.write("Here is a snowman: \u2603 \n")
def test_writable_bytes(research_object: ResearchObject) -> None:
string = "Here is a snowman: \u2603 \n".encode()
- with research_object.write_bag_file("file.txt", encoding=None) as fh:
+ with write_bag_file(research_object, "file.txt", encoding=None) as fh:
fh.write(string) # type: ignore
def test_data(research_object: ResearchObject) -> None:
- with research_object.write_bag_file("data/file.txt") as fh:
+ with write_bag_file(research_object, "data/file.txt") as fh:
assert fh.writable()
fh.write("Hello\n")
@@ -710,21 +724,21 @@ def test_data(research_object: ResearchObject) -> None:
def test_not_seekable(research_object: ResearchObject) -> None:
- with research_object.write_bag_file("file.txt") as fh:
+ with write_bag_file(research_object, "file.txt") as fh:
assert not fh.seekable()
with pytest.raises(OSError):
fh.seek(0)
def test_not_readable(research_object: ResearchObject) -> None:
- with research_object.write_bag_file("file.txt") as fh:
+ with write_bag_file(research_object, "file.txt") as fh:
assert not fh.readable()
with pytest.raises(OSError):
fh.read()
def test_truncate_fails(research_object: ResearchObject) -> None:
- with research_object.write_bag_file("file.txt") as fh:
+ with write_bag_file(research_object, "file.txt") as fh:
fh.write("Hello there")
fh.truncate() # OK as we're always at end
# Will fail because the checksum can't rewind
@@ -813,8 +827,6 @@ def test_research_object() -> None:
pass
-# Research object may need to be pickled (for Toil)
-
-
def test_research_object_picklability(research_object: ResearchObject) -> None:
+ """Research object may need to be pickled (for Toil)."""
assert pickle.dumps(research_object) is not None
diff --git a/tests/test_recursive_validation.py b/tests/test_recursive_validation.py
index 9d52404c7..300c99d88 100644
--- a/tests/test_recursive_validation.py
+++ b/tests/test_recursive_validation.py
@@ -6,9 +6,7 @@
def test_recursive_validation() -> None:
"""Test the recursive_resolve_and_validate_document function."""
- loadingContext, workflowobj, uri = fetch_document(
- get_data("tests/wf/default_path.cwl")
- )
+ loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/default_path.cwl"))
loadingContext, uri, tool = recursive_resolve_and_validate_document(
loadingContext, workflowobj, uri
)
diff --git a/tests/test_relocate.py b/tests/test_relocate.py
index c7f0090be..81877c776 100644
--- a/tests/test_relocate.py
+++ b/tests/test_relocate.py
@@ -1,4 +1,6 @@
import json
+import os
+import shutil
import sys
from pathlib import Path
@@ -33,18 +35,42 @@ def test_for_conflict_file_names(tmp_path: Path) -> None:
out = json.loads(stream.getvalue())
assert out["b1"]["basename"] == out["b2"]["basename"]
assert out["b1"]["location"] != out["b2"]["location"]
+ assert Path(out["b1"]["path"]).exists()
+ assert Path(out["b2"]["path"]).exists()
+
+
+def test_for_conflict_file_names_nodocker(tmp_path: Path) -> None:
+ stream = StringIO()
+
+ assert (
+ main(
+ ["--debug", "--outdir", str(tmp_path), get_data("tests/wf/conflict.cwl")],
+ stdout=stream,
+ )
+ == 0
+ )
+
+ out = json.loads(stream.getvalue())
+ assert out["b1"]["basename"] == out["b2"]["basename"]
+ assert out["b1"]["location"] != out["b2"]["location"]
+ assert Path(out["b1"]["path"]).exists()
+ assert Path(out["b2"]["path"]).exists()
def test_relocate_symlinks(tmp_path: Path) -> None:
+ shutil.copy(get_data("tests/reloc/test.cwl"), tmp_path)
+ (tmp_path / "dir1").mkdir()
+ (tmp_path / "dir1" / "foo").touch()
+ os.symlink(tmp_path / "dir1", tmp_path / "dir2")
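+    # dir2 is a symlink to dir1; both --outdir and the workflow input point through it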
assert (
main(
[
"--debug",
"--outdir",
- get_data("tests/reloc") + "/dir2",
- get_data("tests/reloc/test.cwl"),
+ str(tmp_path / "dir2"),
+ str(tmp_path / "test.cwl"),
"--inp",
- get_data("tests/reloc") + "/dir2",
+ str(tmp_path / "dir2"),
]
)
== 0
diff --git a/tests/test_singularity.py b/tests/test_singularity.py
index be219e018..605f22028 100644
--- a/tests/test_singularity.py
+++ b/tests/test_singularity.py
@@ -3,6 +3,8 @@
from pathlib import Path
from typing import Any
+import pytest
+
from cwltool.main import main
from .util import (
@@ -58,7 +60,10 @@ def test_singularity_workflow(tmp_path: Path) -> None:
assert error_code == 0
-def test_singularity_iwdr() -> None:
+def test_singularity_iwdr(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+ singularity_dir = tmp_path / "singularity"
+ singularity_dir.mkdir()
+ monkeypatch.setenv("CWL_SINGULARITY_CACHE", str(singularity_dir))
result_code = main(
[
"--singularity",
diff --git a/tests/test_singularity_versions.py b/tests/test_singularity_versions.py
index 844f62a09..61b16177b 100644
--- a/tests/test_singularity_versions.py
+++ b/tests/test_singularity_versions.py
@@ -21,7 +21,7 @@ def reset_singularity_version_cache() -> None:
def set_dummy_check_output(name: str, version: str) -> None:
"""Mock out subprocess.check_output."""
cwltool.singularity.check_output = ( # type: ignore[attr-defined]
- lambda c, universal_newlines: name + " version " + version
+ lambda c, text: name + " version " + version
)
@@ -38,12 +38,8 @@ def test_get_version() -> None:
assert isinstance(v, tuple)
assert isinstance(v[0], list)
assert isinstance(v[1], str)
- assert (
- cwltool.singularity._SINGULARITY_VERSION is not None
- ) # pylint: disable=protected-access
- assert (
- len(cwltool.singularity._SINGULARITY_FLAVOR) > 0
- ) # pylint: disable=protected-access
+ assert cwltool.singularity._SINGULARITY_VERSION is not None # pylint: disable=protected-access
+ assert len(cwltool.singularity._SINGULARITY_FLAVOR) > 0 # pylint: disable=protected-access
v_cached = get_version()
assert v == v_cached
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index 0e23276ac..3c5526592 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -4,6 +4,7 @@
from typing import cast
import pytest
+from ruamel.yaml.comments import CommentedMap
from schema_salad.sourceline import cmap
from cwltool.command_line_tool import CommandLineTool
@@ -12,7 +13,6 @@
from cwltool.job import JobBase
from cwltool.update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
from cwltool.utils import CWLObjectType
-from ruamel.yaml.comments import CommentedMap
from .util import get_data
diff --git a/tests/test_subclass_mypyc.py b/tests/test_subclass_mypyc.py
new file mode 100644
index 000000000..aa4964b34
--- /dev/null
+++ b/tests/test_subclass_mypyc.py
@@ -0,0 +1,92 @@
+"""
+Confirm that we can subclass and/or serialize certain classes used by 3rd parties.
+
+Especially if those classes are (or become) compiled with mypyc.
+"""
+
+import pickle
+
+import pytest
+from ruamel.yaml.comments import CommentedMap
+from schema_salad.avro import schema
+
+from cwltool.builder import Builder
+from cwltool.command_line_tool import CommandLineTool, ExpressionTool
+from cwltool.context import LoadingContext, RuntimeContext
+from cwltool.stdfsaccess import StdFsAccess
+from cwltool.update import INTERNAL_VERSION
+from cwltool.workflow import Workflow
+
+from .test_anon_types import snippet
+
+
+@pytest.mark.parametrize("snippet", snippet)
+def test_subclass_CLT(snippet: CommentedMap) -> None:
+ """We can subclass CommandLineTool."""
+
+ class TestCLT(CommandLineTool):
+ test = True
+
+ a = TestCLT(snippet, LoadingContext())
+ assert a.test is True
+
+
+@pytest.mark.parametrize("snippet", snippet)
+def test_subclass_exprtool(snippet: CommentedMap) -> None:
+ """We can subclass ExpressionTool."""
+
+ class TestExprTool(ExpressionTool):
+ test = False
+
+ a = TestExprTool(snippet, LoadingContext())
+ assert a.test is False
+
+
+@pytest.mark.parametrize("snippet", snippet)
+def test_pickle_unpickle_workflow(snippet: CommentedMap) -> None:
+ """We can pickle & unpickle a Workflow."""
+
+ a = Workflow(snippet, LoadingContext())
+ stream = pickle.dumps(a)
+ assert stream
+ assert pickle.loads(stream)
+
+
+def test_serialize_builder() -> None:
+ """We can pickle Builder."""
+ runtime_context = RuntimeContext()
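+    # Builder takes a long list of positional arguments; the empty containers and
+    # None placeholders below stand in for the job, files, bindings, schema, etc.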
+ builder = Builder(
+ {},
+ [],
+ [],
+ {},
+ schema.Names(),
+ [],
+ [],
+ {},
+ None,
+ None,
+ StdFsAccess,
+ StdFsAccess(""),
+ None,
+ 0.1,
+ False,
+ False,
+ False,
+ "no_listing",
+ runtime_context.get_outdir(),
+ runtime_context.get_tmpdir(),
+ runtime_context.get_stagedir(),
+ INTERNAL_VERSION,
+ "docker",
+ )
+ assert pickle.dumps(builder)
+
+
+def test_pickle_unpickle_runtime_context() -> None:
+ """We can pickle & unpickle RuntimeContext"""
+
+ runtime_context = RuntimeContext()
+ stream = pickle.dumps(runtime_context)
+ assert stream
+ assert pickle.loads(stream)
diff --git a/tests/test_subgraph.py b/tests/test_subgraph.py
index a4ac69347..eb99124ea 100644
--- a/tests/test_subgraph.py
+++ b/tests/test_subgraph.py
@@ -102,10 +102,7 @@ def test_single_process_inherit_reqshints(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6"
def test_single_process_inherit_hints_collision(tmp_path: Path) -> None:
@@ -121,10 +118,7 @@ def test_single_process_inherit_hints_collision(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519"
def test_single_process_inherit_reqs_collision(tmp_path: Path) -> None:
@@ -140,10 +134,7 @@ def test_single_process_inherit_reqs_collision(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519"
def test_single_process_inherit_reqs_step_collision(tmp_path: Path) -> None:
@@ -159,8 +150,7 @@ def test_single_process_inherit_reqs_step_collision(tmp_path: Path) -> None:
)
assert err_code == 0
assert (
- json.loads(stdout)["output"]["checksum"]
- == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e"
+ json.loads(stdout)["output"]["checksum"] == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e"
)
@@ -177,10 +167,7 @@ def test_single_process_inherit_reqs_hints_collision(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519"
def test_single_process_inherit_only_hints(tmp_path: Path) -> None:
@@ -196,10 +183,7 @@ def test_single_process_inherit_only_hints(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$ab5f2a9add5f54622dde555ac8ae9a3000e5ee0a"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$ab5f2a9add5f54622dde555ac8ae9a3000e5ee0a"
def test_single_process_subwf_step(tmp_path: Path) -> None:
@@ -215,10 +199,7 @@ def test_single_process_subwf_step(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6"
def test_single_process_packed_subwf_step(tmp_path: Path) -> None:
@@ -234,17 +215,16 @@ def test_single_process_packed_subwf_step(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6"
@needs_docker
-def test_single_process_subwf_subwf_inline_step() -> None:
+def test_single_process_subwf_subwf_inline_step(tmp_path: Path) -> None:
"""Test --single-process on an inline sub-sub-workflow step."""
err_code, stdout, stderr = get_main_output(
[
+ "--outdir",
+ str(tmp_path),
"--single-process",
"step1/stepX/stepY",
get_data("tests/subgraph/count-lines17-wf.cwl.json"),
@@ -253,8 +233,7 @@ def test_single_process_subwf_subwf_inline_step() -> None:
)
assert err_code == 0
assert (
- json.loads(stdout)["output"]["checksum"]
- == "sha1$3596ea087bfdaf52380eae441077572ed289d657"
+ json.loads(stdout)["output"]["checksum"] == "sha1$3596ea087bfdaf52380eae441077572ed289d657"
)
@@ -271,10 +250,7 @@ def test_single_step_subwf_step(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c"
def test_single_step_wfstep_long_out(tmp_path: Path) -> None:
@@ -290,10 +266,7 @@ def test_single_step_wfstep_long_out(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c"
def test_single_step_packed_subwf_step(tmp_path: Path) -> None:
@@ -309,10 +282,7 @@ def test_single_step_packed_subwf_step(tmp_path: Path) -> None:
]
)
assert err_code == 0
- assert (
- json.loads(stdout)["out"]["checksum"]
- == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c"
- )
+ assert json.loads(stdout)["out"]["checksum"] == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c"
@needs_docker
diff --git a/tests/test_target.py b/tests/test_target.py
index 400027ec8..724579736 100644
--- a/tests/test_target.py
+++ b/tests/test_target.py
@@ -6,9 +6,7 @@
def test_target() -> None:
"""Test --target option successful."""
test_file = "tests/wf/scatter-wf4.cwl"
- exit_code = main(
- ["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"]
- )
+ exit_code = main(["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"])
assert exit_code == 0
@@ -32,7 +30,5 @@ def test_wrong_target() -> None:
def test_target_packed() -> None:
"""Test --target option with packed workflow schema."""
test_file = "tests/wf/scatter-wf4.json"
- exit_code = main(
- ["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"]
- )
+ exit_code = main(["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"])
assert exit_code == 0
diff --git a/tests/test_tmpdir.py b/tests/test_tmpdir.py
index 420fefc1a..14caecb3c 100644
--- a/tests/test_tmpdir.py
+++ b/tests/test_tmpdir.py
@@ -1,10 +1,12 @@
"""Test that all temporary directories respect the --tmpdir-prefix and --tmp-outdir-prefix options."""
+import re
import subprocess
import sys
from pathlib import Path
from typing import List, cast
import pytest
+from ruamel.yaml.comments import CommentedMap
from schema_salad.avro import schema
from schema_salad.sourceline import cmap
@@ -14,13 +16,12 @@
from cwltool.docker import DockerCommandLineJob
from cwltool.job import JobBase
from cwltool.main import main
-from cwltool.pathmapper import MapperEnt, PathMapper
+from cwltool.pathmapper import MapperEnt
from cwltool.stdfsaccess import StdFsAccess
from cwltool.update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
from cwltool.utils import create_tmp_dir
-from ruamel.yaml.comments import CommentedMap
-from .util import get_data, needs_docker
+from .util import get_data, get_main_output, needs_docker
def test_docker_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None:
@@ -109,13 +110,9 @@ def test_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None:
@needs_docker
-def test_dockerfile_tmpdir_prefix(
- tmp_path: Path, monkeypatch: pytest.MonkeyPatch
-) -> None:
+def test_dockerfile_tmpdir_prefix(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
"""Test that DockerCommandLineJob.get_image respects temp directory directives."""
- monkeypatch.setattr(
- target=subprocess, name="check_call", value=lambda *args, **kwargs: True
- )
+ monkeypatch.setattr(target=subprocess, name="check_call", value=lambda *args, **kwargs: True)
(tmp_path / "out").mkdir()
tmp_outdir_prefix = tmp_path / "out" / "1"
(tmp_path / "3").mkdir()
@@ -141,14 +138,16 @@ def test_dockerfile_tmpdir_prefix(
False,
False,
False,
- "",
+ "no_listing",
runtime_context.get_outdir(),
runtime_context.get_tmpdir(),
runtime_context.get_stagedir(),
INTERNAL_VERSION,
"docker",
)
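+    # DockerCommandLineJob now takes the tool's make_path_mapper callable, not the PathMapper class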
- assert DockerCommandLineJob(builder, {}, PathMapper, [], [], "").get_image(
+ assert DockerCommandLineJob(
+ builder, {}, CommandLineTool.make_path_mapper, [], [], ""
+ ).get_image(
{
"class": "DockerRequirement",
"dockerFile": "FROM debian:stable-slim",
@@ -190,14 +189,14 @@ def test_docker_tmpdir_prefix(tmp_path: Path) -> None:
False,
False,
False,
- "",
+ "no_listing",
runtime_context.get_outdir(),
runtime_context.get_tmpdir(),
runtime_context.get_stagedir(),
INTERNAL_VERSION,
"docker",
)
- job = DockerCommandLineJob(builder, {}, PathMapper, [], [], "")
+ job = DockerCommandLineJob(builder, {}, CommandLineTool.make_path_mapper, [], [], "")
runtime: List[str] = []
volume_writable_file = MapperEnt(
@@ -220,9 +219,7 @@ def test_docker_tmpdir_prefix(tmp_path: Path) -> None:
resolved=str(resolved_writable_dir), target="bar", type=None, staged=None
)
(tmp_path / "2").mkdir()
- job.add_writable_directory_volume(
- runtime, volume_dir, None, str(tmp_path / "2" / "dir")
- )
+ job.add_writable_directory_volume(runtime, volume_dir, None, str(tmp_path / "2" / "dir"))
children = sorted((tmp_path / "2").glob("*"))
assert len(children) == 1
subdir = tmp_path / "2" / children[0]
@@ -235,9 +232,7 @@ def test_docker_tmpdir_prefix(tmp_path: Path) -> None:
volume_file = MapperEnt(resolved="Hoopla!", target="baz", type=None, staged=None)
(tmp_path / "4").mkdir()
- job.create_file_and_add_volume(
- runtime, volume_file, None, None, str(tmp_path / "4" / "file")
- )
+ job.create_file_and_add_volume(runtime, volume_file, None, None, str(tmp_path / "4" / "file"))
children = sorted((tmp_path / "4").glob("*"))
assert len(children) == 1
subdir = tmp_path / "4" / children[0]
@@ -279,3 +274,22 @@ def test_remove_tmpdirs(tmp_path: Path) -> None:
== 0
)
assert len(list(tmp_path.iterdir())) == 0
+
+
+def test_leave_tmpdirs(tmp_path: Path) -> None:
+ """Test that the tmpdirs including input staging directories are retained after the job execution."""
+ error_code, stdout, stderr = get_main_output(
+ [
+ "--debug",
+ "--tmpdir-prefix",
+ str(f"{tmp_path}/tmp/"),
+ "--leave-tmpdir",
+ "--outdir",
+ str(f"{tmp_path}/out/"),
+ get_data("tests/env4.cwl"),
+ ]
+ )
+ assert error_code == 0
+ assert re.search(rf"\"{re.escape(str(tmp_path))}/tmp/.*/env0\.py\"", stderr)
+ assert len(list((tmp_path / "tmp").iterdir())) == 3
+ assert len(list((tmp_path / "tmp").glob("**/env0.py"))) == 1
diff --git a/tests/test_toolargparse.py b/tests/test_toolargparse.py
index 8200b6598..756c373e8 100644
--- a/tests/test_toolargparse.py
+++ b/tests/test_toolargparse.py
@@ -180,7 +180,7 @@ def test_dont_require_inputs(tmp_path: Path) -> None:
def test_argparser_with_doc() -> None:
- """The `desription` field is set if `doc` field is provided."""
+ """The `description` field is set if `doc` field is provided."""
loadingContext = LoadingContext()
tool = load_tool(get_data("tests/with_doc.cwl"), loadingContext)
p = argparse.ArgumentParser()
@@ -189,7 +189,7 @@ def test_argparser_with_doc() -> None:
def test_argparser_without_doc() -> None:
- """The `desription` field is None if `doc` field is not provided."""
+ """The `description` field is None if `doc` field is not provided."""
loadingContext = LoadingContext()
tool = load_tool(get_data("tests/without_doc.cwl"), loadingContext)
p = argparse.ArgumentParser()
@@ -215,15 +215,15 @@ def test_argparser_without_doc() -> None:
),
],
)
-def test_argparse_append_with_default(
- job_order: List[str], expected_values: List[str]
-) -> None:
- """The appended arguments must not include the default. But if no appended argument, then the default is used."""
+def test_argparse_append_with_default(job_order: List[str], expected_values: List[str]) -> None:
+ """
+ Confirm that the appended arguments must not include the default.
+
+ But if no appended argument, then the default is used.
+ """
loadingContext = LoadingContext()
tool = load_tool(get_data("tests/default_values_list.cwl"), loadingContext)
- toolparser = generate_parser(
- argparse.ArgumentParser(prog="test"), tool, {}, [], False
- )
+ toolparser = generate_parser(argparse.ArgumentParser(prog="test"), tool, {}, [], False)
cmd_line = vars(toolparser.parse_args(job_order))
file_paths = list(cmd_line["file_paths"])
assert expected_values == file_paths
diff --git a/tests/test_trs.py b/tests/test_trs.py
index 52cd46a22..5f94ce58a 100644
--- a/tests/test_trs.py
+++ b/tests/test_trs.py
@@ -45,8 +45,8 @@ def json(self) -> Any:
def mocked_requests_get(*args: Any, **kwargs: Any) -> MockResponse2:
if (
- args[0]
- == "https://dockstore.org/api/api/ga4gh/v2/tools/quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/CWL/files"
+ args[0] == "https://dockstore.org/api/api/ga4gh/v2/tools/"
+ "quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/CWL/files"
):
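+        # adjacent string literals concatenate at parse time, so this compares the full URL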
return MockResponse2(
[
@@ -57,26 +57,26 @@ def mocked_requests_get(*args: Any, **kwargs: Any) -> MockResponse2:
200,
)
elif (
- args[0]
- == "https://dockstore.org/api/api/ga4gh/v2/tools/quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/plain-CWL/descriptor/Dockstore.cwl"
+ args[0] == "https://dockstore.org/api/api/ga4gh/v2/tools/"
+ "quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/plain-CWL/descriptor/Dockstore.cwl"
):
string = open(get_data("tests/trs/Dockstore.cwl")).read()
return MockResponse2(string, 200)
elif (
- args[0]
- == "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-tool.cwl"
+ args[0] == "https://dockstore.org/api/api/ga4gh/v2/tools/"
+ "%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-tool.cwl"
):
string = open(get_data("tests/trs/md5sum-tool.cwl")).read()
return MockResponse2(string, 200)
elif (
- args[0]
- == "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-workflow.cwl"
+ args[0] == "https://dockstore.org/api/api/ga4gh/v2/tools/"
+ "%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-workflow.cwl"
):
string = open(get_data("tests/trs/md5sum-workflow.cwl")).read()
return MockResponse2(string, 200)
elif (
- args[0]
- == "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/CWL/files"
+ args[0] == "https://dockstore.org/api/api/ga4gh/v2/tools/"
+ "%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/CWL/files"
):
return MockResponse2(
[
diff --git a/tests/test_udocker.py b/tests/test_udocker.py
index 9fd4a8691..eb9a0ebfe 100644
--- a/tests/test_udocker.py
+++ b/tests/test_udocker.py
@@ -16,11 +16,9 @@
@pytest.fixture(scope="session")
def udocker(tmp_path_factory: TempPathFactory) -> str:
"""Udocker fixture, returns the path to the udocker script."""
- test_cwd = os.getcwd()
test_environ = copy.copy(os.environ)
docker_install_dir = str(tmp_path_factory.mktemp("udocker"))
with working_directory(docker_install_dir):
-
url = "https://github.com/indigo-dc/udocker/releases/download/1.3.5/udocker-1.3.5.tar.gz"
install_cmds = [
["curl", "-L", url, "-o", "./udocker-tarball.tgz"],
diff --git a/tests/test_validate.py b/tests/test_validate.py
index e809df386..be50d357d 100644
--- a/tests/test_validate.py
+++ b/tests/test_validate.py
@@ -1,5 +1,5 @@
"""Tests --validation."""
-
+import re
from .util import get_data, get_main_output
@@ -14,3 +14,41 @@ def test_validate_graph_with_no_default() -> None:
assert "packed_no_main.cwl#cat is valid CWL" in stdout
assert "packed_no_main.cwl#collision is valid CWL" in stdout
assert "tests/wf/packed_no_main.cwl is valid CWL" in stdout
+
+
+def test_validate_with_valid_input_object() -> None:
+ """Ensure that --validate with a valid input object."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ get_data("tests/wf/1st-workflow.cwl"),
+ "--inp",
+ get_data("tests/wf/1st-workflow.cwl"),
+ "--ex",
+ "FOO",
+ ]
+ )
+ assert exit_code == 0
+ assert "tests/wf/1st-workflow.cwl is valid CWL. No errors detected in the inputs." in stdout
+
+
+def test_validate_with_invalid_input_object() -> None:
+ """Ensure that --validate with an invalid input object."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ get_data("tests/wf/1st-workflow.cwl"),
+ get_data("tests/wf/1st-workflow_bad_inputs.yml"),
+ ]
+ )
+ assert exit_code == 1
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Invalid job input record" in stderr
+ assert (
+ "tests/wf/1st-workflow_bad_inputs.yml:2:1: * the 'ex' field is not "
+ "valid because the value is not string" in stderr
+ )
+ assert (
+ "tests/wf/1st-workflow_bad_inputs.yml:1:1: * the 'inp' field is not "
+ "valid because is not a dict. Expected a File object." in stderr
+ )
diff --git a/tests/test_validate_js.py b/tests/test_validate_js.py
index d4520ee93..4c81a5576 100644
--- a/tests/test_validate_js.py
+++ b/tests/test_validate_js.py
@@ -69,23 +69,15 @@ def test_js_hint_reports_invalid_js() -> None:
def test_js_hint_warn_on_es6() -> None:
- assert (
- len(validate_js.jshint_js(code_fragment_to_js("((() => 4)())"), []).errors) == 1
- )
+ assert len(validate_js.jshint_js(code_fragment_to_js("((() => 4)())"), []).errors) == 1
def test_js_hint_error_on_undefined_name() -> None:
- assert (
- len(validate_js.jshint_js(code_fragment_to_js("undefined_name()")).errors) == 1
- )
+ assert len(validate_js.jshint_js(code_fragment_to_js("undefined_name()")).errors) == 1
def test_js_hint_set_defined_name() -> None:
assert (
- len(
- validate_js.jshint_js(
- code_fragment_to_js("defined_name()"), ["defined_name"]
- ).errors
- )
+ len(validate_js.jshint_js(code_fragment_to_js("defined_name()"), ["defined_name"]).errors)
== 0
)
diff --git a/tests/util.py b/tests/util.py
index e5e128936..7f33f1c40 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -67,8 +67,7 @@ def env_accepts_null() -> bool:
if _env_accepts_null is None:
result = subprocess.run(
["env", "-0"],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
+ capture_output=True,
encoding="utf-8",
)
_env_accepts_null = result.returncode == 0
diff --git a/tests/wf/1st-workflow_bad_inputs.yml b/tests/wf/1st-workflow_bad_inputs.yml
new file mode 100644
index 000000000..d95783be2
--- /dev/null
+++ b/tests/wf/1st-workflow_bad_inputs.yml
@@ -0,0 +1,4 @@
+inp: 42
+ex:
+ class: File
+ path: 1st-workflow.cwl
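+# "inp" must be a File object and "ex" a string; the types are deliberately swapped here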
diff --git a/tests/wf/811-12.cwl b/tests/wf/811-12.cwl
new file mode 100644
index 000000000..f4403f45b
--- /dev/null
+++ b/tests/wf/811-12.cwl
@@ -0,0 +1,15 @@
+cwlVersion: v1.2
+class: Workflow
+
+inputs:
+ - id: hello
+ type: Any
+outputs: []
+
+steps:
+ step:
+ id: step
+ run: schemadef-tool-12.cwl
+ in:
+ hello: hello
+ out: []
diff --git a/tests/wf/conflict.cwl b/tests/wf/conflict.cwl
index 21ed20b1f..dfc3d3fff 100644
--- a/tests/wf/conflict.cwl
+++ b/tests/wf/conflict.cwl
@@ -4,14 +4,13 @@ $graph:
- class: CommandLineTool
id: makebzz
inputs: []
+ baseCommand: touch
outputs:
bzz:
type: File
outputBinding:
glob: bzz
- requirements:
- ShellCommandRequirement: {}
- arguments: [{shellQuote: false, valueFrom: "touch bzz"}]
+ arguments: [ bzz ]
- class: Workflow
id: main
inputs: []
diff --git a/tests/wf/echo.cwl b/tests/wf/echo.cwl
index 9c633142e..65b451fb2 100755
--- a/tests/wf/echo.cwl
+++ b/tests/wf/echo.cwl
@@ -21,4 +21,4 @@ outputs:
type: File
outputBinding:
glob: foo$(inputs.r).txt
-arguments: [python, -c, $(inputs.script), $(inputs.r)]
+arguments: [python3, -c, $(inputs.script), $(inputs.r)]
diff --git a/tests/wf/floats_small_and_large.cwl b/tests/wf/floats_small_and_large.cwl
new file mode 100644
index 000000000..434327361
--- /dev/null
+++ b/tests/wf/floats_small_and_large.cwl
@@ -0,0 +1,39 @@
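+# Exercises float-valued defaults written in plain decimal and scientific notation;
+# the values are echoed back so the test can check how they are serialized.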
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: echo
+requirements:
+ InlineJavascriptRequirement: {}
+
+inputs:
+ annotation_prokka_evalue:
+ type: float
+ default: 0.00001
+ inputBinding: {}
+
+ annotation_prokka_evalue2:
+ type: float
+ default: 1.23e-05
+ inputBinding: {}
+
+ annotation_prokka_evalue3:
+ type: float
+ default: 1.23e5
+ inputBinding: {}
+
+ annotation_prokka_evalue4:
+ type: float
+ default: 1230000
+ inputBinding: {}
+
+
+arguments: [ -n ]
+
+stdout: dump
+
+outputs:
+ result:
+ type: string
+ outputBinding:
+ glob: dump
+ loadContents: true
+ outputEval: $(self[0].contents)
diff --git a/tests/wf/generator/pytoolgen.cwl b/tests/wf/generator/pytoolgen.cwl
index 3d8ce7879..a7dcc9e1b 100644
--- a/tests/wf/generator/pytoolgen.cwl
+++ b/tests/wf/generator/pytoolgen.cwl
@@ -28,5 +28,5 @@ run:
v.push({entryname: "inp.py", entry: inputs.script});
return v;
}
- arguments: [python, inp.py]
+ arguments: [python3, inp.py]
stdout: main.cwl
diff --git a/tests/wf/hello-workflow-badhints.cwl b/tests/wf/hello-workflow-badhints.cwl
new file mode 100755
index 000000000..e4bc659f1
--- /dev/null
+++ b/tests/wf/hello-workflow-badhints.cwl
@@ -0,0 +1,41 @@
+#!/usr/bin/env cwl-runner
+
+cwlVersion: v1.0
+class: Workflow
+
+label: "Hello World"
+doc: "Outputs a message using echo"
+
+inputs:
+ usermessage: string
+
+outputs:
+ response:
+ outputSource: step0/response
+ type: File
+
+hints:
+ - {}
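+# the empty entry above is missing the required "class" key and must fail validation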
+
+steps:
+ step0:
+ run:
+ class: CommandLineTool
+ inputs:
+ message:
+ type: string
+ doc: "The message to print"
+ default: "Hello World"
+ inputBinding:
+ position: 1
+ baseCommand: echo
+ arguments:
+ - "-n"
+ - "-e"
+ stdout: response.txt
+ outputs:
+ response:
+ type: stdout
+ in:
+ message: usermessage
+ out: [response]
diff --git a/tests/wf/hello-workflow-badhints2.cwl b/tests/wf/hello-workflow-badhints2.cwl
new file mode 100755
index 000000000..1cc17229e
--- /dev/null
+++ b/tests/wf/hello-workflow-badhints2.cwl
@@ -0,0 +1,41 @@
+#!/usr/bin/env cwl-runner
+
+cwlVersion: v1.0
+class: Workflow
+
+label: "Hello World"
+doc: "Outputs a message using echo"
+
+inputs:
+ usermessage: string
+
+outputs:
+ response:
+ outputSource: step0/response
+ type: File
+
+steps:
+ step0:
+ run:
+ class: CommandLineTool
+ inputs:
+ message:
+ type: string
+ doc: "The message to print"
+ default: "Hello World"
+ inputBinding:
+ position: 1
+ baseCommand: echo
+ arguments:
+ - "-n"
+ - "-e"
+ stdout: response.txt
+ outputs:
+ response:
+ type: stdout
+ in:
+ message: usermessage
+ out: [response]
+
+hints:
+ - 42
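+# a bare number is not a dictionary, so this hints entry must fail validation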
diff --git a/tests/wf/input_named_id.cwl b/tests/wf/input_named_id.cwl
new file mode 100644
index 000000000..e559f967b
--- /dev/null
+++ b/tests/wf/input_named_id.cwl
@@ -0,0 +1,13 @@
+label: FeatureFinderIdentification
+doc: ""
+inputs:
+ id:
+ doc: featureXML or consensusXML file
+ type: File
+outputs:
+ []
+cwlVersion: v1.2
+class: CommandLineTool
+baseCommand:
+ - FeatureFinderIdentification
+
diff --git a/tests/wf/input_named_id.yaml b/tests/wf/input_named_id.yaml
new file mode 100644
index 000000000..39c36dea8
--- /dev/null
+++ b/tests/wf/input_named_id.yaml
@@ -0,0 +1,3 @@
+id:
+ class: File
+ path: ../2.fastq
diff --git a/tests/wf/js_output.cwl b/tests/wf/js_output.cwl
index 4fa154c08..87b456727 100755
--- a/tests/wf/js_output.cwl
+++ b/tests/wf/js_output.cwl
@@ -6,5 +6,5 @@ requirements:
inputs: []
outputs: []
arguments:
- - valueFrom: ${console.log("Log message");console.error("Error message");return ["python", "-c", "True"]}
- shellQuote: false
\ No newline at end of file
+ - valueFrom: ${console.log("Log message");console.error("Error message");return ["python3", "-c", "True"]}
+ shellQuote: false
diff --git a/tests/wf/mpi_expr.cwl b/tests/wf/mpi_expr.cwl
index 39819c557..2334b865d 100644
--- a/tests/wf/mpi_expr.cwl
+++ b/tests/wf/mpi_expr.cwl
@@ -13,7 +13,7 @@ doc: |
This version takes the number of processes to use as an input and
then passes this to the MPIRequirement using an expression.
-baseCommand: python
+baseCommand: python3
requirements:
cwltool:MPIRequirement:
processes: $(inputs.processes)
diff --git a/tests/wf/mpi_simple.cwl b/tests/wf/mpi_simple.cwl
index c3d8ef56b..6fe836748 100644
--- a/tests/wf/mpi_simple.cwl
+++ b/tests/wf/mpi_simple.cwl
@@ -10,7 +10,7 @@ doc: |
processes. Requires Python (but you have cwltool running, right?)
and an MPI implementation.
-baseCommand: python
+baseCommand: python3
requirements:
cwltool:MPIRequirement:
processes: 2
diff --git a/tests/wf/resreq_expr_float_v1_0.cwl b/tests/wf/resreq_expr_float_v1_0.cwl
new file mode 100644
index 000000000..d5ea08cde
--- /dev/null
+++ b/tests/wf/resreq_expr_float_v1_0.cwl
@@ -0,0 +1,21 @@
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+ - class: InlineJavascriptRequirement
+ - class: ResourceRequirement
+ tmpdirMin: $((2 * inputs.input_bam.size) / 3.14159)
+ outdirMin: $((2 * inputs.input_bam.size) / 3.14159)
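+    # the expressions above yield non-integral values, exercising float handling in resource requests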
+
+inputs:
+ input_bam: File
+
+arguments:
+ - |
+ {"result": $(runtime) }
+
+stdout: cwl.output.json
+
+outputs:
+ result: Any
+
+baseCommand: [echo]
diff --git a/tests/wf/resreq_expr_float_v1_2.cwl b/tests/wf/resreq_expr_float_v1_2.cwl
new file mode 100644
index 000000000..cfb1b2a13
--- /dev/null
+++ b/tests/wf/resreq_expr_float_v1_2.cwl
@@ -0,0 +1,21 @@
+cwlVersion: v1.2
+class: CommandLineTool
+requirements:
+ - class: InlineJavascriptRequirement
+ - class: ResourceRequirement
+ tmpdirMin: $((2 * inputs.input_bam.size) / 3.14159)
+ outdirMin: $((2 * inputs.input_bam.size) / 3.14159)
+
+inputs:
+ input_bam: File
+
+arguments:
+ - |
+ {"result": $(runtime) }
+
+stdout: cwl.output.json
+
+outputs:
+ result: Any
+
+baseCommand: [echo]
diff --git a/tests/wf/revsort-job-shortcut.json b/tests/wf/revsort-job-shortcut.json
new file mode 100644
index 000000000..268d1fc55
--- /dev/null
+++ b/tests/wf/revsort-job-shortcut.json
@@ -0,0 +1,8 @@
+{
+ "cwl:tool": "revsort.cwl",
+ "workflow_input": {
+ "class": "File",
+ "location": "whale.txt",
+ "format": "https://www.iana.org/assignments/media-types/text/plain"
+ }
+}
diff --git a/tests/wf/revsort_step_bad_schema.cwl b/tests/wf/revsort_step_bad_schema.cwl
new file mode 100755
index 000000000..694e7ed57
--- /dev/null
+++ b/tests/wf/revsort_step_bad_schema.cwl
@@ -0,0 +1,73 @@
+#!/usr/bin/env cwl-runner
+#
+# This is a two-step workflow which uses "revtool_bad_schema" and "sorttool" from separate files.
+#
+class: Workflow
+doc: "Reverse the lines in a document, then sort those lines."
+cwlVersion: v1.0
+
+# Requirements & hints specify prerequisites and extensions to the workflow.
+# In this example, DockerRequirement specifies a default Docker container
+# in which the command line tools will execute.
+hints:
+ - class: DockerRequirement
+ dockerPull: docker.io/debian:stable-slim
+
+
+# The inputs array defines the structure of the input object that describes
+# the inputs to the workflow.
+#
+# The "reverse_sort" input parameter demonstrates the "default" field. If the
+# field "reverse_sort" is not provided in the input object, the default value will
+# be used.
+inputs:
+ workflow_input:
+ type: File
+ doc: "The input file to be processed."
+ format: iana:text/plain
+ default:
+ class: File
+ location: hello.txt
+ reverse_sort:
+ type: boolean
+ default: true
+ doc: "If true, reverse (descending) sort"
+
+# The "outputs" array defines the structure of the output object that describes
+# the outputs of the workflow.
+#
+# Each output field must be connected to the output of one of the workflow
+# steps using the "outputSource" field. Here, the parameter "sorted_output" of the
+# workflow comes from the "sorted_output" output of the "sorted" step.
+outputs:
+ sorted_output:
+ type: File
+ outputSource: sorted/sorted_output
+ doc: "The output with the lines reversed and sorted."
+
+# The "steps" array lists the executable steps that make up the workflow.
+# The tool to execute each step is listed in the "run" field.
+#
+# In the first step, the "in" field of the step connects the upstream
+# parameter "workflow_input" of the workflow to the input parameter of the tool
+# "revtool_input"
+#
+# In the second step, the "in" field of the step connects the output
+# parameter "revtool_output" from the first step to the input parameter of the
+# tool "sorted_input".
+steps:
+ rev:
+ in:
+ revtool_input: workflow_input
+ out: [revtool_output]
+ run: revtool_bad_schema.cwl
+
+ sorted:
+ in:
+ sorted_input: rev/revtool_output
+ reverse: reverse_sort
+ out: [sorted_output]
+ run: sorttool.cwl
+
+$namespaces:
+ iana: https://www.iana.org/assignments/media-types/
diff --git a/tests/wf/revtool_bad_schema.cwl b/tests/wf/revtool_bad_schema.cwl
new file mode 100755
index 000000000..72ce773f0
--- /dev/null
+++ b/tests/wf/revtool_bad_schema.cwl
@@ -0,0 +1,42 @@
+#!/usr/bin/env cwl-runner
+#
+# Simplest example command line program wrapper for the Unix tool "rev".
+#
+class: CommandLineTool
+cwlVersion: v1.0
+doc: "Reverse each line using the `rev` command"
+$schemas:
+ - https://bad.example.com/missing.ttl
+ - https://schema.org/docs/
+ - https://schema.org/docs/schema_org_rdfa.html
+
+# The "inputs" array defines the structure of the input object that describes
+# the inputs to the underlying program. Here, there is one input field
+# defined that will be called "input" and will contain a "File" object.
+#
+# The input binding indicates that the input value should be turned into a
+# command line argument. In this example inputBinding is an empty object,
+# which indicates that the file name should be added to the command line at
+# a default location.
+inputs:
+ revtool_input:
+ type: File
+ inputBinding: {}
+
+# The "outputs" array defines the structure of the output object that
+# describes the outputs of the underlying program. Here, there is one
+# output field defined that will be called "output", must be a "File" type,
+# and after the program executes, the output value will be the file
+# output.txt in the designated output directory.
+outputs:
+ revtool_output:
+ type: File
+ outputBinding:
+ glob: output.txt
+
+# The actual program to execute.
+baseCommand: rev
+
+# Specify that the standard output stream must be redirected to a file called
+# output.txt in the designated output directory.
+stdout: output.txt
diff --git a/tests/wf/schemadef-bug-1473.cwl b/tests/wf/schemadef-bug-1473.cwl
index b586be1dd..ad87ae08e 100644
--- a/tests/wf/schemadef-bug-1473.cwl
+++ b/tests/wf/schemadef-bug-1473.cwl
@@ -449,7 +449,7 @@
"name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/samples"
},
{
- "label": "settings by override cylces",
+ "label": "settings by override cycles",
"doc": "Additional bcl convert settings\n",
"type": [
"null",
diff --git a/tests/wf/schemadef-tool-12.cwl b/tests/wf/schemadef-tool-12.cwl
new file mode 100644
index 000000000..5c3433e7f
--- /dev/null
+++ b/tests/wf/schemadef-tool-12.cwl
@@ -0,0 +1,24 @@
+#!/usr/bin/env cwl-runner
+class: CommandLineTool
+cwlVersion: v1.2
+hints:
+ ResourceRequirement:
+ ramMin: 8
+
+requirements:
+ - $import: schemadef-type.yml
+
+inputs:
+ - id: hello
+ type: "schemadef-type.yml#HelloType"
+ inputBinding:
+ valueFrom: $(self.a)/$(self.b)
+
+outputs:
+ - id: output
+ type: File
+ outputBinding:
+ glob: output.txt
+
+stdout: output.txt
+baseCommand: echo
diff --git a/tests/wf/timelimit.cwl b/tests/wf/timelimit.cwl
index e15ebaddf..7af0d1dff 100644
--- a/tests/wf/timelimit.cwl
+++ b/tests/wf/timelimit.cwl
@@ -11,5 +11,5 @@ inputs:
outputs: []
requirements:
cwltool:TimeLimit:
- timelimit: 15
+ timelimit: 20
baseCommand: sleep
diff --git a/tox.ini b/tox.ini
index bd675b40d..ed8f5c243 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,40 +1,39 @@
[tox]
envlist =
- py3{6,7,8,9,10,11}-lint
- py3{6,7,8,9,10,11}-unit
- py3{6,7,8,9,10,11}-bandit
- py3{7,8,9,10,11}-mypy
- py311-lintreadme
- py311-shellcheck
- py311-pydocstyle
+ py3{8,9,10,11,12}-lint
+ py3{8,9,10,11,12}-unit
+ py3{8,9,10,11,12}-bandit
+ py3{8,9,10,11,12}-mypy
+ py312-lintreadme
+ py312-shellcheck
+ py312-pydocstyle
skip_missing_interpreters = True
[pytest]
-addopts=--ignore cwltool/schemas --basetemp ./tmp -n auto
+addopts=--ignore cwltool/schemas -n auto
testpaths = tests
[gh-actions]
python =
- 3.6: py36
- 3.7: py37
3.8: py38
3.9: py39
3.10: py310
3.11: py311
+ 3.12: py312
[testenv]
skipsdist =
- py3{6,7,8,9,10,11}-!{unit,mypy,lintreadme} = True
+ py3{8,9,10,11,12}-!{unit,mypy,lintreadme} = True
description =
- py3{6,7,8,9,10,11}-unit: Run the unit tests
- py3{6,7,8,9,10,11}-lint: Lint the Python code
- py3{6,7,8,9,10,11}-bandit: Search for common security issues
- py3{7,8,9,10,11}-mypy: Check for type safety
- py311-pydocstyle: docstring style checker
- py311-shellcheck: syntax check for shell scripts
- py311-lintreadme: Lint the README.rst→.md conversion
+ py3{8,9,10,11,12}-unit: Run the unit tests
+ py3{8,9,10,11,12}-lint: Lint the Python code
+ py3{8,9,10,11,12}-bandit: Search for common security issues
+ py3{8,9,10,11,12}-mypy: Check for type safety
+ py312-pydocstyle: docstring style checker
+ py312-shellcheck: syntax check for shell scripts
+ py312-lintreadme: Lint the README.rst→.md conversion
passenv =
CI
@@ -42,39 +41,39 @@ passenv =
PROOT_NO_SECCOMP
extras =
- py3{6,7,8,9,10,11}-unit: deps
+ py3{8,9,10,11,12}-unit: deps
deps =
- py3{6,7,8,9,10,11}-{unit,lint,bandit,mypy}: -rrequirements.txt
- py3{6,7,8,9,10,11}-{unit,mypy}: -rtest-requirements.txt
- py3{6,7,8,9,10,11}-lint: -rlint-requirements.txt
- py3{6,7,8,9,10,11}-bandit: bandit
- py3{6,7,8,9,10,11}-bandit: importlib_metadata != 4.8.0
- py3{7,8,9,10,11}-mypy: -rmypy-requirements.txt
- py311-pydocstyle: pydocstyle
- py311-pydocstyle: diff-cover
- py311-lintreadme: twine
- py311-lintreadme: build
- py311-lintreadme: readme_renderer[md]
+ py3{8,9,10,11,12}-{unit,lint,bandit,mypy}: -rrequirements.txt
+ py3{8,9,10,11,12}-{unit,mypy}: -rtest-requirements.txt
+ py3{8,9,10,11,12}-lint: -rlint-requirements.txt
+ py3{8,9,10,11,12}-bandit: bandit
+ py3{8,9,10,11,12}-bandit: importlib_metadata != 4.8.0
+ py3{8,9,10,11,12}-mypy: -rmypy-requirements.txt
+ py312-pydocstyle: pydocstyle
+ py312-pydocstyle: diff-cover
+ py312-lintreadme: twine
+ py312-lintreadme: build
+ py312-lintreadme: readme_renderer[rst]
setenv =
- py3{6,7,8,9,10,11}-unit: LC_ALL = C.UTF-8
+ LC_ALL = C.UTF-8
+ HOME = {envtmpdir}
commands_pre =
- py3{6,7,8,9,10,11}-unit: python -m pip install -U pip setuptools wheel
- py311-lintreadme: python -m build --outdir {distdir}
+ py3{8,9,10,11,12}-unit: python -m pip install -U pip setuptools wheel
+ py312-lintreadme: python -m build --outdir {distdir}
commands =
- py3{6,7,8,9,10,11}-unit: make coverage-report coverage.xml PYTEST_EXTRA={posargs}
- py3{6,7,8,9,10,11}-bandit: bandit -r cwltool
- py3{6,7,8,9,10,11}-lint: make flake8 format-check codespell
- py3{7,8,9,10,11}-mypy: make mypy mypyc PYTEST_EXTRA={posargs}
- py37-mypy: make mypy_3.6
- py311-shellcheck: make shellcheck
- py311-pydocstyle: make diff_pydocstyle_report
- py311-lintreadme: twine check {distdir}/*
+ py3{8,9,10,11,12}-unit: make coverage-report coverage.xml PYTEST_EXTRA={posargs}
+ py3{8,9,10,11,12}-bandit: bandit -r cwltool
+ py3{8,9,10,11,12}-lint: make flake8 format-check codespell-check
+ py3{8,9,10,11,12}-mypy: make mypy mypyc PYTEST_EXTRA={posargs}
+ py312-shellcheck: make shellcheck
+ py312-pydocstyle: make diff_pydocstyle_report
+ py312-lintreadme: twine check {distdir}/*
skip_install =
- py3{6,7,8,9,10,11}-{bandit,lint,mypy,shellcheck,pydocstyle,lintreadme}: true
+ py3{8,9,10,11,12}-{bandit,lint,mypy,shellcheck,pydocstyle,lintreadme}: true
allowlist_externals = make