Fix issue #9 - parameterization of dependency tests #33

Open
wants to merge 13 commits into base: master
40 changes: 29 additions & 11 deletions pytest_dependency.py
@@ -42,15 +42,17 @@ def addResult(self, rep):
     def isSuccess(self):
         return list(self.results.values()) == ['passed', 'passed', 'passed']
 
+    def isDone(self):
+        return None not in list(self.results.values())
+
 
 class DependencyManager(object):
     """Dependency manager, stores the results of tests.
     """
 
-    ScopeCls = {'module':pytest.Module, 'session':pytest.Session}
+    ScopeCls = {'module': pytest.Module, 'session': pytest.Session}
 
     @classmethod
-    def getManager(cls, item, scope='module'):
+    def getManager(cls, item, scope='module'):  # change to session??
         """Get the DependencyManager object from the node at scope level.
         Create it, if not yet present.
         """
@@ -63,13 +65,28 @@ def __init__(self):
         self.results = {}
 
     def addResult(self, item, name, rep):
+        original = item.originalname if item.originalname is not None else item.name
         if not name:
             if item.cls:
                 name = "%s::%s" % (item.cls.__name__, item.name)
+                original = "%s::%s" % (item.cls.__name__, original)
             else:
                 name = item.name
-        status = self.results.setdefault(name, DependencyItemStatus())
-        status.addResult(rep)
+
+            status = self.results.setdefault(name, DependencyItemStatus())
+            status.addResult(rep)
+        else:
+            original = name
+
+        if original != item.name:
+            try:
+                check = not self.results[original].isSuccess() and self.results[original].isDone()
+                if check:
+                    return 1
+            except KeyError:
+                pass
+        status = self.results.setdefault(original, DependencyItemStatus())
+        status.addResult(rep)
 
     def checkDepend(self, depends, item):
         for i in depends:
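
What the rework does: when no explicit name= is given, the outcome is now recorded twice, once under the parameterized node id (item.name, e.g. test_base[0]) and once under item.originalname, the bare function name. The early return when the entry under the original name is already done but not successful keeps a later passing instance from masking an earlier failure. A hedged sketch of the usage this is meant to enable (test names here are hypothetical; issue #9 asks for exactly this):

    import pytest

    @pytest.mark.parametrize("x", [0, 1, 2])
    @pytest.mark.dependency()
    def test_base(x):
        # Each instance is stored as test_base[x] and folded into "test_base".
        assert x < 5

    @pytest.mark.dependency(depends=["test_base"])
    def test_followup():
        # Skipped unless every test_base[x] instance succeeded.
        pass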
@@ -82,7 +99,7 @@ def checkDepend(self, depends, item):
             pytest.skip("%s depends on %s" % (item.name, i))
 
 
-def depends(request, other):
+def depends(request, other,):
     """Add dependency on other test.
 
     Call pytest.skip() unless a successful outcome of all of the tests in
@@ -105,19 +122,19 @@ def depends(request, other):


 def pytest_addoption(parser):
-    parser.addini("automark_dependency",
-        "Add the dependency marker to all tests automatically",
+    parser.addini("automark_dependency",
+        "Add the dependency marker to all tests automatically",
         default=False)
-    parser.addoption("--ignore-unknown-dependency",
-        action="store_true", default=False,
+    parser.addoption("--ignore-unknown-dependency",
+        action="store_true", default=False,
         help="ignore dependencies whose outcome is not known")
 
 
 def pytest_configure(config):
     global _automark, _ignore_unknown
     _automark = _get_bool(config.getini("automark_dependency"))
     _ignore_unknown = config.getoption("--ignore-unknown-dependency")
-    config.addinivalue_line("markers",
+    config.addinivalue_line("markers",
         "dependency(name=None, depends=[]): "
         "mark a test to be used as a dependency for "
         "other tests or to depend on other tests.")
@@ -126,6 +143,7 @@ def pytest_configure(config):
 @pytest.hookimpl(tryfirst=True, hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     """Store the test outcome if this item is marked "dependency".
+
     """
     outcome = yield
     marker = item.get_closest_marker("dependency")
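
The part of this hunk truncated by the diff view presumably hands the report to the manager. A sketch of that hand-off, assuming only the signatures visible elsewhere in this diff (getManager(item), addResult(item, name, rep)) and the name= argument of the dependency marker:

    # Continuation sketch, not the verbatim file contents:
    if marker is not None:
        rep = outcome.get_result()            # the TestReport built by pytest
        name = marker.kwargs.get('name')      # optional explicit name
        manager = DependencyManager.getManager(item)
        manager.addResult(item, name, rep)    # record this phase's outcome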
4 changes: 2 additions & 2 deletions tests/test_03_skipmsgs.py
@@ -36,6 +36,6 @@ def test_d():
     *::test_d SKIPPED
     """)
     result.stdout.fnmatch_lines_random("""
-        SKIP * test_c depends on test_b
-        SKIP * test_d depends on test_c
+        SKIP* test_c depends on test_b
+        SKIP* test_d depends on test_c
     """)