Re-enable more "not e2e" xfail test cases (#6947)
* Exclude tests for the deprecated tfx/orchestration/experimental/core module from pytest collection.
* Include additional testdata files in the package build.
* Add module-level pytest cleanup functions for proto classes.
* Update doc tests to clean up docs before execution.
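
The module-level cleanup functions mentioned above all follow one pattern, repeated in the diffs below: a module-scoped, autouse pytest fixture that yields and then reloads a generated proto module, so that any state the tests attach to the proto classes is discarded once the module finishes. A minimal self-contained sketch of the pattern, using google.protobuf's struct_pb2 (one of the modules actually reloaded in this commit):

import importlib

import pytest
from google.protobuf import struct_pb2


@pytest.fixture(autouse=True, scope="module")
def cleanup():
  # Everything before `yield` runs once before the module's first test;
  # everything after it runs once after the module's last test.
  yield
  # Re-execute the generated proto module so mutated module-level state
  # does not leak into test modules that run later in the same session.
  importlib.reload(struct_pb2)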
nikelite authored Nov 1, 2024
1 parent c770a51 commit 9e66ed1
Showing 19 changed files with 36 additions and 109 deletions.
5 changes: 4 additions & 1 deletion MANIFEST.in
@@ -14,5 +14,8 @@ include tfx/proto/*.proto
 recursive-include tfx/orchestration/kubeflow/v2/testdata *
 
 recursive-include tfx/components/testdata *
+recursive-include tfx/orchestration/kubeflow/v2/testdata *
 
-include tfx/examples/imdb/data/
+include tfx/examples/imdb/data/*
+include tfx/orchestration/beam/testdata/*
+include tfx/orchestration/kubeflow/v2/container/testdata/*
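
A quick way to check that these new globs actually reach users is to build a source distribution and inspect its member list. A hedged sketch, assuming a source checkout in which `python -m build --sdist` has already produced dist/tfx-*.tar.gz:

import glob
import tarfile

# Pick the most recent sdist (assumes one was built as described above).
sdist_path = sorted(glob.glob("dist/tfx-*.tar.gz"))[-1]
with tarfile.open(sdist_path) as sdist:
  names = sdist.getnames()

# Spot-check the paths added in this MANIFEST.in change.
assert any("tfx/orchestration/beam/testdata/" in n for n in names)
assert any("tfx/orchestration/kubeflow/v2/container/testdata/" in n for n in names)
assert any("tfx/examples/imdb/data/" in n for n in names)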
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -40,7 +40,7 @@ Repository = "https://github.com/tensorflow/tfx"
 addopts = "--import-mode=importlib"
 testpaths = "tfx"
 python_files = "*_test.py"
-norecursedirs = ["custom_components", ".*", "*.egg"]
+norecursedirs = ["custom_components", ".*", "*.egg", "tfx/orchestration/experimental/core"]
 markers = [
     "e2e: end-to-end tests which are slow and require more dependencies (deselect with '-m \"not end_to_end\"')",
     "serial: mark tests that should not run in parallel",
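
Two exclusion mechanisms meet in this file, and they behave differently: norecursedirs stops pytest from collecting anything under the listed directories (so the deprecated experimental/core tests never run at all), whereas the e2e marker only tags tests that are still collected, so they can be deselected at run time. Note that the registered marker id is "e2e", matching the commit title's "not e2e", even though the marker's description string mentions "not end_to_end". A sketch of the run-time deselection, equivalent to `pytest -m "not e2e" tfx` on the command line:

import pytest

# Collect tests under tfx/ (minus anything in norecursedirs), then
# deselect every test carrying the e2e marker.
exit_code = pytest.main(["-m", "not e2e", "tfx"])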
9 changes: 9 additions & 0 deletions tfx/dsl/placeholder/proto_placeholder_test.py
@@ -15,7 +15,9 @@
 
 import base64
 import functools
+import importlib
 import os
+import pytest
 from typing import Any, Optional, TypeVar, Union
 
 import tensorflow as tf
@@ -34,6 +36,13 @@
 from google.protobuf import text_format
 from ml_metadata.proto import metadata_store_pb2
 
+
+
+@pytest.fixture(autouse=True,scope="module")
+def cleanup():
+  yield
+  importlib.reload(pipeline_pb2)
+
 _ExecutionInvocation = functools.partial(
     ph.make_proto, execution_invocation_pb2.ExecutionInvocation()
 )
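
One detail of importlib.reload matters for fixtures like the one above: reload re-executes the module's code inside the existing module object, so code that accesses classes through the module attribute (e.g. pipeline_pb2.Something) sees the restored state, while names bound earlier with `from module import X` keep pointing at the pre-reload objects. An illustrative sketch using a stdlib module (not from this commit):

import importlib
import json

from json import dumps  # binds the function object itself, not the module

importlib.reload(json)

# The module attribute was rebound to a freshly created function during
# the reload, but the name imported beforehand still references the old
# object; this is why tests should go through the module attribute.
assert dumps is not json.dumps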
(file header not captured)
@@ -69,8 +69,8 @@ def setUp(self):
         self._makeExample(age=5.0, language=0.0, label=0),
     ]
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
+  @pytest.mark.xfail(run=False, reason="This is based on experimental implementation,"
+                     "and the test fails.", strict=True)
   def testMakeSklearnPredictExtractor(self):
     """Tests that predictions are made from extracts for a single model."""
     feature_extractor = tfma.extractors.FeaturesExtractor(self._eval_config)
@@ -98,8 +98,8 @@ def check_result(actual):
 
     util.assert_that(predict_extracts, check_result)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
+  @pytest.mark.xfail(run=False, reason="This is based on experimental implementation,"
+                     "and the test fails.", strict=True)
   def testMakeSklearnPredictExtractorWithMultiModels(self):
     """Tests that predictions are made from extracts for multiple models."""
     eval_config = tfma.EvalConfig(model_specs=[
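
The edited marks combine xfail options whose interaction is easy to misread: run=False tells pytest not to execute the test at all (it is reported as xfailed without running), reason is just the displayed annotation, and strict=True turns an unexpected pass into a hard failure. With run=False the test can never pass, so strict only takes effect once run=False is removed. A minimal sketch:

import pytest


@pytest.mark.xfail(
    run=False,    # never executed; reported as xfail unconditionally
    reason="This is based on experimental implementation, and the test fails.",
    strict=True,  # once run=False is dropped, an unexpected pass fails the run
)
def test_make_sklearn_predict_extractor():
  ...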
5 changes: 0 additions & 5 deletions tfx/examples/ranking/struct2tensor_parsing_utils_test.py
@@ -15,7 +15,6 @@
 
 
 
-import pytest
 import itertools
 import unittest
 
@@ -172,15 +171,11 @@
 ]
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 @unittest.skipIf(struct2tensor_parsing_utils is None,
                  'Cannot import required modules. This can happen when'
                  ' struct2tensor is not available.')
 class ELWCDecoderTest(tf.test.TestCase):
 
-  #@pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-  #"If this test passes, please remove this mark.", strict=True)
   def testAllDTypes(self):
     context_features = [
         struct2tensor_parsing_utils.Feature('ctx.int', tf.int64),
3 changes: 0 additions & 3 deletions tfx/orchestration/beam/beam_dag_runner_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.orchestration.portable.beam_dag_runner."""
 
 
-import pytest
 import os
 from typing import Optional
 
@@ -172,8 +171,6 @@ def _run_node(self):
     _executed_components.append(self._node_id)
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class BeamDagRunnerTest(test_case_utils.TfxTest):
 
   def setUp(self):
8 changes: 8 additions & 0 deletions tfx/orchestration/data_types_utils_test.py
@@ -14,6 +14,8 @@
 """Tests for tfx.orchestration.data_types_utils."""
 
 
+import importlib
+import pytest
 from absl.testing import parameterized
 from tfx import types
 from tfx.orchestration import data_types_utils
@@ -32,6 +34,12 @@
 _DEFAULT_ARTIFACT_TYPE_NAME = 'Examples'
 
 
+@pytest.fixture(scope="module", autouse=True)
+def cleanup():
+  yield
+  importlib.reload(struct_pb2)
+
+
 def _create_artifact(uri: str) -> types.Artifact:
   artifact = types.Artifact(
       metadata_store_pb2.ArtifactType(name=_DEFAULT_ARTIFACT_TYPE_NAME))
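
For readers less familiar with fixture scoping: scope="module" plus autouse=True wraps the entire test module, with setup before the first test and teardown after the last, and no test has to request the fixture by name. A self-contained sketch of that lifecycle:

import pytest

events = []


@pytest.fixture(autouse=True, scope="module")
def cleanup():
  events.append("setup")      # once, before the first test in this module
  yield
  events.append("teardown")   # once, after the last test in this module


def test_first():
  assert events == ["setup"]


def test_second():
  assert events == ["setup"]  # still one setup: the fixture is module-scoped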
(file header not captured)
@@ -15,7 +15,6 @@
 
 
 
-import pytest
 import os
 from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2
 import tensorflow as tf
@@ -68,8 +67,6 @@
 }
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class KubeflowV2EntrypointUtilsTest(tf.test.TestCase):
 
   def setUp(self):
(file header not captured)
@@ -14,7 +14,6 @@
 """Tests for kubeflow_v2_run_executor.py."""
 
 
-import pytest
 import json
 import os
 from typing import Any, Mapping, Sequence
@@ -100,8 +99,6 @@ def Do(self, input_dict: Mapping[str, Sequence[artifact.Artifact]],
 _EXEC_PROPERTIES = {"key_1": "value_1", "key_2": 536870911}
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class KubeflowV2RunExecutorTest(
     test_case_utils.TfxTest, parameterized.TestCase
 ):
(file header not captured)
@@ -14,7 +14,6 @@
 
 
 
-import pytest
 import json
 import os
 
@@ -93,8 +92,6 @@ def _load_test_file(filename: str):
     ).read()
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class RunDriverTest(test_case_utils.TfxTest, parameterized.TestCase):
 
   def setUp(self):
9 changes: 0 additions & 9 deletions tfx/orchestration/local/local_dag_runner_test.py
@@ -18,7 +18,6 @@
 from typing import Any, Dict, List
 
 import absl.testing.absltest
-import pytest
 from tfx import types
 from tfx.dsl.compiler import compiler
 from tfx.dsl.components.base import base_component
@@ -165,17 +164,13 @@ def _getTestPipelineIR(self) -> pipeline_pb2.Pipeline:  # pylint: disable=invalid-name
     c = compiler.Compiler()
     return c.compile(test_pipeline)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testRun(self):
     local_dag_runner.LocalDagRunner().run(self._getTestPipeline())
     self.assertEqual(_executed_components, [
         '_FakeComponent.a', '_FakeComponent.b', '_FakeComponent.c',
         '_FakeComponent.d', '_FakeComponent.e'
     ])
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testPartialRun(self):
     local_dag_runner.LocalDagRunner().run(
         self._getTestPipeline(),
@@ -184,17 +179,13 @@ def testPartialRun(self):
         _executed_components,
         ['_FakeComponent.a', '_FakeComponent.b', '_FakeComponent.c'])
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testRunWithIR(self):
     local_dag_runner.LocalDagRunner().run_with_ir(self._getTestPipelineIR())
     self.assertEqual(_executed_components, [
         '_FakeComponent.a', '_FakeComponent.b', '_FakeComponent.c',
         '_FakeComponent.d', '_FakeComponent.e'
     ])
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testPartialRunWithIR(self):
     pr_opts = pipeline_pb2.PartialRun()
     pr_opts.to_nodes.append('c')
9 changes: 0 additions & 9 deletions tfx/orchestration/local/local_pipeline_test.py
@@ -28,7 +28,6 @@
 from typing import Any, List
 
 import absl.testing.absltest
-import pytest
 
 from tfx import types
 from tfx.dsl.compiler import compiler
@@ -182,17 +181,13 @@ def _getTestPipelineIR(self) -> pipeline_pb2.Pipeline:
     c = compiler.Compiler()
     return c.compile(test_pipeline)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelineRun(self):
     self.assertEqual(self.RAN_COMPONENTS, [])
 
     local_dag_runner.LocalDagRunner().run(self._getTestPipeline())
 
     self.assertEqual(self.RAN_COMPONENTS, ['Load', 'Train', 'Validate'])
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelinePartialRun(self):
     self.assertEqual(self.RAN_COMPONENTS, [])
 
@@ -202,17 +197,13 @@ def testSimplePipelinePartialRun(self):
 
     self.assertEqual(self.RAN_COMPONENTS, ['Load', 'Train'])
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelineRunWithIR(self):
     self.assertEqual(self.RAN_COMPONENTS, [])
 
     local_dag_runner.LocalDagRunner().run_with_ir(self._getTestPipelineIR())
 
     self.assertEqual(self.RAN_COMPONENTS, ['Load', 'Train', 'Validate'])
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelinePartialRunWithIR(self):
     self.assertEqual(self.RAN_COMPONENTS, [])
 
7 changes: 0 additions & 7 deletions tfx/orchestration/portable/inputs_utils_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.orchestration.portable.inputs_utils."""
 import collections
 import os
-import pytest
 
 from tfx import types
 from tfx.dsl.compiler import placeholder_utils
@@ -147,8 +146,6 @@ def testResolveParametersFail(self):
     with self.assertRaisesRegex(RuntimeError, 'Parameter value not ready'):
       inputs_utils.resolve_parameters(parameters)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testResolveInputArtifacts(self):
     pipeline = self.load_pipeline_proto(
         'pipeline_for_input_resolver_test.pbtxt')
@@ -254,8 +251,6 @@ def _setup_pipeline_for_input_resolver_test(self, num_examples=1):
     )
     self._examples = output_dict['output_examples']
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testResolveInputArtifacts_Normal(self):
     self._setup_pipeline_for_input_resolver_test()
 
@@ -266,8 +261,6 @@ def testResolveInputArtifacts_Normal(self):
     self.assertArtifactMapListEqual([{'examples_1': self._examples,
                                       'examples_2': self._examples}], result)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testResolveInputArtifacts_FilterOutInsufficient(self):
     self._setup_pipeline_for_input_resolver_test()
     self._my_transform.inputs.inputs['examples_1'].min_count = 2
19 changes: 0 additions & 19 deletions tfx/orchestration/portable/launcher_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for tfx.orchestration.portable.launcher."""
 
-import pytest
 import contextlib
 import copy
 import os
@@ -490,8 +489,6 @@ def testLauncher_EmptyOptionalInputTriggersExecution(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_PublishingNewArtifactsAndUseCache(self):
     # In this test case, there are two executions:
     # In the first one,trainer reads the fake upstream outputs and publish
@@ -578,8 +575,6 @@ def testLauncher_PublishingNewArtifactsAndUseCache(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_CacheIsSupportedForNodeWithNoOutput(self):
     # Even though a node has no output at all, the launcher should treat the
     # second execution as CACHED as long as the cache context is the same.
@@ -639,8 +634,6 @@ def testLauncher_CacheIsSupportedForNodeWithNoOutput(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_CacheDisabled(self):
     # In this test case, there are two executions:
     # In the first one,trainer reads the fake upstream outputs and publish
@@ -757,8 +750,6 @@ def testLauncher_CacheDisabled(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ReEntry(self):
     # Some executors or runtime environment may reschedule the launcher job
     # before the launcher job can publish any results of the execution to MLMD.
@@ -830,8 +821,6 @@ def create_test_launcher(executor_operators):
     execution_preparation_result = third_test_launcher._prepare_execution()
     self.assertFalse(execution_preparation_result.is_execution_needed)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ToleratesDoubleCleanup(self):
     # Some executors or runtime environment may delete stateful_working_dir,
     # tmp_dir and unexpectedly. The launcher should handle such cases gracefully
@@ -895,8 +884,6 @@ def testLauncher_ToleratesDoubleCleanup(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ExecutionFailed(self):
     # In the case that the executor failed and raises an execption.
     # An Execution will be published.
@@ -916,8 +903,6 @@ def testLauncher_ExecutionFailed(self):
     with self.assertRaises(FakeError):
       _ = test_launcher.launch()
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ExecutionFailedViaReturnCode(self):
     # In the case that the executor failed and raises an execption.
     # An Execution will be published.
@@ -965,8 +950,6 @@ def testLauncher_ExecutionFailedViaReturnCode(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_with_CustomDriver_NewSpan(self):
     self.reloadPipelineWithNewRunId()
     test_launcher = launcher.Launcher(
@@ -1019,8 +1002,6 @@ def testLauncher_with_CustomDriver_NewSpan(self):
         ],
     )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_with_CustomDriver_ExistingSpan(self):
     LauncherTest.fakeExampleGenOutput(self._mlmd_connection, self._example_gen,
                                       2, 1)
(The remaining changed files were not captured in this view.)
