diff --git a/pymc/testing.py b/pymc/testing.py
index 142c9222fe..12200db69b 100644
--- a/pymc/testing.py
+++ b/pymc/testing.py
@@ -775,24 +775,7 @@ def discrete_random_tester(
     assert p > alpha, str(point)
 
 
-class SeededTest:
-    random_seed = 20160911
-    random_state = None
-
-    @classmethod
-    def setup_class(cls):
-        nr.seed(cls.random_seed)
-
-    def setup_method(self):
-        nr.seed(self.random_seed)
-
-    def get_random_state(self, reset=False):
-        if self.random_state is None or reset:
-            self.random_state = nr.RandomState(self.random_seed)
-        return self.random_state
-
-
-class BaseTestDistributionRandom(SeededTest):
+class BaseTestDistributionRandom:
     """
     Base class for tests that new RandomVariables are correctly
     implemented, and that the mapping of parameters between the PyMC
@@ -863,8 +846,9 @@ class BaseTestDistributionRandom(SeededTest):
     sizes_to_check: Optional[List] = None
     sizes_expected: Optional[List] = None
     repeated_params_shape = 5
+    random_state = None
 
-    def test_distribution(self):
+    def test_distribution(self, seeded_test):
         self.validate_tests_list()
         if self.pymc_dist == pm.Wishart:
             with pytest.warns(UserWarning, match="can currently not be used for MCMC sampling"):
@@ -886,6 +870,11 @@ def test_distribution(self):
             else:
                 getattr(self, check_name)()
 
+    def get_random_state(self, reset=False):
+        if self.random_state is None or reset:
+            self.random_state = nr.RandomState(20160911)
+        return self.random_state
+
     def _instantiate_pymc_rv(self, dist_params=None):
         params = dist_params if dist_params else self.pymc_dist_params
         self.pymc_rv = self.pymc_dist.dist(
diff --git a/tests/conftest.py b/tests/conftest.py
index b6e4285a8f..bdb5f1b09a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -44,8 +44,7 @@ def strict_float32():
 
 @pytest.fixture(scope="function", autouse=False)
 def seeded_test():
-    # TODO: use this instead of SeededTest
-    np.random.seed(42)
+    np.random.seed(20160911)
 
 
 @pytest.fixture
diff --git a/tests/distributions/test_mixture.py b/tests/distributions/test_mixture.py
index f694035a94..c90c480977 100644
--- a/tests/distributions/test_mixture.py
+++ b/tests/distributions/test_mixture.py
@@ -79,7 +79,6 @@
     R,
     Rplus,
     Rplusbig,
-    SeededTest,
     Simplex,
     Unit,
     assert_moment_is_expected,
@@ -115,7 +114,10 @@ def generate_poisson_mixture_data(w, mu, size=1000):
     return x
 
 
-class TestMixture(SeededTest):
+class TestMixture:
+    def get_random_state(self):
+        return np.random.RandomState(20160911)
+
     def get_initial_point(self, model):
         """Get initial point with untransformed variables for posterior predictive sampling"""
         return {
@@ -477,7 +479,7 @@ def test_single_poisson_sampling(self):
             trace = sample(
                 5000,
                 step=step,
-                random_seed=self.random_seed,
+                random_seed=45354,
                 progressbar=False,
                 chains=1,
                 return_inferencedata=False,
@@ -502,7 +504,7 @@ def test_list_poissons_sampling(self):
                 5000,
                 chains=1,
                 step=Metropolis(),
-                random_seed=self.random_seed,
+                random_seed=5363567,
                 progressbar=False,
                 return_inferencedata=False,
             )
@@ -533,7 +535,7 @@ def test_list_normals_sampling(self):
                 5000,
                 chains=1,
                 step=Metropolis(),
-                random_seed=self.random_seed,
+                random_seed=645334,
                 progressbar=False,
                 return_inferencedata=False,
             )
@@ -785,8 +787,8 @@ def test_preventing_mixing_cont_and_discrete(self):
             )
 
 
-class TestNormalMixture(SeededTest):
-    def test_normal_mixture_sampling(self):
+class TestNormalMixture:
+    def test_normal_mixture_sampling(self, seeded_test):
         norm_w = np.array([0.75, 0.25])
         norm_mu = np.array([0.0, 5.0])
         norm_sigma = np.ones_like(norm_mu)
@@ -804,7 +806,7 @@ def test_normal_mixture_sampling(self):
             trace = sample(
                 5000,
                 step=step,
-                random_seed=self.random_seed,
+                random_seed=20160911,
                 progressbar=False,
                 chains=1,
                 return_inferencedata=False,
@@ -816,7 +818,7 @@ def test_normal_mixture_sampling(self):
     @pytest.mark.parametrize(
         "nd, ncomp", [(tuple(), 5), (1, 5), (3, 5), ((3, 3), 5), (3, 3), ((3, 3), 3)], ids=str
     )
-    def test_normal_mixture_nd(self, nd, ncomp):
+    def test_normal_mixture_nd(self, seeded_test, nd, ncomp):
         nd = to_tuple(nd)
         ncomp = int(ncomp)
         comp_shape = nd + (ncomp,)
@@ -865,7 +867,7 @@ def test_normal_mixture_nd(self, nd, ncomp):
         assert_allclose(logp0, logp1)
         assert_allclose(logp0, logp2)
 
-    def test_random(self):
+    def test_random(self, seeded_test):
         def ref_rand(size, w, mu, sigma):
             component = np.random.choice(w.size, size=size, p=w)
             return np.random.normal(mu[component], sigma[component], size=size)
@@ -894,9 +896,12 @@ def ref_rand(size, w, mu, sigma):
     )
 
 
-class TestMixtureVsLatent(SeededTest):
+class TestMixtureVsLatent:
     """This class contains tests that compare a marginal Mixture with a latent indexed Mixture"""
 
+    def get_random_state(self):
+        return np.random.RandomState(20160911)
+
     def test_scalar_components(self):
         nd = 3
         npop = 4
@@ -1013,7 +1018,7 @@ def loose_logp(model, vars):
         assert_allclose(mix_logp, latent_mix_logp, rtol=rtol)
 
 
-class TestMixtureSameFamily(SeededTest):
+class TestMixtureSameFamily:
     """Tests that used to belong to deprecated `TestMixtureSameFamily`.
 
     The functionality is now expected to be provided by `Mixture`
@@ -1021,13 +1026,12 @@ class TestMixtureSameFamily(SeededTest):
 
     @classmethod
     def setup_class(cls):
-        super().setup_class()
         cls.size = 50
         cls.n_samples = 1000
         cls.mixture_comps = 10
 
     @pytest.mark.parametrize("batch_shape", [(3, 4), (20,)], ids=str)
-    def test_with_multinomial(self, batch_shape):
+    def test_with_multinomial(self, seeded_test, batch_shape):
         p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
         p /= p.sum(axis=-1, keepdims=True)
         n = 100 * np.ones((*batch_shape, 1))
@@ -1062,7 +1066,7 @@ def test_with_multinomial(self, batch_shape):
             rtol,
         )
 
-    def test_with_mvnormal(self):
+    def test_with_mvnormal(self, seeded_test):
         # 10 batch, 3-variate Gaussian
         mu = np.random.randn(self.mixture_comps, 3)
         mat = np.random.randn(3, 3)
diff --git a/tests/distributions/test_simulator.py b/tests/distributions/test_simulator.py
index 0e3b86bd44..b4d16fe161 100644
--- a/tests/distributions/test_simulator.py
+++ b/tests/distributions/test_simulator.py
@@ -33,10 +33,9 @@
 from pymc.initial_point import make_initial_point_fn
 from pymc.pytensorf import compile_pymc
 from pymc.smc.kernels import IMH
-from pymc.testing import SeededTest
 
 
-class TestSimulator(SeededTest):
+class TestSimulator:
     @staticmethod
     def count_rvs(end_node):
         return len(
@@ -60,7 +59,6 @@ def quantiles(x):
         return np.quantile(x, [0.25, 0.5, 0.75])
 
     def setup_class(self):
-        super().setup_class()
         self.data = np.random.normal(loc=0, scale=1, size=1000)
 
         with pm.Model() as self.SMABC_test:
@@ -75,7 +73,7 @@ def setup_class(self):
             c = pm.Potential("c", pm.math.switch(a > 0, 0, -np.inf))
             s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)
 
-    def test_one_gaussian(self):
+    def test_one_gaussian(self, seeded_test):
         assert self.count_rvs(self.SMABC_test.logp()) == 1
 
         with self.SMABC_test:
@@ -95,7 +93,7 @@ def test_one_gaussian(self):
         assert abs(self.data.std() - po_p["s"].std()) < 0.10
 
     @pytest.mark.parametrize("floatX", ["float32", "float64"])
-    def test_custom_dist_sum_stat(self, floatX):
+    def test_custom_dist_sum_stat(self, seeded_test, floatX):
         with pytensor.config.change_flags(floatX=floatX):
             with pm.Model() as m:
                 a = pm.Normal("a", mu=0, sigma=1)
@@ -118,7 +116,7 @@ def test_custom_dist_sum_stat(self, floatX):
                 pm.sample_smc(draws=100)
 
     @pytest.mark.parametrize("floatX", ["float32", "float64"])
-    def test_custom_dist_sum_stat_scalar(self, floatX):
+    def test_custom_dist_sum_stat_scalar(self, seeded_test, floatX):
         """
         Test that automatically wrapped functions cope well with scalar inputs
         """
@@ -149,14 +147,14 @@ def test_custom_dist_sum_stat_scalar(self, floatX):
             )
             assert self.count_rvs(m.logp()) == 1
 
-    def test_model_with_potential(self):
+    def test_model_with_potential(self, seeded_test):
         assert self.count_rvs(self.SMABC_potential.logp()) == 1
 
         with self.SMABC_potential:
             trace = pm.sample_smc(draws=100, chains=1, return_inferencedata=False)
             assert np.all(trace["a"] >= 0)
 
-    def test_simulator_metropolis_mcmc(self):
+    def test_simulator_metropolis_mcmc(self, seeded_test):
         with self.SMABC_test as m:
             step = pm.Metropolis([m.rvs_to_values[m["a"]], m.rvs_to_values[m["b"]]])
             trace = pm.sample(step=step, return_inferencedata=False)
@@ -164,7 +162,7 @@ def test_simulator_metropolis_mcmc(self):
         assert abs(self.data.mean() - trace["a"].mean()) < 0.05
         assert abs(self.data.std() - trace["b"].mean()) < 0.05
 
-    def test_multiple_simulators(self):
+    def test_multiple_simulators(self, seeded_test):
         true_a = 2
         true_b = -2
 
@@ -214,9 +212,9 @@ def test_multiple_simulators(self):
         assert abs(true_a - trace["a"].mean()) < 0.05
         assert abs(true_b - trace["b"].mean()) < 0.05
 
-    def test_nested_simulators(self):
+    def test_nested_simulators(self, seeded_test):
         true_a = 2
-        rng = self.get_random_state()
+        rng = np.random.RandomState(20160911)
         data = rng.normal(true_a, 0.1, size=1000)
 
         with pm.Model() as m:
@@ -244,7 +242,7 @@ def test_nested_simulators(self):
 
         assert np.abs(true_a - trace["sim1"].mean()) < 0.1
 
-    def test_upstream_rngs_not_in_compiled_logp(self):
+    def test_upstream_rngs_not_in_compiled_logp(self, seeded_test):
         smc = IMH(model=self.SMABC_test)
         smc.initialize_population()
         smc._initialize_kernel()
@@ -263,7 +261,7 @@ def test_upstream_rngs_not_in_compiled_logp(self):
         ]
         assert len(shared_rng_vars) == 1
 
-    def test_simulator_error_msg(self):
+    def test_simulator_error_msg(self, seeded_test):
         msg = "The distance metric not_real is not implemented"
         with pytest.raises(ValueError, match=msg):
             with pm.Model() as m:
@@ -280,7 +278,7 @@ def test_simulator_error_msg(self):
                 sim = pm.Simulator("sim", self.normal_sim, 0, params=(1))
 
     @pytest.mark.xfail(reason="KL not refactored")
-    def test_automatic_use_of_sort(self):
+    def test_automatic_use_of_sort(self, seeded_test):
         with pm.Model() as model:
             s_k = pm.Simulator(
                 "s_k",
@@ -292,7 +290,7 @@ def test_automatic_use_of_sort(self):
             )
         assert s_k.distribution.sum_stat is pm.distributions.simulator.identity
 
-    def test_name_is_string_type(self):
+    def test_name_is_string_type(self, seeded_test):
         with self.SMABC_potential:
             assert not self.SMABC_potential.name
             with warnings.catch_warnings():
@@ -303,7 +301,7 @@ def test_name_is_string_type(self):
                 trace = pm.sample_smc(draws=10, chains=1, return_inferencedata=False)
             assert isinstance(trace._straces[0].name, str)
 
-    def test_named_model(self):
+    def test_named_model(self, seeded_test):
         # Named models used to fail with Simulator because the arguments to the
         # random fn used to be passed by name. This is no longer true.
         # https://github.com/pymc-devs/pymc/pull/4365#issuecomment-761221146
@@ -323,7 +321,7 @@ def test_named_model(self):
     @pytest.mark.parametrize("mu", [0, np.arange(3)], ids=str)
     @pytest.mark.parametrize("sigma", [1, np.array([1, 2, 5])], ids=str)
     @pytest.mark.parametrize("size", [None, 3, (5, 3)], ids=str)
-    def test_simulator_moment(self, mu, sigma, size):
+    def test_simulator_moment(self, seeded_test, mu, sigma, size):
         def normal_sim(rng, mu, sigma, size):
             return rng.normal(mu, sigma, size=size)
 
@@ -357,7 +355,7 @@ def normal_sim(rng, mu, sigma, size):
 
         assert np.all(np.abs((result - expected_sample_mean) / expected_sample_mean_std) < cutoff)
 
-    def test_dist(self):
+    def test_dist(self, seeded_test):
         x = pm.Simulator.dist(self.normal_sim, 0, 1, sum_stat="sort", shape=(3,))
         x = cloudpickle.loads(cloudpickle.dumps(x))
 
diff --git a/tests/distributions/test_transform.py b/tests/distributions/test_transform.py
index f0979938e3..cacc1abd79 100644
--- a/tests/distributions/test_transform.py
+++ b/tests/distributions/test_transform.py
@@ -33,7 +33,6 @@
     R,
     Rminusbig,
     Rplusbig,
-    SeededTest,
    Simplex,
    SortedVector,
    Unit,
@@ -301,7 +300,7 @@ def test_chain_jacob_det():
     check_jacobian_det(chain_tranf, Vector(R, 4), pt.vector, floatX(np.zeros(4)), elemwise=False)
 
 
-class TestElementWiseLogp(SeededTest):
+class TestElementWiseLogp:
     def build_model(self, distfam, params, size, transform, initval=None):
         if initval is not None:
             initval = pm.floatX(initval)
diff --git a/tests/sampler_fixtures.py b/tests/sampler_fixtures.py
index 549ed606e3..34fe7127e6 100644
--- a/tests/sampler_fixtures.py
+++ b/tests/sampler_fixtures.py
@@ -21,7 +21,6 @@
 import pymc as pm
 
 from pymc.backends.arviz import to_inference_data
-from pymc.testing import SeededTest
 from pymc.util import get_var_name
 
 
@@ -135,10 +134,11 @@ def make_model(cls):
         return model
 
 
-class BaseSampler(SeededTest):
+class BaseSampler:
     @classmethod
     def setup_class(cls):
-        super().setup_class()
+        cls.random_seed = 20160911
+        np.random.seed(cls.random_seed)
         cls.model = cls.make_model()
         with cls.model:
             cls.step = cls.make_step()
diff --git a/tests/sampling/test_forward.py b/tests/sampling/test_forward.py
index 38c31282b7..12440e6908 100644
--- a/tests/sampling/test_forward.py
+++ b/tests/sampling/test_forward.py
@@ -40,10 +40,10 @@
     get_vars_in_point_list,
     observed_dependent_deterministics,
 )
-from pymc.testing import SeededTest, fast_unstable_sampling_mode
+from pymc.testing import fast_unstable_sampling_mode
 
 
-class TestDraw(SeededTest):
+class TestDraw:
     def test_univariate(self):
         with pm.Model():
             x = pm.Normal("x")
@@ -438,7 +438,7 @@ def test_mutable_coords_volatile(self):
             }
 
 
-class TestSamplePPC(SeededTest):
+class TestSamplePPC:
     def test_normal_scalar(self):
         nchains = 2
         ndraws = 500
@@ -466,7 +466,7 @@ def test_normal_scalar(self):
            assert ppc["a"].shape == (nchains, ndraws)
 
            # test default case
-            random_state = self.get_random_state()
+            random_state = np.random.RandomState(20160911)
            idata_ppc = pm.sample_posterior_predictive(
                trace, var_names=["a"], random_seed=random_state
            )
@@ -573,7 +573,7 @@ def test_sum_normal(self):
         _, pval = stats.kstest(ppc["b"].flatten(), stats.norm(scale=scale).cdf)
         assert pval > 0.001
 
-    def test_model_not_drawable_prior(self):
+    def test_model_not_drawable_prior(self, seeded_test):
         data = np.random.poisson(lam=10, size=200)
         model = pm.Model()
         with model:
@@ -1039,8 +1039,8 @@ def point_list_arg_bug_fixture() -> Tuple[pm.Model, pm.backends.base.MultiTrace]
     return pmodel, trace
 
 
-class TestSamplePriorPredictive(SeededTest):
-    def test_ignores_observed(self):
+class TestSamplePriorPredictive:
+    def test_ignores_observed(self, seeded_test):
         observed = np.random.normal(10, 1, size=200)
         with pm.Model():
             # Use a prior that's way off to show we're ignoring the observed variables
@@ -1081,7 +1081,7 @@ def test_multivariate(self):
 
         assert trace.prior["m"].shape == (1, 10, 4)
 
-    def test_multivariate2(self):
+    def test_multivariate2(self, seeded_test):
         # Added test for issue #3271
         mn_data = np.random.multinomial(n=100, pvals=[1 / 6.0] * 6, size=10)
         with pm.Model() as dm_model:
@@ -1114,7 +1114,7 @@ def test_layers(self):
         avg = np.stack([b_sampler() for i in range(10000)]).mean(0)
         npt.assert_array_almost_equal(avg, 0.5 * np.ones((10,)), decimal=2)
 
-    def test_transformed(self):
+    def test_transformed(self, seeded_test):
         n = 18
         at_bats = 45 * np.ones(n, dtype=int)
         hits = np.random.randint(1, 40, size=n, dtype=int)
@@ -1135,7 +1135,7 @@ def test_transformed(self):
         assert gen.prior_predictive["y"].shape == (1, draws, n)
         assert "thetas" in gen.prior.data_vars
 
-    def test_shared(self):
+    def test_shared(self, seeded_test):
         n1 = 10
         obs = shared(np.random.rand(n1) < 0.5)
         draws = 50
@@ -1157,7 +1157,7 @@ def test_shared(self):
         assert gen2.prior_predictive["y"].shape == (1, draws, n2)
         assert gen2.prior["o"].shape == (1, draws, n2)
 
-    def test_density_dist(self):
+    def test_density_dist(self, seeded_test):
         obs = np.random.normal(-1, 0.1, size=10)
         with pm.Model():
             mu = pm.Normal("mu", 0, 1)
@@ -1344,7 +1344,7 @@ def test_distinct_rvs():
     assert np.array_equal(pp_samples["y"], pp_samples_2["y"])
 
 
-class TestNestedRandom(SeededTest):
+class TestNestedRandom:
     def build_model(self, distribution, shape, nested_rvs_info):
         with pm.Model() as model:
             nested_rvs = {}
diff --git a/tests/sampling/test_mcmc.py b/tests/sampling/test_mcmc.py
index 90ed8a63f8..ef13ae2c19 100644
--- a/tests/sampling/test_mcmc.py
+++ b/tests/sampling/test_mcmc.py
@@ -45,13 +45,12 @@
     Metropolis,
     Slice,
 )
-from pymc.testing import SeededTest, fast_unstable_sampling_mode
+from pymc.testing import fast_unstable_sampling_mode
 from tests.models import simple_init
 
 
-class TestInitNuts(SeededTest):
+class TestInitNuts:
     def setup_method(self):
-        super().setup_method()
         self.model, self.start, self.step, _ = simple_init()
 
     def test_checks_seeds_kwarg(self):
@@ -60,9 +59,8 @@ def test_checks_seeds_kwarg(self):
             pm.sampling.mcmc.init_nuts(chains=2, random_seed=[1])
 
 
-class TestSample(SeededTest):
+class TestSample:
     def setup_method(self):
-        super().setup_method()
         self.model, self.start, self.step, _ = simple_init()
 
     @pytest.mark.parametrize("init", ("jitter+adapt_diag", "advi", "map"))
@@ -168,7 +166,7 @@ def test_sample_init(self):
                 "tune": 120,
                 "n_init": 1000,
                 "draws": 50,
-                "random_seed": self.random_seed,
+                "random_seed": 20160911,
            }
            with warnings.catch_warnings(record=True) as rec:
                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
@@ -196,7 +194,7 @@ def test_parallel_start(self):
                 cores=2,
                 discard_tuned_samples=False,
                 initvals=[{"x": [10, 10]}, {"x": [-10, -10]}],
-                random_seed=self.random_seed,
+                random_seed=20160911,
             )
         assert idata.warmup_posterior["x"].sel(chain=0, draw=0).values[0] > 0
         assert idata.warmup_posterior["x"].sel(chain=1, draw=0).values[0] < 0
@@ -241,7 +239,7 @@ def test_sample_callback(self):
                 chains=2,
                 step=self.step,
                 cores=cores,
-                random_seed=self.random_seed,
+                random_seed=20160911,
                 callback=callback,
             )
            assert callback.called
@@ -262,7 +260,7 @@ def callback(trace, draw):
                 chains=1,
                 step=self.step,
                 cores=1,
-                random_seed=self.random_seed,
+                random_seed=20160911,
                 callback=callback,
                 return_inferencedata=False,
             )
@@ -519,7 +517,7 @@ def test_partial_trace_unsupported():
 
 
 @pytest.mark.xfail(condition=(pytensor.config.floatX == "float32"), reason="Fails on float32")
-class TestNamedSampling(SeededTest):
+class TestNamedSampling:
     def test_shared_named(self):
         G_var = shared(value=np.atleast_2d(1.0), shape=(1, None), name="G")
 
@@ -838,8 +836,8 @@ def test_float32(self):
                 pm.sample(draws=10, tune=10, chains=1, step=sampler())
 
 
-class TestShared(SeededTest):
-    def test_sample(self):
+class TestShared:
+    def test_sample(self, seeded_test):
         x = np.random.normal(size=100)
         y = x + np.random.normal(scale=1e-2, size=100)
 
diff --git a/tests/smc/test_smc.py b/tests/smc/test_smc.py
index 5477498360..127d8db2ae 100644
--- a/tests/smc/test_smc.py
+++ b/tests/smc/test_smc.py
@@ -26,15 +26,13 @@
 from pymc.backends.base import MultiTrace
 from pymc.pytensorf import floatX
 from pymc.smc.kernels import IMH, systematic_resampling
-from pymc.testing import SeededTest
 from tests.helpers import assert_random_state_equal
 
 
-class TestSMC(SeededTest):
+class TestSMC:
     """Tests for the default SMC kernel"""
 
     def setup_class(self):
-        super().setup_class()
         self.samples = 1000
         n = 4
         mu1 = np.ones(n) * 0.5
@@ -107,7 +105,7 @@ def test_discrete_rounding_proposal(self):
 
     def test_unobserved_bernoulli(self):
         n = 10
-        rng = self.get_random_state()
+        rng = np.random.RandomState(20160911)
         z_true = np.zeros(n, dtype=int)
         z_true[int(n / 2) :] = 1
         y = st.norm(np.array([-1, 1])[z_true], 0.25).rvs(random_state=rng)
@@ -258,9 +256,9 @@ def test_deprecated_abc_args(self):
                 pm.sample_smc(draws=10, chains=1, save_log_pseudolikelihood=True)
 
 
-class TestMHKernel(SeededTest):
+class TestMHKernel:
     def test_normal_model(self):
-        data = st.norm(10, 0.5).rvs(1000, random_state=self.get_random_state())
+        data = st.norm(10, 0.5).rvs(1000, random_state=np.random.RandomState(20160911))
         initial_rng_state = np.random.get_state()
 
         with pm.Model() as m:
diff --git a/tests/test_data.py b/tests/test_data.py
index 87998e63c9..834f698cba 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -29,10 +29,9 @@
 
 from pymc.data import is_minibatch
 from pymc.pytensorf import GeneratorOp, floatX
-from pymc.testing import SeededTest
 
 
-class TestData(SeededTest):
+class TestData:
     def test_deterministic(self):
         data_values = np.array([0.5, 0.4, 5, 2])
         with pm.Model() as model:
@@ -40,7 +39,7 @@ def test_deterministic(self):
             pm.Normal("y", 0, 1, observed=X)
         model.compile_logp()(model.initial_point())
 
-    def test_sample(self):
+    def test_sample(self, seeded_test):
         x = np.random.normal(size=100)
         y = x + np.random.normal(scale=1e-2, size=100)
 
@@ -308,7 +307,7 @@ def test_model_to_graphviz_for_model_with_data_container(self):
         for expected in expected_substrings:
             assert expected in g.source
 
-    def test_explicit_coords(self):
+    def test_explicit_coords(self, seeded_test):
         N_rows = 5
         N_cols = 7
         data = np.random.uniform(size=(N_rows, N_cols))
@@ -371,7 +370,7 @@ def test_symbolic_coords(self):
         assert pmodel.dim_lengths["row"].eval() == 4
         assert pmodel.dim_lengths["column"].eval() == 5
 
-    def test_implicit_coords_series(self):
+    def test_implicit_coords_series(self, seeded_test):
         pd = pytest.importorskip("pandas")
         ser_sales = pd.Series(
             data=np.random.randint(low=0, high=30, size=22),
@@ -385,7 +384,7 @@ def test_implicit_coords_series(self):
         assert len(pmodel.coords["date"]) == 22
         assert pmodel.named_vars_to_dims == {"sales": ("date",)}
 
-    def test_implicit_coords_dataframe(self):
+    def test_implicit_coords_dataframe(self, seeded_test):
         pd = pytest.importorskip("pandas")
         N_rows = 5
         N_cols = 7
diff --git a/tests/test_math.py b/tests/test_math.py
index ff9c8708c0..5ee4cddc71 100644
--- a/tests/test_math.py
+++ b/tests/test_math.py
@@ -39,7 +39,6 @@
     softmax,
 )
 from pymc.pytensorf import floatX
-from pymc.testing import SeededTest
 from tests.helpers import verify_grad
 
 
@@ -205,9 +204,8 @@ def test_logdiffexp():
     assert np.allclose(logdiffexp(a, b).eval(), 0)
 
 
-class TestLogDet(SeededTest):
+class TestLogDet:
     def setup_method(self):
-        super().setup_method()
         np.random.seed(899853)
         self.op_class = LogDet
         self.op = logdet
diff --git a/tests/test_model.py b/tests/test_model.py
index c4d04918a0..acad9afe4a 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -47,7 +47,6 @@
 from pymc.logprob.basic import conditional_logp, transformed_conditional_logp
 from pymc.logprob.transforms import IntervalTransform
 from pymc.model import Point, ValueGradFunction, modelcontext
-from pymc.testing import SeededTest
 from pymc.util import _FutureWarningValidatingScratchpad
 from pymc.variational.minibatch_rv import MinibatchRandomVariable
 from tests.models import simple_model
@@ -647,10 +646,7 @@ def test_eval_rv_shapes(self):
         assert shapes["from_dims"] == (3, 4)
 
 
-class TestCheckStartVals(SeededTest):
-    def setup_method(self):
-        super().setup_method()
-
+class TestCheckStartVals:
     def test_valid_start_point(self):
         with pm.Model() as model:
             a = pm.Uniform("a", lower=0.0, upper=1.0)
@@ -1519,7 +1515,7 @@ def test_symbolic_random_variable(self):
         )
 
 
-class TestShared(SeededTest):
+class TestShared:
     def test_deterministic(self):
         with pm.Model() as model:
             data_values = np.array([0.5, 0.4, 5, 2])
diff --git a/tests/test_model_graph.py b/tests/test_model_graph.py
index 0ad38737e7..6fadf54a4b 100644
--- a/tests/test_model_graph.py
+++ b/tests/test_model_graph.py
@@ -25,7 +25,6 @@
 
 from pymc.exceptions import ImputationWarning
 from pymc.model_graph import ModelGraph, model_to_graphviz, model_to_networkx
-from pymc.testing import SeededTest
 
 
 def school_model():
@@ -44,7 +43,7 @@ def school_model():
     return schools
 
 
-class BaseModelNXTest(SeededTest):
+class BaseModelNXTest:
     network_model = {
         "graph_attr_dict_factory": dict,
         "node_dict_factory": dict,
@@ -263,7 +262,7 @@ def model_non_random_variable_rvs():
     return model, compute_graph, plates
 
 
-class BaseModelGraphTest(SeededTest):
+class BaseModelGraphTest:
     model_func = None
 
     @classmethod
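Reviewer note: the sketch below is not part of the patch; it illustrates the migration pattern the diff applies. Tests request the `seeded_test` fixture as an argument instead of inheriting from `SeededTest`, and the fixture reseeds NumPy's global RNG before each test body. The fixture matches the one defined in tests/conftest.py above; `TestExample` and `test_reproducible_draws` are hypothetical names used only for illustration.

import numpy as np
import pytest


@pytest.fixture(scope="function", autouse=False)
def seeded_test():
    # Same behavior as the fixture in tests/conftest.py: reseed the global
    # RNG so any test that requests the fixture sees deterministic draws.
    np.random.seed(20160911)


class TestExample:  # hypothetical class, not part of the PyMC test suite
    def test_reproducible_draws(self, seeded_test):
        # The fixture has already run, so global draws are deterministic
        # without a SeededTest base class or setup_method machinery.
        first = np.random.normal(size=3)
        np.random.seed(20160911)  # reset manually to compare
        second = np.random.normal(size=3)
        assert np.allclose(first, second)

One design consequence worth noting: because the fixture is opt-in rather than inherited, each test that relies on global-RNG determinism must name `seeded_test` explicitly, which is why the diff threads the parameter through so many test signatures.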