diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
index cad34ac..3e3abce 100644
--- a/.github/workflows/python-test.yml
+++ b/.github/workflows/python-test.yml
@@ -1,7 +1,7 @@
-# This workflow will install Python dependencies, run tests and lint with a single version of Python
+# This workflow will install Python dependencies, run tests, and build distribution artifacts with Poetry
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
 
-name: CPM test and test deploy
+name: CPM test and build
 
 on:
   push:
@@ -18,17 +18,26 @@ jobs:
     steps:
     - uses: actions/checkout@v3
+      name: Checkout code
     - name: Set up Python 3.11
       uses: actions/setup-python@v4
       with:
        python-version: 3.11
+    - name: Install Poetry
+      run: |
+        curl -sSL https://install.python-poetry.org | python3 -
+    - name: Configure Poetry
+      run: |
+        poetry config virtualenvs.create false
     - name: Install dependencies
       run: |
-        python -m pip install --upgrade pip
-        pip install scikit-learn pandas numpy pingouin typer pytest pytest-cov
+        pip install --upgrade pip
+        poetry install --verbose
     - name: Test with pytest
       run: |
-        PYTHONPATH=./ pytest ./tests --cov=./cpm_python --full-trace
+        export PYTHONPATH=$(pwd)
+        poetry run pytest ./tests --cov=./cpm_python --full-trace
+
 
   build:
     name: Build distribution
     runs-on: ubuntu-latest
@@ -42,9 +51,17 @@
       uses: actions/setup-python@v4
       with:
         python-version: 3.11
-    - name: Install pypa/build
-      run: pip install build pbr wheel
-    - name: Build a binary wheel and a source tarball
-      run: python -m build -n --sdist --wheel --outdir dist/
+    - name: Install Poetry
+      run: |
+        curl -sSL https://install.python-poetry.org | python3 -
+    - name: Configure Poetry
+      run: |
+        poetry config virtualenvs.create false # Use the system environment
+    - name: Install dependencies
+      run: |
+        poetry install
+    - name: Build distribution
+      run: |
+        poetry build --no-interaction
     - name: List built artifacts
       run: ls -al dist/
diff --git a/pyproject.toml b/pyproject.toml
index 3062b0f..5c8d99d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,12 +1,26 @@
 # pyproject.toml
-
 [build-system]
-requires = ["setuptools>=42", "wheel"]
-build-backend = "setuptools.build_meta"
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
 
-[project]
+[tool.poetry]
 name = "cpm_python"
 version = "0.1.0"
+description = "Connectome-based Predictive Modeling Python Package"
+authors = ["Nils Winter <nils.r.winter@uni-muenster.de>"]
+readme = "README.md"
+packages = [{ include = "cpm" }]
 
-[tool.setuptools.packages.find]
-where = ["cpm"]
+[tool.poetry.dependencies]
+python = ">=3.11"
+bleach = "*"
+tinycss2 = "*"
+numpy = "*"
+pandas = "*"
+scikit-learn = "*"
+pingouin = "*"
+streamlit = "*"
+nilearn = "*"
+typer = "*"
+pytest = "*"
+pytest-cov = "*"
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index dce84a8..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,30 +0,0 @@
-# setup.cfg
-[metadata]
-name = Connectome-based Predictive Modeling Python Package
-version = 0.1.0
-author = MMLL
-author_email = nils.r.winter@uni-muenster.de
-description = An example Python package
-long_description = file: README.md
-long_description_content_type = text/markdown
-classifiers =
-    Programming Language :: Python :: 3
-    License :: OSI Approved :: MIT License
-    Operating System :: OS Independent
-
-[options]
-package_dir =
-    = cpm
-packages = find:
-python_requires = >=3.6
-install_requires =
-    numpy
-    pandas
-    scikit-learn
-    pingouin
-    streamlit
-    nilearn
-    typer
-
-[options.packages.find]
-where = src
\ No newline at end of file
diff --git a/tests/test_edge_selection.py b/tests/test_edge_selection.py
index 45cb136..d1a292b 100644
--- a/tests/test_edge_selection.py
+++ b/tests/test_edge_selection.py
@@ -4,7 +4,7 @@
 import pandas as pd
 import pingouin as pg
 
-from scipy.stats import pearsonr, spearmanr
+from scipy.stats import pearsonr, spearmanr, t
 from cpm.simulate_data import simulate_regression_data
 from cpm.edge_selection import (pearson_correlation_with_pvalues, spearman_correlation_with_pvalues,
                                 semi_partial_correlation_pearson, semi_partial_correlation_spearman)
@@ -16,50 +16,57 @@
     def setUp(self):
         super(TestEdgeStatistics, self).setUp()
         self.X, self.y, self.covariates = simulate_regression_data(n_samples=100, n_features=45)
 
-    def _test_correlation(self, method, cpm_func, scipy_func):
-        """Generalized test for correlation with p-values"""
-        cpm_r, cpm_p = cpm_func(self.y, self.X)
-        scipy_r, scipy_p = [], []
-
-        for feature in range(self.X.shape[1]):
-            c = scipy_func(self.X[:, feature], self.y)
-            scipy_r.append(c.correlation if method == 'pearson' else c.statistic)
-            scipy_p.append(c.pvalue)
-
-        np.testing.assert_almost_equal(np.array(scipy_r), cpm_r, decimal=10)
-        np.testing.assert_almost_equal(np.array(scipy_p), cpm_p, decimal=10)
-
     def test_cpm_pearson(self):
-        self._test_correlation('pearson', pearson_correlation_with_pvalues, pearsonr)
+        """Test CPM implementation of Pearson correlation with p-values"""
+        cpm_r, cpm_p = pearson_correlation_with_pvalues(self.y, self.X)
+        scipy_r = list()
+        scipy_p = list()
+        for feature in range(self.X.shape[1]):
+            c, p = pearsonr(self.X[:, feature], self.y)
+            scipy_r.append(c)
+            scipy_p.append(p)
+        scipy_r = np.array(scipy_r)
+        scipy_p = np.array(scipy_p)
+        np.testing.assert_almost_equal(scipy_r, cpm_r, decimal=10)
+        np.testing.assert_almost_equal(scipy_p, cpm_p, decimal=10)
 
     def test_cpm_spearman(self):
-        self._test_correlation('spearman', spearman_correlation_with_pvalues, spearmanr)
+        """Test CPM implementation of Spearman correlation with p-values"""
+        cpm_r, cpm_p = spearman_correlation_with_pvalues(self.y, self.X)
+        scipy_r = list()
+        scipy_p = list()
+        for feature in range(self.X.shape[1]):
+            c, p = spearmanr(self.X[:, feature], self.y)
+            scipy_r.append(c)
+            scipy_p.append(p)
+        scipy_r = np.array(scipy_r)
+        scipy_p = np.array(scipy_p)
+        np.testing.assert_almost_equal(scipy_r, cpm_r, decimal=10)
+        np.testing.assert_almost_equal(scipy_p, cpm_p, decimal=10)
 
-    def _test_semi_partial_correlation(self, method, func):
+    def test_semi_partial_correlation_pearson(self):
         # Calculate partial correlation using the provided function
-        partial_corr, p_values = func(self.y, self.X, self.covariates)
+        partial_corr, p_values = semi_partial_correlation_pearson(self.y, self.X, self.covariates)
 
-        # Prepare DataFrame
+        # Calculate partial correlation using pingouin
         df = pd.DataFrame(np.column_stack([self.y, self.X, self.covariates]),
-                          columns=["y"] + [f"x{i}" for i in range(self.X.shape[1])] + [f"cov{i}" for i in
-                                          range(self.covariates.shape[1])])
-        pcorr_pingouin, pval_pingouin = [], []
-
+                          columns=["y"] + [f"x{i}" for i in range(self.X.shape[1])] + [f"cov{i}" for i in range(self.covariates.shape[1])])
+        pcorr_pingouin = []
+        pval_pingouin = []
         for i in range(self.X.shape[1]):
-            result = pg.partial_corr(data=df, x="y", y=f"x{i}",
-                                     covar=[f"cov{j}" for j in range(self.covariates.shape[1])],
-                                     method=method)
+            result = pg.partial_corr(data=df, x="y", y=f"x{i}", covar=[f"cov{j}" for j in range(self.covariates.shape[1])], method='pearson')
             pcorr_pingouin.append(result['r'].values[0])
             pval_pingouin.append(result['p-val'].values[0])
 
-        np.testing.assert_almost_equal(partial_corr, np.array(pcorr_pingouin), decimal=10)
-        np.testing.assert_almost_equal(p_values, np.array(pval_pingouin), decimal=10)
+        # Convert to numpy arrays for easier comparison
+        pcorr_pingouin = np.array(pcorr_pingouin)
+        pval_pingouin = np.array(pval_pingouin)
 
-    def test_semi_partial_correlation_pearson(self):
-        self._test_semi_partial_correlation('pearson', semi_partial_correlation_pearson)
+        # Assert that the partial correlation results are almost equal between the two methods
+        np.testing.assert_almost_equal(partial_corr, pcorr_pingouin, decimal=10)
 
-    def test_semi_partial_correlation_spearman(self):
-        self._test_semi_partial_correlation('spearman', semi_partial_correlation_spearman)
+        # Assert that the p-values results are almost equal between the two methods
+        np.testing.assert_almost_equal(p_values, pval_pingouin, decimal=10)
 
 
 if __name__ == '__main__':