diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index d16b945e5..ce891db98 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -30,6 +30,8 @@ jobs: run: poetry install --no-interaction --no-root - name: Install library run: poetry install --no-interaction + - name: Check dependencies + run: poetry run deptry . - name: Run tests run: poetry run pytest - name: Upload coverage to Codecov diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 32b480b54..6d0dec3d4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,11 +10,6 @@ repos: - id: check-json - id: check-toml - id: check-yaml - - id: pretty-format-json - args: - - --autofix - - --no-ensure-ascii - - --no-sort-keys - id: check-ast - id: debug-statements - id: check-docstring-first @@ -96,3 +91,9 @@ repos: rev: 0.3.8 hooks: - id: pydoclint + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.0.3" + hooks: + - id: prettier + args: ["--staged"] diff --git a/.vscode/extensions.json b/.vscode/extensions.json index e434673de..c6134c8a3 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -4,9 +4,8 @@ "ms-python.isort", "ms-python.mypy-type-checker", "ms-python.python", - "ms-python.black-formatter" + "ms-python.black-formatter", + "esbenp.prettier-vscode" ], - "unwantedRecommendations": [ - "ms-python.flake8" - ] + "unwantedRecommendations": ["ms-python.flake8"] } diff --git a/.vscode/launch.json b/.vscode/launch.json index 796274c77..1d3659207 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -8,9 +8,7 @@ "program": "${file}", "console": "integratedTerminal", "justMyCode": true, - "args": [ - "--config-dir=./configs" - ] + "args": ["--config-dir=./configs"] }, { "name": "Python: Current File [local]", @@ -19,10 +17,7 @@ "program": "${file}", "console": "integratedTerminal", "justMyCode": true, - "args": [ - "--config-dir=./configs", - "environment=local" - ] + "args": ["--config-dir=./configs", "environment=local"] } ] } diff --git a/.vscode/settings.json b/.vscode/settings.json index 727b1d959..564dcad30 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -8,11 +8,7 @@ "source.organizeImports": true } }, - "python.terminal.launchArgs": [ - "-m", - "IPython", - "--no-autoindent" - ], + "python.terminal.launchArgs": ["-m", "IPython", "--no-autoindent"], "[jsonc]": { "editor.tabSize": 2, "editor.insertSpaces": true, @@ -23,16 +19,9 @@ } }, "json.format.keepLines": true, - "isort.args": [ - "--profile", - "black" - ], + "isort.args": ["--profile", "black"], "autoDocstring.docstringFormat": "google", - "python.testing.pytestArgs": [ - ".", - "--doctest-modules", - "--cov=src/" - ], + "python.testing.pytestArgs": [".", "--doctest-modules", "--cov=src/"], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, "mypy-type-checker.severity": { diff --git a/LICENSE.md b/LICENSE.md index 261eeb9e9..c61b66391 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -2,180 +2,180 @@ Version 2.0, January 2004 http://www.apache.org/licenses/ - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" @@ -186,16 +186,16 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] +Copyright [yyyy] [name of copyright owner] - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
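A note on the CI change at the top of this diff: the pytest workflow now runs deptry after installing the project, so builds fail when `pyproject.toml` declares unused dependencies or code imports undeclared ones. A minimal sketch of reproducing that check locally, assuming the dev dependencies from this PR (deptry 0.12.0, per the poetry.lock changes further down) are installed:

```bash
# Same sequence as the new workflow steps, run from the repository root.
poetry install --no-interaction   # install the project and its dependencies
poetry run deptry .               # scan for missing, unused and transitive dependencies
```

If a finding is intentional, deptry can be configured through a `[tool.deptry]` table in `pyproject.toml`; no such configuration is part of this diff.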
diff --git a/README.md b/README.md index b1ec5ba19..e49f498a0 100644 --- a/README.md +++ b/README.md @@ -6,4 +6,5 @@ [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/opentargets/genetics_etl_python/main.svg)](https://results.pre-commit.ci/badge/github/opentargets/genetics_etl_python) # Genetics Portal Data Pipeline (experimental) + - [Documentation](https://opentargets.github.io/genetics_etl_python/) diff --git a/codecov.yml b/codecov.yml index d817e3746..bc3b704f2 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,8 +1,7 @@ - comment: layout: "reach, diff, flags, files" behavior: default - require_changes: false # if true: only post the comment if coverage changes + require_changes: false # if true: only post the comment if coverage changes coverage: status: diff --git a/commitlint.config.js b/commitlint.config.js index 3347cb961..5073c20db 100644 --- a/commitlint.config.js +++ b/commitlint.config.js @@ -1 +1 @@ -module.exports = {extends: ['@commitlint/config-conventional']}; +module.exports = { extends: ["@commitlint/config-conventional"] }; diff --git a/config/datasets/gcp.yaml b/config/datasets/gcp.yaml index f7e79c367..00af8d8e8 100644 --- a/config/datasets/gcp.yaml +++ b/config/datasets/gcp.yaml @@ -11,10 +11,15 @@ anderson: gs://genetics-portal-input/v2g_input/andersson2014/enhancer_tss_associ javierre: gs://genetics-portal-input/v2g_input/javierre_2016_preprocessed.parquet jung: gs://genetics-portal-raw/pchic_jung2019/jung2019_pchic_tableS3.csv thurman: gs://genetics-portal-input/v2g_input/thurman2012/genomewideCorrs_above0.7_promoterPlusMinus500kb_withGeneNames_32celltypeCategories.bed8.gz -catalog_associations: ${datasets.inputs}/v2d/gwas_catalog_v1.0.2-associations_e110_r2023-09-11.tsv -catalog_studies: ${datasets.inputs}/v2d/gwas-catalog-v1.0.3-studies-r2023-09-11.tsv -catalog_ancestries: ${datasets.inputs}/v2d/gwas-catalog-v1.0.3-ancestries-r2023-09-11.tsv -catalog_sumstats_lut: ${datasets.inputs}/v2d/harmonised_list-r2023-09-11.txt +catalog_associations: ${datasets.inputs}/v2d/gwas_catalog_v1.0.2-associations_e110_r2023-11-24.tsv +catalog_studies: + # To get a complete representation of all GWAS Catalog studies, we need to ingest the list of unpublished studies from a different file. 
+ - ${datasets.inputs}/v2d/gwas-catalog-v1.0.3-studies-r2023-11-24.tsv + - ${datasets.inputs}/v2d/gwas-catalog-v1.0.3-unpublished-studies-r2023-11-24.tsv +catalog_ancestries: + - ${datasets.inputs}/v2d/gwas-catalog-v1.0.3-ancestries-r2023-11-24.tsv + - ${datasets.inputs}/v2d/gwas-catalog-v1.0.3-unpublished-ancestries-r2023-11-24.tsv +catalog_sumstats_lut: ${datasets.inputs}/v2d/harmonised_list-r2023-11-24a.txt ukbiobank_manifest: gs://genetics-portal-input/ukb_phenotypes/neale2_saige_study_manifest.190430.tsv l2g_gold_standard_curation: ${datasets.inputs}/l2g/gold_standard/curation.json gene_interactions: ${datasets.inputs}/l2g/interaction # 23.09 data diff --git a/config/step/gwas_catalog.yaml b/config/step/gwas_catalog.yaml index a86ce92ce..ea357eb72 100644 --- a/config/step/gwas_catalog.yaml +++ b/config/step/gwas_catalog.yaml @@ -1,6 +1,6 @@ _target_: otg.gwas_catalog.GWASCatalogStep -catalog_studies_file: ${datasets.catalog_studies} -catalog_ancestry_file: ${datasets.catalog_ancestries} +catalog_study_files: ${datasets.catalog_studies} +catalog_ancestry_files: ${datasets.catalog_ancestries} catalog_associations_file: ${datasets.catalog_associations} catalog_sumstats_lut: ${datasets.catalog_sumstats_lut} variant_annotation_path: ${datasets.variant_annotation} diff --git a/docs/assets/javascripts/extra.js b/docs/assets/javascripts/extra.js index dfc99ea08..808fdb8c1 100644 --- a/docs/assets/javascripts/extra.js +++ b/docs/assets/javascripts/extra.js @@ -1,4 +1,4 @@ -$('.contributors img[data-src]').each(function () { - src = $(this).attr("data-src"); - $(this).attr('src', src); +$(".contributors img[data-src]").each(function () { + src = $(this).attr("data-src"); + $(this).attr("src", src); }); diff --git a/docs/assets/overrides/partials/source-file.html b/docs/assets/overrides/partials/source-file.html index fdda516dc..a8d89b766 100644 --- a/docs/assets/overrides/partials/source-file.html +++ b/docs/assets/overrides/partials/source-file.html @@ -1,46 +1,50 @@
diff --git a/docs/assets/stylesheets/extra.css b/docs/assets/stylesheets/extra.css index 1073b23b9..5712dc8ea 100644 --- a/docs/assets/stylesheets/extra.css +++ b/docs/assets/stylesheets/extra.css @@ -1,79 +1,75 @@ .metadata { - list-style: none; - padding: 0; - margin: 0; - margin-bottom: 15px; - color: #999; + list-style: none; + padding: 0; + margin: 0; + margin-bottom: 15px; + color: #999; } .metadata.page-metadata .contributors-text { - margin-right: 5px; - display: inline-block; + margin-right: 5px; + display: inline-block; } .metadata.page-metadata { - display: flex !important; - flex-direction: row; - align-items: center; - font-size: 1.2em; + display: flex !important; + flex-direction: row; + align-items: center; + font-size: 1.2em; } - .metadata.page-metadata li { - list-style: none; - display: flex; - flex-direction: row; - justify-content: center; - align-items: center; - margin-bottom: 0 !important; + list-style: none; + display: flex; + flex-direction: row; + justify-content: center; + align-items: center; + margin-bottom: 0 !important; } - .metadata.page-metadata li .icon { - width: 1.1rem; - height: 1.1rem; - margin-right: 2px; + width: 1.1rem; + height: 1.1rem; + margin-right: 2px; } - .metadata.page-metadata li .icon svg { - fill: #999; + fill: #999; } - .page-metadata .contributors-holder { - margin: 0 !important; - margin-left: 1em; + margin: 0 !important; + margin-left: 1em; } .page-metadata .contributors { - margin-right: 5px; - display: inline-block; - list-style: none; - display: flex; - flex-direction: row; + margin-right: 5px; + display: inline-block; + list-style: none; + display: flex; + flex-direction: row; } .page-metadata .contributors li { - display: inline-flex; - margin: 0 !important; - padding: 0 !important; + display: inline-flex; + margin: 0 !important; + padding: 0 !important; } .page-metadata .contributors li img { - border-radius: 50%; - filter: grayscale(100%); - -webkit-filter: grayscale(100%); - opacity: 0.8; - vertical-align: middle; - width: 1.6rem; - transition: all 0.2s ease-in-out; + border-radius: 50%; + filter: grayscale(100%); + -webkit-filter: grayscale(100%); + opacity: 0.8; + vertical-align: middle; + width: 1.6rem; + transition: all 0.2s ease-in-out; } .page-metadata .contributors li img:hover { - opacity: 1; - filter: grayscale(0%); - -webkit-filter: grayscale(0%); - vertical-align: middle; - transform: scale(1.2); + opacity: 1; + filter: grayscale(0%); + -webkit-filter: grayscale(0%); + vertical-align: middle; + transform: scale(1.2); } diff --git a/docs/development/airflow.md b/docs/development/airflow.md index 8070d07b1..9768bf961 100644 --- a/docs/development/airflow.md +++ b/docs/development/airflow.md @@ -8,13 +8,12 @@ This section describes how to set up a local Airflow server which will orchestra - [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) !!!warning macOS Docker memory allocation - On macOS, the default amount of memory available for Docker might not be enough to get Airflow up and running. Allocate at least 4GB of memory for the Docker Engine (ideally 8GB). [More info](https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#) - +On macOS, the default amount of memory available for Docker might not be enough to get Airflow up and running. Allocate at least 4GB of memory for the Docker Engine (ideally 8GB). 
[More info](https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#) ## Configure Airflow access to Google Cloud Platform !!!warning Specifying Google Cloud parameters - Run the next two command with the appropriate Google Cloud project ID and service account name to ensure the correct Google default application credentials are set up. +Run the next two commands with the appropriate Google Cloud project ID and service account name to ensure the correct Google default application credentials are set up. Authenticate to Google Cloud: @@ -28,7 +27,6 @@ Create the service account key file that will be used by Airflow to access Googl gcloud iam service-accounts keys create ~/.config/gcloud/service_account_credentials.json --iam-account=@appspot.gserviceaccount.com ``` - ## Set up Airflow Change the working directory so that all subsequent commands will work: @@ -40,7 +38,7 @@ cd src/airflow ### Build Docker image !!!note Custom Docker image for Airflow - The custom Dockerfile built by the command below extends the official [Airflow Docker Compose YAML](https://airflow.apache.org/docs/apache-airflow/stable/docker-compose.yaml). We add support for Google Cloud SDK, Google Dataproc operators, and access to GCP credentials. +The custom Dockerfile built by the command below extends the official [Airflow Docker Compose YAML](https://airflow.apache.org/docs/apache-airflow/stable/docker-compose.yaml). We add support for Google Cloud SDK, Google Dataproc operators, and access to GCP credentials. ```bash docker build . --tag extending_airflow:latest @@ -49,7 +47,7 @@ docker build . --tag extending_airflow:latest ### Set Airflow user ID !!!note Setting Airflow user ID - These commands allow Airflow running inside Docker to access the credentials file which was generated earlier. +These commands allow Airflow running inside Docker to access the credentials file which was generated earlier. ```bash # If any user ID is already specified in .env, remove it. @@ -78,7 +76,6 @@ Airflow UI will now be available at `http://localhost:8080/`. Default username a For additional information on how to use Airflow visit the [official documentation](https://airflow.apache.org/docs/apache-airflow/stable/index.html). - ### Cleaning up At any time, you can check the status of your containers with: @@ -105,18 +102,17 @@ More information on running Airflow with Docker Compose can be found in the [off 1. **Increase Airflow concurrency**. Modify the `docker-compose.yaml` and add the following to the x-airflow-common → environment section: - ```yaml - AIRFLOW__CORE__PARALLELISM: 32 - AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG: 32 - AIRFLOW__SCHEDULER__MAX_TIS_PER_QUERY: 16 - AIRFLOW__CORE__MAX_ACTIVE_RUNS_PER_DAG: 1 - # Also add the following line if you are using CeleryExecutor (by default, LocalExecutor is used). - AIRFLOW__CELERY__WORKER_CONCURRENCY: 32 - ``` + ```yaml + AIRFLOW__CORE__PARALLELISM: 32 + AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG: 32 + AIRFLOW__SCHEDULER__MAX_TIS_PER_QUERY: 16 + AIRFLOW__CORE__MAX_ACTIVE_RUNS_PER_DAG: 1 + # Also add the following line if you are using CeleryExecutor (by default, LocalExecutor is used). + AIRFLOW__CELERY__WORKER_CONCURRENCY: 32 + ``` 1. **Additional pip packages**. They can be added to the `requirements.txt` file. - ## Troubleshooting Note that when you add a new workflow under `dags/`, Airflow will not pick that up immediately. By default the filesystem is only scanned for new DAGs every 300s.
However, once the DAG is added, updates are applied nearly instantaneously. diff --git a/docs/development/contributing.md b/docs/development/contributing.md index 3b37d18e8..a2eaa6aae 100644 --- a/docs/development/contributing.md +++ b/docs/development/contributing.md @@ -5,6 +5,7 @@ title: Contributing guidelines # Contributing guidelines ## One-time configuration + The steps in this section only ever need to be done once on any particular system. Google Cloud configuration: @@ -20,11 +21,13 @@ Check that you have the `make` utility installed, and if not (which is unlikely) Check that you have `java` installed. ## Environment configuration + Run `make setup-dev` to install/update the necessary packages and activate the development environment. You need to do this every time you open a new shell. It is recommended to use VS Code as an IDE for development. ## How to run the code + All pipelines in this repository are intended to be run in Google Dataproc. Running them locally is not currently supported. In order to run the code: @@ -32,23 +35,26 @@ In order to run the code: 1. Manually edit your local `src/airflow/dags/*` file and comment out the steps you do not want to run. 2. Manually edit your local `pyproject.toml` file and modify the version of the code. - - This must be different from the version used by any other people working on the repository to avoid any deployment conflicts, so it's a good idea to use your name, for example: `1.2.3+jdoe`. - - You can also add a brief branch description, for example: `1.2.3+jdoe.myfeature`. - - Note that the version must comply with [PEP440 conventions](https://peps.python.org/pep-0440/#normalization), otherwise Poetry will not allow it to be deployed. - - Do not use underscores or hyphens in your version name. When building the WHL file, they will be automatically converted to dots, which means the file name will no longer match the version and the build will fail. Use dots instead. + + - This must be different from the version used by any other people working on the repository to avoid any deployment conflicts, so it's a good idea to use your name, for example: `1.2.3+jdoe`. + - You can also add a brief branch description, for example: `1.2.3+jdoe.myfeature`. + - Note that the version must comply with [PEP440 conventions](https://peps.python.org/pep-0440/#normalization), otherwise Poetry will not allow it to be deployed. + - Do not use underscores or hyphens in your version name. When building the WHL file, they will be automatically converted to dots, which means the file name will no longer match the version and the build will fail. Use dots instead. 3. Manually edit your local `src/airflow/dags/common_airflow.py` and set `OTG_VERSION` to the same version as you did in the previous step. 4. Run `make build`. - - This will create a bundle containing the neccessary code, configuration and dependencies to run the ETL pipeline, and then upload this bundle to Google Cloud. - - A version specific subpath is used, so uploading the code will not affect any branches but your own. - - If there was already a code bundle uploaded with the same version number, it will be replaced. -5. Open Airflow UI and run the DAG. + - This will create a bundle containing the necessary code, configuration and dependencies to run the ETL pipeline, and then upload this bundle to Google Cloud. + - A version specific subpath is used, so uploading the code will not affect any branches but your own.
+ - If there was already a code bundle uploaded with the same version number, it will be replaced. +5. Open Airflow UI and run the DAG. ## Contributing checklist + When making changes, and especially when implementing a new module or feature, it's essential to ensure that all relevant sections of the code base are modified. + - [ ] Run `make check`. This will run the linter and formatter to ensure that the code is compliant with the project conventions. - [ ] Develop unit tests for your code and run `make test`. This will run all unit tests in the repository, including the examples appended in the docstrings of some methods. - [ ] Update the configuration if necessary. @@ -57,20 +63,24 @@ When making changes, and especially when implementing a new module or feature, i For more details on each of these steps, see the sections below. ### Documentation -* If during development you had a question which wasn't covered in the documentation, and someone explained it to you, add it to the documentation. The same applies if you encountered any instructions in the documentation which were obsolete or incorrect. -* Documentation autogeneration expressions start with `:::`. They will automatically generate sections of the documentation based on class and method docstrings. Be sure to update them for: - + Dataset definitions in `docs/python_api/datasource/STEP` (example: `docs/python_api/datasource/finngen/study_index.md`) - + Step definition in `docs/python_api/step/STEP.md` (example: `docs/python_api/step/finngen.md`) + +- If during development you had a question which wasn't covered in the documentation, and someone explained it to you, add it to the documentation. The same applies if you encountered any instructions in the documentation which were obsolete or incorrect. +- Documentation autogeneration expressions start with `:::`. They will automatically generate sections of the documentation based on class and method docstrings. 
Be sure to update them for: + - Dataset definitions in `docs/python_api/datasource/STEP` (example: `docs/python_api/datasource/finngen/study_index.md`) + - Step definition in `docs/python_api/step/STEP.md` (example: `docs/python_api/step/finngen.md`) ### Configuration -* Input and output paths in `config/datasets/gcp.yaml` -* Step configuration in `config/step/STEP.yaml` (example: `config/step/finngen.yaml`) + +- Input and output paths in `config/datasets/gcp.yaml` +- Step configuration in `config/step/STEP.yaml` (example: `config/step/finngen.yaml`) ### Classes -* Dataset class in `src/otg/datasource/STEP` (example: `src/otg/datasource/finngen/study_index.py` → `FinnGenStudyIndex`) -* Step main running class in `src/otg/STEP.py` (example: `src/otg/finngen.py`) + +- Dataset class in `src/otg/datasource/STEP` (example: `src/otg/datasource/finngen/study_index.py` → `FinnGenStudyIndex`) +- Step main running class in `src/otg/STEP.py` (example: `src/otg/finngen.py`) ### Tests -* Test study fixture in `tests/conftest.py` (example: `mock_study_index_finngen` in that module) -* Test sample data in `tests/data_samples` (example: `tests/data_samples/finngen_studies_sample.json`) -* Test definition in `tests/` (example: `tests/dataset/test_study_index.py` → `test_study_index_finngen_creation`) + +- Test study fixture in `tests/conftest.py` (example: `mock_study_index_finngen` in that module) +- Test sample data in `tests/data_samples` (example: `tests/data_samples/finngen_studies_sample.json`) +- Test definition in `tests/` (example: `tests/dataset/test_study_index.py` → `test_study_index_finngen_creation`) diff --git a/docs/development/workflows.md b/docs/development/workflows.md index 6e84c7ee1..2269041d8 100644 --- a/docs/development/workflows.md +++ b/docs/development/workflows.md @@ -2,7 +2,6 @@ This page describes the high level components of the pipeline, which are organised as Airflow DAGs (directed acyclic graphs). - ## Note on DAGs and Dataproc clusters Each DAG consists of the following general stages: @@ -15,17 +14,14 @@ Each DAG consists of the following general stages: 1. Delete the cluster - Within a DAG, all data processing steps run on the same Dataproc cluster as separate jobs. There is no need to configure DAGs or steps depending on the size of the input data. Clusters have autoscaling enabled, which means they will increase or decrease the number of worker VMs to accommodate the load. - ## DAG 1: Preprocess This DAG contains steps which are only supposed to be run once, or very rarely. They ingest external data and apply bespoke transformations specific for each particular data source. The output is normalised according to the data schemas used by the pipeline. - ## DAG 2: ETL The ETL DAG takes the inputs of the previous step and performs the main algorithmic processing. This processing is supposed to be data source agnostic. 
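One bullet in the contributing guide above merits a concrete illustration: PEP 440 normalizes underscores and hyphens inside a local version segment to dots, which is exactly why a wheel built from a version like `1.2.3+jdoe_myfeature` ends up with a filename that no longer matches the declared version. A quick hedged check of the normalization, assuming the `packaging` library is importable in the Poetry environment:

```bash
# PEP 440 normalization: "_" and "-" in the local version segment become "."
poetry run python -c "from packaging.version import Version; print(Version('1.2.3+jdoe_myfeature'))"
# prints: 1.2.3+jdoe.myfeature
```

Using dots in the local segment from the start, as the guide recommends, sidesteps the mismatch entirely.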
diff --git a/docs/installation.md b/docs/installation.md index 20e3e283b..bccb6684d 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -4,6 +4,7 @@ hide: - navigation - toc --- + # Installation TBC diff --git a/docs/python_api/dataset/l2g_feature_matrix.md b/docs/python_api/dataset/l2g_feature_matrix.md index 8cff3b91b..538a9a1de 100644 --- a/docs/python_api/dataset/l2g_feature_matrix.md +++ b/docs/python_api/dataset/l2g_feature_matrix.md @@ -5,4 +5,5 @@ title: L2G Feature Matrix ::: otg.dataset.l2g_feature_matrix.L2GFeatureMatrix ## Schema + --8<-- "assets/schemas/l2g_feature_matrix.md" diff --git a/docs/python_api/dataset/l2g_prediction.md b/docs/python_api/dataset/l2g_prediction.md index 9037b17b3..d77617c6c 100644 --- a/docs/python_api/dataset/l2g_prediction.md +++ b/docs/python_api/dataset/l2g_prediction.md @@ -5,4 +5,5 @@ title: L2G Prediction ::: otg.dataset.l2g_prediction.L2GPrediction ## Schema + --8<-- "assets/schemas/l2g_prediction.md" diff --git a/docs/python_api/dataset/study_locus.md b/docs/python_api/dataset/study_locus.md index 4a6726d5f..ca525024e 100644 --- a/docs/python_api/dataset/study_locus.md +++ b/docs/python_api/dataset/study_locus.md @@ -3,10 +3,12 @@ title: Study Locus --- ::: otg.dataset.study_locus.StudyLocus -___ + +--- ::: otg.dataset.study_locus.StudyLocusQualityCheck -___ + +--- ::: otg.dataset.study_locus.CredibleInterval diff --git a/docs/python_api/datasource/eqtl_catalogue/study_index.md b/docs/python_api/datasource/eqtl_catalogue/study_index.md index c1e675200..28a9c1e29 100644 --- a/docs/python_api/datasource/eqtl_catalogue/study_index.md +++ b/docs/python_api/datasource/eqtl_catalogue/study_index.md @@ -1,4 +1,5 @@ --- title: Study Index --- + ::: otg.datasource.eqtl_catalogue.study_index.EqtlCatalogueStudyIndex diff --git a/docs/python_api/datasource/eqtl_catalogue/summary_stats.md b/docs/python_api/datasource/eqtl_catalogue/summary_stats.md index 62f2b4be2..9dadaa1c3 100644 --- a/docs/python_api/datasource/eqtl_catalogue/summary_stats.md +++ b/docs/python_api/datasource/eqtl_catalogue/summary_stats.md @@ -1,4 +1,5 @@ --- title: Summary Stats --- + ::: otg.datasource.eqtl_catalogue.summary_stats.EqtlCatalogueSummaryStats diff --git a/docs/python_api/datasource/finngen/study_index.md b/docs/python_api/datasource/finngen/study_index.md index 784de4883..cd298e07c 100644 --- a/docs/python_api/datasource/finngen/study_index.md +++ b/docs/python_api/datasource/finngen/study_index.md @@ -1,4 +1,5 @@ --- title: Study Index --- + ::: otg.datasource.finngen.study_index.FinnGenStudyIndex diff --git a/docs/python_api/datasource/gnomad/_gnomad.md b/docs/python_api/datasource/gnomad/_gnomad.md index 7e5323e3e..5d282c438 100644 --- a/docs/python_api/datasource/gnomad/_gnomad.md +++ b/docs/python_api/datasource/gnomad/_gnomad.md @@ -1,6 +1,7 @@ --- title: GnomAD --- +

diff --git a/docs/python_api/datasource/gnomad/gnomad_ld.md b/docs/python_api/datasource/gnomad/gnomad_ld.md index af3188ac0..a6a6a8050 100644 --- a/docs/python_api/datasource/gnomad/gnomad_ld.md +++ b/docs/python_api/datasource/gnomad/gnomad_ld.md @@ -1,4 +1,5 @@ --- title: LD Matrix --- + ::: otg.datasource.gnomad.ld.GnomADLDMatrix diff --git a/docs/python_api/datasource/gnomad/gnomad_variants.md b/docs/python_api/datasource/gnomad/gnomad_variants.md index 949ac4fc6..ae1bbd6dc 100644 --- a/docs/python_api/datasource/gnomad/gnomad_variants.md +++ b/docs/python_api/datasource/gnomad/gnomad_variants.md @@ -1,4 +1,5 @@ --- title: Variants --- + ::: otg.datasource.gnomad.variants.GnomADVariants diff --git a/docs/python_api/datasource/gwas_catalog/associations.md b/docs/python_api/datasource/gwas_catalog/associations.md index ebea61cf3..57504ca33 100644 --- a/docs/python_api/datasource/gwas_catalog/associations.md +++ b/docs/python_api/datasource/gwas_catalog/associations.md @@ -1,4 +1,5 @@ --- title: Associations --- + ::: otg.datasource.gwas_catalog.associations.GWASCatalogAssociations diff --git a/docs/python_api/datasource/gwas_catalog/study_index.md b/docs/python_api/datasource/gwas_catalog/study_index.md index 7494cc851..c7dd42c7e 100644 --- a/docs/python_api/datasource/gwas_catalog/study_index.md +++ b/docs/python_api/datasource/gwas_catalog/study_index.md @@ -1,4 +1,5 @@ --- title: Study Index --- + ::: otg.datasource.gwas_catalog.study_index.GWASCatalogStudyIndex diff --git a/docs/python_api/datasource/gwas_catalog/study_splitter.md b/docs/python_api/datasource/gwas_catalog/study_splitter.md index 210e69e5d..4ad7e5627 100644 --- a/docs/python_api/datasource/gwas_catalog/study_splitter.md +++ b/docs/python_api/datasource/gwas_catalog/study_splitter.md @@ -1,4 +1,5 @@ --- title: Study Splitter --- + ::: otg.datasource.gwas_catalog.study_splitter.GWASCatalogStudySplitter diff --git a/docs/python_api/datasource/gwas_catalog/summary_statistics.md b/docs/python_api/datasource/gwas_catalog/summary_statistics.md index a564b3cde..45294256a 100644 --- a/docs/python_api/datasource/gwas_catalog/summary_statistics.md +++ b/docs/python_api/datasource/gwas_catalog/summary_statistics.md @@ -1,4 +1,5 @@ --- title: Summary statistics --- + :::otg.datasource.gwas_catalog.summary_statistics.GWASCatalogSummaryStatistics diff --git a/docs/python_api/datasource/intervals/andersson.md b/docs/python_api/datasource/intervals/andersson.md index 8bb074623..f9037613e 100644 --- a/docs/python_api/datasource/intervals/andersson.md +++ b/docs/python_api/datasource/intervals/andersson.md @@ -1,4 +1,5 @@ --- title: Andersson et al. --- + ::: otg.datasource.intervals.andersson.IntervalsAndersson diff --git a/docs/python_api/datasource/intervals/javierre.md b/docs/python_api/datasource/intervals/javierre.md index 9fd0443b7..3349465c8 100644 --- a/docs/python_api/datasource/intervals/javierre.md +++ b/docs/python_api/datasource/intervals/javierre.md @@ -1,4 +1,5 @@ --- title: Javierre et al. --- + ::: otg.datasource.intervals.javierre.IntervalsJavierre diff --git a/docs/python_api/datasource/intervals/jung.md b/docs/python_api/datasource/intervals/jung.md index 2fd615eb7..7cb5adc8d 100644 --- a/docs/python_api/datasource/intervals/jung.md +++ b/docs/python_api/datasource/intervals/jung.md @@ -1,4 +1,5 @@ --- title: Jung et al. 
--- + ::: otg.datasource.intervals.jung.IntervalsJung diff --git a/docs/python_api/datasource/intervals/thurman.md b/docs/python_api/datasource/intervals/thurman.md index 3325f502b..ff13e8ea3 100644 --- a/docs/python_api/datasource/intervals/thurman.md +++ b/docs/python_api/datasource/intervals/thurman.md @@ -1,4 +1,5 @@ --- title: Thurman et al. --- + ::: otg.datasource.intervals.thurman.IntervalsThurman diff --git a/docs/python_api/datasource/open_targets/target.md b/docs/python_api/datasource/open_targets/target.md index fef241312..969618381 100644 --- a/docs/python_api/datasource/open_targets/target.md +++ b/docs/python_api/datasource/open_targets/target.md @@ -1,4 +1,5 @@ --- title: Target --- + ::: otg.datasource.open_targets.target.OpenTargetsTarget diff --git a/docs/python_api/datasource/ukbiobank/_ukbiobank.md b/docs/python_api/datasource/ukbiobank/_ukbiobank.md index fb7cdc5ae..299a9c616 100644 --- a/docs/python_api/datasource/ukbiobank/_ukbiobank.md +++ b/docs/python_api/datasource/ukbiobank/_ukbiobank.md @@ -1,6 +1,7 @@ --- title: UK Biobank --- +

@@ -15,8 +16,9 @@ The UK Biobank is a large-scale biomedical database and research resource that c Recent efforts to rapidly and systematically apply established GWAS methods to all available data fields in UK Biobank have made available large repositories of summary statistics. To leverage these data for disease locus discovery, we used full summary statistics from: The Neale lab Round 2 (N=2139). + - These analyses applied GWAS (implemented in Hail) to all data fields using imputed genotypes from HRC as released by UK Biobank in May 2017, consisting of 337,199 individuals post-QC. Full details of the Neale lab GWAS implementation are available here. We have removed all ICD-10 related traits from the Neale data to reduce overlap with the SAIGE results. - http://www.nealelab.is/uk-biobank/ -The University of Michigan SAIGE analysis (N=1281). + The University of Michigan SAIGE analysis (N=1281). - The SAIGE analysis uses PheCode derived phenotypes and applies a new method that "provides accurate P values even when case-control ratios are extremely unbalanced". See Zhou et al. (2018) for further details. - https://pubmed.ncbi.nlm.nih.gov/30104761/ diff --git a/docs/python_api/datasource/ukbiobank/study_index.md b/docs/python_api/datasource/ukbiobank/study_index.md index 1bda1af7f..8b98cc31a 100644 --- a/docs/python_api/datasource/ukbiobank/study_index.md +++ b/docs/python_api/datasource/ukbiobank/study_index.md @@ -1,4 +1,5 @@ --- title: Study Index --- + ::: otg.datasource.ukbiobank.study_index.UKBiobankStudyIndex diff --git a/docs/python_api/method/coloc.md b/docs/python_api/method/coloc.md index 5240f9114..56e9e0b7c 100644 --- a/docs/python_api/method/coloc.md +++ b/docs/python_api/method/coloc.md @@ -1,4 +1,5 @@ --- title: Coloc --- + ::: otg.method.colocalisation.Coloc diff --git a/docs/python_api/method/l2g/feature_factory.md b/docs/python_api/method/l2g/feature_factory.md index 7b6289079..d0cf7014a 100644 --- a/docs/python_api/method/l2g/feature_factory.md +++ b/docs/python_api/method/l2g/feature_factory.md @@ -3,6 +3,5 @@ title: L2G Feature Factory --- ::: otg.method.l2g.feature_factory.ColocalisationFactory ---- ::: otg.method.l2g.feature_factory.StudyLocusFactory diff --git a/docs/python_api/method/pics.md b/docs/python_api/method/pics.md index 78a210f7e..462c430be 100644 --- a/docs/python_api/method/pics.md +++ b/docs/python_api/method/pics.md @@ -1,4 +1,5 @@ --- title: PICS --- + :::otg.method.pics.PICS diff --git a/docs/python_api/method/window_based_clumping.md b/docs/python_api/method/window_based_clumping.md index d34735fbd..43c1e888d 100644 --- a/docs/python_api/method/window_based_clumping.md +++ b/docs/python_api/method/window_based_clumping.md @@ -1,4 +1,5 @@ --- title: Window-based clumping --- + :::otg.method.window_based_clumping.WindowBasedClumping diff --git a/docs/python_api/step/clump.md b/docs/python_api/step/clump.md index bc11079db..aae56ad9e 100644 --- a/docs/python_api/step/clump.md +++ b/docs/python_api/step/clump.md @@ -1,4 +1,5 @@ --- title: Clump --- + ::: otg.clump.ClumpStep diff --git a/docs/python_api/step/colocalisation.md b/docs/python_api/step/colocalisation.md index ac0982d78..7a7012bb5 100644 --- a/docs/python_api/step/colocalisation.md +++ b/docs/python_api/step/colocalisation.md @@ -1,4 +1,5 @@ --- title: Colocalisation --- + ::: otg.colocalisation.ColocalisationStep diff --git a/docs/python_api/step/eqtl_catalogue.md b/docs/python_api/step/eqtl_catalogue.md index 9d96f3486..e152c8ac8 100644 --- a/docs/python_api/step/eqtl_catalogue.md +++
b/docs/python_api/step/eqtl_catalogue.md @@ -1,4 +1,5 @@ --- title: eQTL Catalogue --- + ::: otg.eqtl_catalogue.EqtlCatalogueStep diff --git a/docs/python_api/step/finngen.md b/docs/python_api/step/finngen.md index bc14a8fb6..fedefae50 100644 --- a/docs/python_api/step/finngen.md +++ b/docs/python_api/step/finngen.md @@ -1,4 +1,5 @@ --- title: FinnGen --- + ::: otg.finngen.FinnGenStep diff --git a/docs/python_api/step/gene_index.md b/docs/python_api/step/gene_index.md index 02cb52000..dad07417c 100644 --- a/docs/python_api/step/gene_index.md +++ b/docs/python_api/step/gene_index.md @@ -1,4 +1,5 @@ --- title: Gene Index --- + ::: otg.gene_index.GeneIndexStep diff --git a/docs/python_api/step/gwas_catalog.md b/docs/python_api/step/gwas_catalog.md index faacaa802..aa20adfaa 100644 --- a/docs/python_api/step/gwas_catalog.md +++ b/docs/python_api/step/gwas_catalog.md @@ -1,4 +1,5 @@ --- title: GWAS Catalog --- + ::: otg.gwas_catalog.GWASCatalogStep diff --git a/docs/python_api/step/gwas_catalog_sumstat_preprocess.md b/docs/python_api/step/gwas_catalog_sumstat_preprocess.md index b56a84252..1dd1d7c79 100644 --- a/docs/python_api/step/gwas_catalog_sumstat_preprocess.md +++ b/docs/python_api/step/gwas_catalog_sumstat_preprocess.md @@ -1,4 +1,5 @@ --- title: GWAS Catalog sumstat preprocess --- + ::: otg.gwas_catalog_sumstat_preprocess.GWASCatalogSumstatsPreprocessStep diff --git a/docs/python_api/step/l2g.md b/docs/python_api/step/l2g.md index 9dab61f49..2364c797a 100644 --- a/docs/python_api/step/l2g.md +++ b/docs/python_api/step/l2g.md @@ -1,4 +1,5 @@ --- title: Locus-to-gene (L2G) --- + ::: otg.l2g.LocusToGeneStep diff --git a/docs/python_api/step/ld_index.md b/docs/python_api/step/ld_index.md index 86e6fdd6f..5f769422a 100644 --- a/docs/python_api/step/ld_index.md +++ b/docs/python_api/step/ld_index.md @@ -1,4 +1,5 @@ --- title: LD Index --- + ::: otg.ld_index.LDIndexStep diff --git a/docs/python_api/step/pics.md b/docs/python_api/step/pics.md index 08dfe16fa..c4b7c4291 100644 --- a/docs/python_api/step/pics.md +++ b/docs/python_api/step/pics.md @@ -1,4 +1,5 @@ --- title: PICS --- + ::: otg.pics.PICSStep diff --git a/docs/python_api/step/ukbiobank.md b/docs/python_api/step/ukbiobank.md index 250edf203..34724ea51 100644 --- a/docs/python_api/step/ukbiobank.md +++ b/docs/python_api/step/ukbiobank.md @@ -1,4 +1,5 @@ --- title: UK Biobank --- + ::: otg.ukbiobank.UKBiobankStep diff --git a/docs/python_api/step/variant_annotation_step.md b/docs/python_api/step/variant_annotation_step.md index b39537508..1b3eff6f8 100644 --- a/docs/python_api/step/variant_annotation_step.md +++ b/docs/python_api/step/variant_annotation_step.md @@ -1,4 +1,5 @@ --- title: Variant Annotation --- + ::: otg.variant_annotation.VariantAnnotationStep diff --git a/docs/python_api/step/variant_index_step.md b/docs/python_api/step/variant_index_step.md index 9f884a1f7..345f98401 100644 --- a/docs/python_api/step/variant_index_step.md +++ b/docs/python_api/step/variant_index_step.md @@ -1,4 +1,5 @@ --- title: Variant Index --- + ::: otg.variant_index.VariantIndexStep diff --git a/docs/python_api/step/variant_to_gene_step.md b/docs/python_api/step/variant_to_gene_step.md index 1f20aa015..198ab01ec 100644 --- a/docs/python_api/step/variant_to_gene_step.md +++ b/docs/python_api/step/variant_to_gene_step.md @@ -1,4 +1,5 @@ --- title: Variant-to-gene --- + ::: otg.v2g.V2GStep diff --git a/docs/roadmap.md b/docs/roadmap.md index d1c8c5e03..33ddf5eb2 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -4,6 +4,7 @@ hide: - navigation - 
toc --- + # Roadmap The Open Targets core team is working on refactoring Open Targets Genetics, aiming to: diff --git a/mkdocs.yml b/mkdocs.yml index 3beea2c53..e15a1af8d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -43,7 +43,7 @@ markdown_extensions: permalink: true hooks: - - src/scripts/schemadocs.py + - src/utils/schemadocs.py repo_name: opentargets/genetics_etl_python repo_url: https://github.com/opentargets/genetics_etl_python diff --git a/poetry.lock b/poetry.lock index 0d60a7f82..9772ae150 100644 --- a/poetry.lock +++ b/poetry.lock @@ -454,13 +454,13 @@ openlineage = ["apache-airflow-providers-openlineage"] [[package]] name = "apache-airflow-providers-google" -version = "10.11.1" -description = "Provider for Apache Airflow. Implements apache-airflow-providers-google package" +version = "10.12.0" +description = "Provider package apache-airflow-providers-google for Apache Airflow" optional = false python-versions = "~=3.8" files = [ - {file = "apache-airflow-providers-google-10.11.1.tar.gz", hash = "sha256:d279194c9025e075ed62a2aa7c7b775e88296b5c9f7ed2f6e6aa9d95188d8367"}, - {file = "apache_airflow_providers_google-10.11.1-py3-none-any.whl", hash = "sha256:4aa2b066457d9c31ae9a6627a1bd981711c4a4f35b57695d986336dc87de511b"}, + {file = "apache_airflow_providers_google-10.12.0-py3-none-any.whl", hash = "sha256:a79c845382e672eab30757dfcd62cee8de5ac766e2d38558e1eb3e5fc1b00990"}, + {file = "apache_airflow_providers_google-10.12.0.tar.gz", hash = "sha256:cedfd906b054036c2d0de40b9ecfa70d45828945b47313c37e9722ba275a9028"}, ] [package.dependencies] @@ -488,7 +488,7 @@ google-cloud-datacatalog = ">=3.11.1" google-cloud-dataflow-client = ">=0.8.2" google-cloud-dataform = ">=0.5.0" google-cloud-dataplex = ">=1.4.2" -google-cloud-dataproc = ">=5.4.0" +google-cloud-dataproc = ">=5.5.0" google-cloud-dataproc-metastore = ">=1.12.0" google-cloud-dlp = ">=3.12.0" google-cloud-kms = ">=2.15.0" @@ -1680,6 +1680,24 @@ wrapt = ">=1.10,<2" [package.extras] dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] +[[package]] +name = "deptry" +version = "0.12.0" +description = "A command line utility to check for unused, missing and transitive dependencies in a Python project." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "deptry-0.12.0-py3-none-any.whl", hash = "sha256:69c801a6ae1b39c7b8e0daf40dbe8b75f1f161277d206dd8f921f32cd22dad91"}, + {file = "deptry-0.12.0.tar.gz", hash = "sha256:ac3cd32d149c92a9af12f63cd9486ddd1760f0277ed0cf306c6ef0388f57ff0a"}, +] + +[package.dependencies] +chardet = ">=4.0.0" +click = ">=8.0.0,<9.0.0" +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} +pathspec = ">=0.9.0" +tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""} + [[package]] name = "dill" version = "0.3.7" @@ -2288,6 +2306,20 @@ gitdb = ">=4.0.1,<5" [package.extras] test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] +[[package]] +name = "google" +version = "3.0.0" +description = "Python bindings to the Google search engine." 
+optional = false +python-versions = "*" +files = [ + {file = "google-3.0.0-py2.py3-none-any.whl", hash = "sha256:889cf695f84e4ae2c55fbc0cfdaf4c1e729417fa52ab1db0485202ba173e4935"}, + {file = "google-3.0.0.tar.gz", hash = "sha256:143530122ee5130509ad5e989f0512f7cb218b2d4eddbafbad40fd10e8d8ccbe"}, +] + +[package.dependencies] +beautifulsoup4 = "*" + [[package]] name = "google-ads" version = "22.1.0" @@ -4681,19 +4713,17 @@ mkdocs = "*" [[package]] name = "mkdocs-git-committers-plugin-2" -version = "1.2.0" -description = "An MkDocs plugin to create a list of contributors on the page. The git-committers plugin will seed the template context with a list of github committers and other useful GIT info such as last modified date" +version = "2.2.2" +description = "An MkDocs plugin to create a list of contributors on the page. The git-committers plugin will seed the template context with a list of GitHub or GitLab committers and other useful GIT info such as last modified date" optional = false python-versions = ">=3.8,<4" files = [ - {file = "mkdocs-git-committers-plugin-2-1.2.0.tar.gz", hash = "sha256:921da26b3f4393e6c170279ac34089151dfc22cd29ec4fbce3506218541685c8"}, - {file = "mkdocs_git_committers_plugin_2-1.2.0-py3-none-any.whl", hash = "sha256:0bb5d71cdd9d43fec0dec16e52a9aad2784256b0fa6ef9bb0cceffc36c081ab3"}, + {file = "mkdocs-git-committers-plugin-2-2.2.2.tar.gz", hash = "sha256:87a241624116e1c6245034ca2ee3f247e4500589e75f19f71d352b052a8630d8"}, + {file = "mkdocs_git_committers_plugin_2-2.2.2-py3-none-any.whl", hash = "sha256:73545bdc813ecad609681f876e4f9305f8878c1aeb01a5e92d0e21d7d02ad87e"}, ] [package.dependencies] -beautifulsoup4 = "*" gitpython = "*" -lxml = ">=4.9" mkdocs = ">=1.0.3" requests = "*" @@ -4716,13 +4746,13 @@ pytz = "*" [[package]] name = "mkdocs-material" -version = "9.4.10" +version = "9.4.14" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.4.10-py3-none-any.whl", hash = "sha256:207c4ebc07faebb220437d2c626edb0c9760c82ccfc484500bd3eb30dfce988c"}, - {file = "mkdocs_material-9.4.10.tar.gz", hash = "sha256:421adedaeaa461dcaf55b8d406673934ade3d4f05ed9819e4cc7b4ee1d646a62"}, + {file = "mkdocs_material-9.4.14-py3-none-any.whl", hash = "sha256:dbc78a4fea97b74319a6aa9a2f0be575a6028be6958f813ba367188f7b8428f6"}, + {file = "mkdocs_material-9.4.14.tar.gz", hash = "sha256:a511d3ff48fa8718b033e7e37d17abd9cc1de0fdf0244a625ca2ae2387e2416d"}, ] [package.dependencies] @@ -5688,6 +5718,17 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" +[[package]] +name = "prettier" +version = "0.0.7" +description = "Properly pprint of nested objects" +optional = false +python-versions = "*" +files = [ + {file = "prettier-0.0.7-py3-none-any.whl", hash = "sha256:20e76791de41cafe481328dd49552303f29ca192151cee1b120c26f66cae9bfc"}, + {file = "prettier-0.0.7.tar.gz", hash = "sha256:6c34b8cd09fd9c8956c05d6395ea3f575e0122dce494ba57685c07065abed427"}, +] + [[package]] name = "prison" version = "0.2.1" @@ -6232,17 +6273,17 @@ files = [ [[package]] name = "pymdown-extensions" -version = "10.3.1" +version = "10.5" description = "Extension pack for Python Markdown." 
optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.3.1-py3-none-any.whl", hash = "sha256:8cba67beb2a1318cdaf742d09dff7c0fc4cafcc290147ade0f8fb7b71522711a"}, - {file = "pymdown_extensions-10.3.1.tar.gz", hash = "sha256:f6c79941498a458852853872e379e7bab63888361ba20992fc8b4f8a9b61735e"}, + {file = "pymdown_extensions-10.5-py3-none-any.whl", hash = "sha256:1f0ca8bb5beff091315f793ee17683bc1390731f6ac4c5eb01e27464b80fe879"}, + {file = "pymdown_extensions-10.5.tar.gz", hash = "sha256:1b60f1e462adbec5a1ed79dac91f666c9c0d241fa294de1989f29d20096cfd0b"}, ] [package.dependencies] -markdown = ">=3.2" +markdown = ">=3.5" pyyaml = "*" [package.extras] @@ -7102,36 +7143,36 @@ tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc ( [[package]] name = "scipy" -version = "1.11.3" +version = "1.11.4" description = "Fundamental algorithms for scientific computing in Python" optional = false -python-versions = "<3.13,>=3.9" +python-versions = ">=3.9" files = [ - {file = "scipy-1.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:370f569c57e1d888304052c18e58f4a927338eafdaef78613c685ca2ea0d1fa0"}, - {file = "scipy-1.11.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9885e3e4f13b2bd44aaf2a1a6390a11add9f48d5295f7a592393ceb8991577a3"}, - {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04aa19acc324a1a076abb4035dabe9b64badb19f76ad9c798bde39d41025cdc"}, - {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1a8a4657673bfae1e05e1e1d6e94b0cabe5ed0c7c144c8aa7b7dbb774ce5c1"}, - {file = "scipy-1.11.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7abda0e62ef00cde826d441485e2e32fe737bdddee3324e35c0e01dee65e2a88"}, - {file = "scipy-1.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:033c3fd95d55012dd1148b201b72ae854d5086d25e7c316ec9850de4fe776929"}, - {file = "scipy-1.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:925c6f09d0053b1c0f90b2d92d03b261e889b20d1c9b08a3a51f61afc5f58165"}, - {file = "scipy-1.11.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5664e364f90be8219283eeb844323ff8cd79d7acbd64e15eb9c46b9bc7f6a42a"}, - {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f325434b6424952fbb636506f0567898dca7b0f7654d48f1c382ea338ce9a3"}, - {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f290cf561a4b4edfe8d1001ee4be6da60c1c4ea712985b58bf6bc62badee221"}, - {file = "scipy-1.11.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:91770cb3b1e81ae19463b3c235bf1e0e330767dca9eb4cd73ba3ded6c4151e4d"}, - {file = "scipy-1.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:e1f97cd89c0fe1a0685f8f89d85fa305deb3067d0668151571ba50913e445820"}, - {file = "scipy-1.11.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dfcc1552add7cb7c13fb70efcb2389d0624d571aaf2c80b04117e2755a0c5d15"}, - {file = "scipy-1.11.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0d3a136ae1ff0883fffbb1b05b0b2fea251cb1046a5077d0b435a1839b3e52b7"}, - {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bae66a2d7d5768eaa33008fa5a974389f167183c87bf39160d3fefe6664f8ddc"}, - {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2f6dee6cbb0e263b8142ed587bc93e3ed5e777f1f75448d24fb923d9fd4dce6"}, - {file = "scipy-1.11.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:74e89dc5e00201e71dd94f5f382ab1c6a9f3ff806c7d24e4e90928bb1aafb280"}, - {file = "scipy-1.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:90271dbde4be191522b3903fc97334e3956d7cfb9cce3f0718d0ab4fd7d8bfd6"}, - {file = "scipy-1.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a63d1ec9cadecce838467ce0631c17c15c7197ae61e49429434ba01d618caa83"}, - {file = "scipy-1.11.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:5305792c7110e32ff155aed0df46aa60a60fc6e52cd4ee02cdeb67eaccd5356e"}, - {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea7f579182d83d00fed0e5c11a4aa5ffe01460444219dedc448a36adf0c3917"}, - {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c77da50c9a91e23beb63c2a711ef9e9ca9a2060442757dffee34ea41847d8156"}, - {file = "scipy-1.11.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15f237e890c24aef6891c7d008f9ff7e758c6ef39a2b5df264650eb7900403c0"}, - {file = "scipy-1.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:4b4bb134c7aa457e26cc6ea482b016fef45db71417d55cc6d8f43d799cdf9ef2"}, - {file = "scipy-1.11.3.tar.gz", hash = "sha256:bba4d955f54edd61899776bad459bf7326e14b9fa1c552181f0479cc60a568cd"}, + {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"}, + {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"}, + {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"}, + {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"}, + {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"}, + {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"}, + {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"}, + {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"}, + {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"}, + {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"}, + {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"}, + {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"}, + {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"}, + {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"}, + {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"}, + {file = 
"scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"}, + {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"}, + {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"}, + {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"}, + {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"}, + {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"}, + {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"}, + {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"}, + {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"}, + {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"}, ] [package.dependencies] @@ -8236,4 +8277,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "3.10.8" -content-hash = "2456a037d91c98b6c23b41a40d6fad70983414ac4388166bcde6bf6d5d6a88f5" +content-hash = "14e1151e8d4097e3898aa28f96b46c30a0960cb6d7afc72f4b6b7d6820f161b9" diff --git a/pyproject.toml b/pyproject.toml index 46b7589e3..0b6d71fe0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,15 +14,17 @@ otg = "otg.cli:main" [tool.poetry.dependencies] python = "3.10.8" pyspark = "3.3.3" -scipy = "^1.11.3" +scipy = "^1.11.4" hydra-core = "^1.3.2" pyliftover = "^0.4" xgboost = "^1.7.3" -scikit-learn = "^1.2.1" numpy = "^1.26.1" hail = "0.2.126" -pyarrow = "^14.0.1" wandb = "^0.16.0" +google = "^3.0.0" +omegaconf = "^2.3.0" +typing-extensions = "^4.8.0" +scikit-learn = "^1.3.2" [tool.poetry.dev-dependencies] pre-commit = "^3.5.0" @@ -43,9 +45,9 @@ mkdocs-git-revision-date-localized-plugin = "^1.2.1" mkdocs-autolinks-plugin = "^0.7.1" mkdocs-awesome-pages-plugin = "^2.9.2" mkdocs-exclude = "^1.0.2" -mkdocs-git-committers-plugin-2 = "^1.2.0" +mkdocs-git-committers-plugin-2 = "^2.2.2" lxml = "^4.9.3" -pymdown-extensions = "^10.3" +pymdown-extensions = "^10.5" [tool.poetry.group.tests.dependencies] @@ -62,8 +64,10 @@ ipython = "^8.18.1" ipykernel = "^6.19.0" google-cloud-dataproc = "^5.7.0" apache-airflow = "^2.7.3" -apache-airflow-providers-google = "^10.11.1" +apache-airflow-providers-google = "^10.12.0" pydoclint = "^0.3.8" +prettier = "^0.0.7" +deptry = "^0.12.0" [tool.semantic_release] @@ -83,6 +87,12 @@ build-backend = "poetry.core.masonry.api" [tool.isort] profile = "black" +[tool.deptry] +extend_exclude = ["src/conftest.py", "src/airflow", "src/utils"] + +[tool.deptry.per_rule_ignores] +DEP001 = ["otg"] + [tool.interrogate] fail-under = 95 color = true @@ -117,6 +127,9 @@ module = [ "scipy", "scipy.stats", "chardet", + "omegaconf", + "xgboost", + "sklearn", ] ignore_missing_imports = true diff --git a/src/airflow/dags/common_airflow.py b/src/airflow/dags/common_airflow.py index 
604a19800..7fde7ebe4 100644 --- a/src/airflow/dags/common_airflow.py +++ b/src/airflow/dags/common_airflow.py @@ -55,7 +55,7 @@ shared_dag_kwargs = dict( tags=["genetics_etl", "experimental"], start_date=pendulum.now(tz="Europe/London").subtract(days=1), - schedule_interval="@once", + schedule="@once", catchup=False, ) diff --git a/src/airflow/docker-compose.yaml b/src/airflow/docker-compose.yaml index 2edda28dc..75bc3d171 100644 --- a/src/airflow/docker-compose.yaml +++ b/src/airflow/docker-compose.yaml @@ -44,29 +44,27 @@ # # Feel free to modify this file to suit your needs. --- -version: '3.8' -x-airflow-common: - &airflow-common +version: "3.8" +x-airflow-common: &airflow-common # In order to add custom dependencies or upgrade provider packages you can use your extended image. # Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml # and uncomment the "build" line below, Then run `docker-compose build` to build the images. image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:slim-2.7.2-python3.10} # build: . - environment: - &airflow-common-env + environment: &airflow-common-env AIRFLOW__CORE__EXECUTOR: LocalExecutor AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow # For backward compatibility, with Airflow <2.3 AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow - AIRFLOW__CORE__FERNET_KEY: '' - AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true' - AIRFLOW__CORE__LOAD_EXAMPLES: 'false' - AIRFLOW__API__AUTH_BACKENDS: 'airflow.api.auth.backend.basic_auth,airflow.api.auth.backend.session' + AIRFLOW__CORE__FERNET_KEY: "" + AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: "true" + AIRFLOW__CORE__LOAD_EXAMPLES: "false" + AIRFLOW__API__AUTH_BACKENDS: "airflow.api.auth.backend.basic_auth,airflow.api.auth.backend.session" # yamllint disable rule:line-length # Use simple http server on scheduler for health checks # See https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/check-health.html#scheduler-health-check-server # yamllint enable rule:line-length - AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: 'true' + AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: "true" # WARNING: Use _PIP_ADDITIONAL_REQUIREMENTS option ONLY for a quick checks # for other purpose (development, test and especially production usage) build/extend Airflow image. 
_PIP_ADDITIONAL_REQUIREMENTS: ${_PIP_ADDITIONAL_REQUIREMENTS:-} @@ -84,8 +82,7 @@ x-airflow-common: # GCLOUD Authentication - ${GOOGLE_LOCAL_CREDENTIALS_PATH}:/${GOOGLE_DOCKER_CREDENTIALS_PATH}:ro user: "${AIRFLOW_UID:-50000}:0" - depends_on: - &airflow-common-depends-on + depends_on: &airflow-common-depends-on postgres: condition: service_healthy @@ -206,11 +203,11 @@ services: # yamllint enable rule:line-length environment: <<: *airflow-common-env - _AIRFLOW_DB_MIGRATE: 'true' - _AIRFLOW_WWW_USER_CREATE: 'true' + _AIRFLOW_DB_MIGRATE: "true" + _AIRFLOW_WWW_USER_CREATE: "true" _AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow} _AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow} - _PIP_ADDITIONAL_REQUIREMENTS: '' + _PIP_ADDITIONAL_REQUIREMENTS: "" user: "0:0" volumes: - ${AIRFLOW_PROJ_DIR:-.}:/sources diff --git a/src/otg/assets/schemas/l2g_gold_standard.json b/src/otg/assets/schemas/l2g_gold_standard.json index ba494b9aa..cf19d6b52 100644 --- a/src/otg/assets/schemas/l2g_gold_standard.json +++ b/src/otg/assets/schemas/l2g_gold_standard.json @@ -13,6 +13,12 @@ "nullable": false, "metadata": {} }, + { + "name": "studyId", + "type": "string", + "nullable": false, + "metadata": {} + }, { "name": "geneId", "type": "string", diff --git a/src/otg/assets/schemas/study_index.json b/src/otg/assets/schemas/study_index.json index 88ed480a8..71c600952 100644 --- a/src/otg/assets/schemas/study_index.json +++ b/src/otg/assets/schemas/study_index.json @@ -105,6 +105,16 @@ "nullable": true, "metadata": {} }, + { + "name": "cohorts", + "type": { + "type": "array", + "elementType": "string", + "containsNull": true + }, + "nullable": true, + "metadata": {} + }, { "name": "ldPopulationStructure", "type": { diff --git a/src/otg/clump.py b/src/otg/clump.py index 8cb756fa0..6ace21076 100644 --- a/src/otg/clump.py +++ b/src/otg/clump.py @@ -45,7 +45,9 @@ def __post_init__(self: ClumpStep) -> None: Raises: ValueError: If study index and LD index paths are not provided for study locus. """ - input_cols = self.session.spark.read.parquet(self.input_path).columns + input_cols = self.session.spark.read.parquet( + self.input_path, recursiveFileLookup=True + ).columns if "studyLocusId" in input_cols: if self.study_index_path is None or self.ld_index_path is None: raise ValueError( @@ -59,7 +61,9 @@ def __post_init__(self: ClumpStep) -> None: study_index=study_index, ld_index=ld_index ).clump() else: - sumstats = SummaryStatistics.from_parquet(self.session, self.input_path) + sumstats = SummaryStatistics.from_parquet( + self.session, self.input_path, recursiveFileLookup=True + ).coalesce(4000) clumped_study_locus = sumstats.window_based_clumping( locus_collect_distance=self.locus_collect_distance ) diff --git a/src/otg/dataset/dataset.py b/src/otg/dataset/dataset.py index 0fd1e6655..0d04a2779 100644 --- a/src/otg/dataset/dataset.py +++ b/src/otg/dataset/dataset.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any from typing_extensions import Self @@ -168,3 +168,33 @@ def unpersist(self: Self) -> Self: """ self.df = self._df.unpersist() return self + + def coalesce(self: Self, num_partitions: int, **kwargs: Any) -> Self: + """Coalesce the DataFrame included in the Dataset. + + Coalescing is efficient for decreasing the number of partitions because it avoids a full shuffle of the data. 
+ + Args: + num_partitions (int): Number of partitions to coalesce to + **kwargs (Any): Arguments to pass to the coalesce method + + Returns: + Self: Coalesced Dataset + """ + self.df = self._df.coalesce(num_partitions, **kwargs) + return self + + def repartition(self: Self, num_partitions: int, **kwargs: Any) -> Self: + """Repartition the DataFrame included in the Dataset. + + Repartitioning creates new partitions with evenly distributed data, at the cost of a full shuffle. + + Args: + num_partitions (int): Number of partitions to repartition to + **kwargs (Any): Arguments to pass to the repartition method + + Returns: + Self: Repartitioned Dataset + """ + self.df = self._df.repartition(num_partitions, **kwargs) + return self diff --git a/src/otg/datasource/finngen/study_index.py b/src/otg/datasource/finngen/study_index.py index 16603e2af..1f5d8b737 100644 --- a/src/otg/datasource/finngen/study_index.py +++ b/src/otg/datasource/finngen/study_index.py @@ -64,6 +64,8 @@ def from_source( f.lit("Finnish").alias("ancestry"), ) ).alias("discoverySamples"), + # Cohort label is consistent with GWAS Catalog curation. + f.array(f.lit("FinnGen")).alias("cohorts"), f.concat( f.lit(finngen_summary_stats_url_prefix), f.col("phenocode"), diff --git a/src/otg/datasource/gwas_catalog/study_index.py b/src/otg/datasource/gwas_catalog/study_index.py index 07f4a1b18..5e3464153 100644 --- a/src/otg/datasource/gwas_catalog/study_index.py +++ b/src/otg/datasource/gwas_catalog/study_index.py @@ -201,6 +201,32 @@ def _merge_ancestries_and_counts(ancestry_group: Column) -> Column: ), ) + @staticmethod + def _parse_cohorts(raw_cohort: Column) -> Column: + """Return a list of unique cohort labels from a pipe-separated list, if provided. + + Args: + raw_cohort (Column): Cohort list column, where labels are separated by the `|` sign. + + Returns: + Column: an array column with string elements.
+ + Examples: + >>> data = [('BioME|CaPS|Estonia|FHS|UKB|GERA|GERA|GERA',),(None,),] + >>> spark.createDataFrame(data, ['cohorts']).select(GWASCatalogStudyIndex._parse_cohorts(f.col('cohorts')).alias('parsedCohorts')).show(truncate=False) + +--------------------------------------+ + |parsedCohorts | + +--------------------------------------+ + |[BioME, CaPS, Estonia, FHS, UKB, GERA]| + |[null] | + +--------------------------------------+ + + """ + return f.when( + (raw_cohort.isNull()) | (raw_cohort == ""), + f.array(f.lit(None).cast(t.StringType())), + ).otherwise(f.array_distinct(f.split(raw_cohort, r"\|"))) + @classmethod def _parse_study_table( cls: type[GWASCatalogStudyIndex], catalog_studies: DataFrame @@ -332,6 +358,12 @@ def _annotate_ancestries( ) # studyId has not been split yet ) + # Parsing cohort information: + cohorts = ancestry_lut.select( + f.col("STUDY ACCESSION").alias("studyId"), + self._parse_cohorts(f.col("COHORT(S)")).alias("cohorts"), + ).distinct() + # Get a high resolution dataset on experimental stage: ancestry_stages = ( ancestry.groupBy("studyId") @@ -418,7 +450,9 @@ def _annotate_ancestries( europeans_deconvoluted, on="studyId", how="outer" ) - self.df = self.df.join(parsed_ancestry_lut, on="studyId", how="left") + self.df = self.df.join(parsed_ancestry_lut, on="studyId", how="left").join( + cohorts, on="studyId", how="left" + ) return self def _annotate_sumstats_info( diff --git a/src/otg/datasource/open_targets/l2g_gold_standard.py b/src/otg/datasource/open_targets/l2g_gold_standard.py index 532e382fe..b51099cff 100644 --- a/src/otg/datasource/open_targets/l2g_gold_standard.py +++ b/src/otg/datasource/open_targets/l2g_gold_standard.py @@ -116,10 +116,8 @@ def as_l2g_gold_standard( L2GGoldStandard: L2G Gold Standard dataset. False negatives have not yet been removed. """ return L2GGoldStandard( - _df=cls.parse_positive_curation(gold_standard_curation) - .transform(cls.expand_gold_standard_with_negatives, v2g) - .drop( - "studyId", + _df=cls.parse_positive_curation(gold_standard_curation).transform( + cls.expand_gold_standard_with_negatives, v2g ), _schema=L2GGoldStandard.get_schema(), ) diff --git a/src/otg/gwas_catalog.py b/src/otg/gwas_catalog.py index 0f0654eae..756f5bb66 100644 --- a/src/otg/gwas_catalog.py +++ b/src/otg/gwas_catalog.py @@ -23,8 +23,8 @@ class GWASCatalogStep: Attributes: session (Session): Session object. - catalog_studies_file (str): Raw GWAS catalog studies file. - catalog_ancestry_file (str): Ancestry annotations file from GWAS Catalog. + catalog_study_files (list[str]): List of raw GWAS Catalog studies files. + catalog_ancestry_files (list[str]): List of raw ancestry annotation files from GWAS Catalog. catalog_sumstats_lut (str): GWAS Catalog summary statistics lookup table. catalog_associations_file (str): Raw GWAS catalog associations file. variant_annotation_path (str): Input variant annotation path.
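The `catalog_study_files` and `catalog_ancestry_files` attributes above are lists because Spark's DataFrameReader.csv accepts a list of paths as well as a single path and reads them all into one DataFrame; the read calls in the next hunk pass the lists straight through. A minimal sketch of the pattern, assuming a local SparkSession and illustrative file names (the real paths come from the step configuration):

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Illustrative published + unpublished study files; the names are assumptions, not config values.
catalog_study_files = [
    "gwas-catalog-v1.0.3-studies.tsv",
    "gwas-catalog-v1.0.3-unpublished-studies.tsv",
]

# DataFrameReader.csv takes a single path or a list of paths; the files are read into one DataFrame.
catalog_studies = spark.read.csv(catalog_study_files, sep="\t", header=True)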
@@ -35,8 +35,8 @@ class GWASCatalogStep: """ session: Session = MISSING - catalog_studies_file: str = MISSING - catalog_ancestry_file: str = MISSING + catalog_study_files: list[str] = MISSING + catalog_ancestry_files: list[str] = MISSING catalog_sumstats_lut: str = MISSING catalog_associations_file: str = MISSING variant_annotation_path: str = MISSING @@ -50,10 +50,10 @@ def __post_init__(self: GWASCatalogStep) -> None: # Extract va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path) catalog_studies = self.session.spark.read.csv( - self.catalog_studies_file, sep="\t", header=True + self.catalog_study_files, sep="\t", header=True ) ancestry_lut = self.session.spark.read.csv( - self.catalog_ancestry_file, sep="\t", header=True + self.catalog_ancestry_files, sep="\t", header=True ) sumstats_lut = self.session.spark.read.csv( self.catalog_sumstats_lut, sep="\t", header=False diff --git a/src/otg/l2g.py b/src/otg/l2g.py index 95d477dc6..34969bf86 100644 --- a/src/otg/l2g.py +++ b/src/otg/l2g.py @@ -4,6 +4,7 @@ from dataclasses import dataclass, field from typing import Any +import sklearn from omegaconf import MISSING from xgboost.spark import SparkXGBClassifier @@ -104,6 +105,7 @@ def __post_init__(self: LocusToGeneStep) -> None: Raises: ValueError: if run_mode is not one of "train" or "predict". """ + print("scikit-learn version: ", sklearn.__version__) if self.run_mode not in ["train", "predict"]: raise ValueError( f"run_mode must be one of 'train' or 'predict', got {self.run_mode}" ) diff --git a/src/scripts/schemadocs.py b/src/utils/schemadocs.py similarity index 100% rename from src/scripts/schemadocs.py rename to src/utils/schemadocs.py diff --git a/tests/data_samples/example_test-pop.bm/metadata.json b/tests/data_samples/example_test-pop.bm/metadata.json index 2aae7af3d..98c1d22bb 100644 --- a/tests/data_samples/example_test-pop.bm/metadata.json +++ b/tests/data_samples/example_test-pop.bm/metadata.json @@ -2,18 +2,7 @@ "blockSize": 2, "nRows": 7, "nCols": 7, - "maybeFiltered": [ - 0, - 4, - 5, - 8, - 9, - 10, - 12, - 13, - 14, - 15 - ], + "maybeFiltered": [0, 4, 5, 8, 9, 10, 12, 13, 14, 15], "partFiles": [ "part-00-36-0-0-9da891bc-b3f3-211c-ec5d-b4586236d3f6", "part-01-36-1-0-0f48a14a-3dc0-261b-3b0e-e3549671ed4b", diff --git a/tests/dataset/test_dataset.py b/tests/dataset/test_dataset.py new file mode 100644 index 000000000..29a017333 --- /dev/null +++ b/tests/dataset/test_dataset.py @@ -0,0 +1,48 @@ +"""Test Dataset class.""" +from __future__ import annotations + +from dataclasses import dataclass + +import pytest +from pyspark.sql import SparkSession +from pyspark.sql.types import IntegerType, StructField, StructType + +from otg.dataset.dataset import Dataset + + +@dataclass +class TestDataset(Dataset): + """Concrete subclass of Dataset for testing.
Necessary because Dataset is abstract.""" + + @classmethod + def get_schema(cls) -> StructType: + """Get schema.""" + return StructType([StructField("value", IntegerType(), False)]) + + +class TestCoalesceAndRepartition: + """Test TestDataset.coalesce and TestDataset.repartition.""" + + def test_repartition(self: TestCoalesceAndRepartition) -> None: + """Test Dataset.repartition.""" + initial_partitions = self.test_dataset._df.rdd.getNumPartitions() + new_partitions = initial_partitions + 1 + self.test_dataset.repartition(new_partitions) + assert self.test_dataset._df.rdd.getNumPartitions() == new_partitions + + def test_coalesce(self: TestCoalesceAndRepartition) -> None: + """Test Dataset.coalesce.""" + initial_partitions = self.test_dataset._df.rdd.getNumPartitions() + new_partitions = initial_partitions - 1 if initial_partitions > 1 else 1 + self.test_dataset.coalesce(new_partitions) + assert self.test_dataset._df.rdd.getNumPartitions() == new_partitions + + @pytest.fixture(autouse=True) + def _setup(self: TestCoalesceAndRepartition, spark: SparkSession) -> None: + """Setup fixture.""" + self.test_dataset = TestDataset( + _df=spark.createDataFrame( + [(1,), (2,), (3,)], schema=TestDataset.get_schema() + ), + _schema=TestDataset.get_schema(), + ) diff --git a/tests/dataset/test_l2g.py b/tests/dataset/test_l2g.py index eb42d01c9..3bf5d472c 100644 --- a/tests/dataset/test_l2g.py +++ b/tests/dataset/test_l2g.py @@ -40,27 +40,30 @@ def test_filter_unique_associations(spark: SparkSession) -> None: """Test filter_unique_associations.""" mock_l2g_gs_df = spark.createDataFrame( [ - (1, "variant1", "gene1", "positive"), + (1, "variant1", "study1", "gene1", "positive"), ( 2, "variant2", + "study1", "gene1", "negative", ), # in the same locus as sl1 and pointing to same gene, has to be dropped ( 3, "variant3", + "study1", "gene1", "positive", ), # in diff locus as sl1 and pointing to same gene, has to be kept ( 4, "variant4", + "study1", "gene2", "positive", ), # in same locus as sl1 and pointing to diff gene, has to be kept ], - "studyLocusId LONG, variantId STRING, geneId STRING, goldStandardSet STRING", + "studyLocusId LONG, variantId STRING, studyId STRING, geneId STRING, goldStandardSet STRING", ) mock_sl_overlap_df = spark.createDataFrame( @@ -70,11 +73,11 @@ def test_filter_unique_associations(spark: SparkSession) -> None: expected_df = spark.createDataFrame( [ - (1, "variant1", "gene1", "positive"), - (3, "variant3", "gene1", "positive"), - (4, "variant4", "gene2", "positive"), + (1, "variant1", "study1", "gene1", "positive"), + (3, "variant3", "study1", "gene1", "positive"), + (4, "variant4", "study1", "gene2", "positive"), ], - "studyLocusId LONG, variantId STRING, geneId STRING, goldStandardSet STRING", + "studyLocusId LONG, variantId STRING, studyId STRING, geneId STRING, goldStandardSet STRING", ) mock_l2g_gs = L2GGoldStandard( @@ -93,27 +96,30 @@ def test_remove_false_negatives(spark: SparkSession) -> None: """Test `remove_false_negatives`.""" mock_l2g_gs_df = spark.createDataFrame( [ - (1, "variant1", "gene1", "positive"), + (1, "variant1", "study1", "gene1", "positive"), ( 2, "variant2", + "study1", "gene2", "negative", ), # gene2 is a partner of gene1, has to be dropped ( 3, "variant3", + "study1", "gene3", "negative", ), # gene 3 is not a partner of gene1, has to be kept ( 4, "variant4", + "study1", "gene4", "positive", ), # gene 4 is a partner of gene1, has to be kept because it's positive ], - "studyLocusId LONG, variantId STRING, geneId STRING, goldStandardSet STRING", + 
"studyLocusId LONG, variantId STRING, studyId STRING, geneId STRING, goldStandardSet STRING", ) mock_interactions_df = spark.createDataFrame( @@ -127,11 +133,11 @@ def test_remove_false_negatives(spark: SparkSession) -> None: expected_df = spark.createDataFrame( [ - (1, "variant1", "gene1", "positive"), - (3, "variant3", "gene3", "negative"), - (4, "variant4", "gene4", "positive"), + (1, "variant1", "study1", "gene1", "positive"), + (3, "variant3", "study1", "gene3", "negative"), + (4, "variant4", "study1", "gene4", "positive"), ], - "studyLocusId LONG, variantId STRING, geneId STRING, goldStandardSet STRING", + "studyLocusId LONG, variantId STRING, studyId STRING, geneId STRING, goldStandardSet STRING", ) mock_l2g_gs = L2GGoldStandard( diff --git a/utils/update_GWAS_Catalog_data.sh b/utils/update_GWAS_Catalog_data.sh index 667e995c4..98ac4cc36 100755 --- a/utils/update_GWAS_Catalog_data.sh +++ b/utils/update_GWAS_Catalog_data.sh @@ -55,17 +55,28 @@ wget -q ${RELEASE_URL}/gwas-catalog-download-studies-v1.0.3.txt \ -O gwas-catalog-v1.0.3-studies-r${YEAR}-${MONTH}-${DAY}.tsv logging "File gwas-catalog-v1.0.3-studies-r${YEAR}-${MONTH}-${DAY}.tsv saved." +wget -q ${RELEASE_URL}/gwas-catalog-unpublished-studies-v1.0.3.tsv \ + -O gwas-catalog-v1.0.3-unpublished-studies-r${YEAR}-${MONTH}-${DAY}.tsv +logging "File gwas-catalog-v1.0.3-unpublished-studies-r${YEAR}-${MONTH}-${DAY}.tsv saved." + wget -q ${RELEASE_URL}/gwas-catalog-download-ancestries-v1.0.3.txt \ -O gwas-catalog-v1.0.3-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv logging "File gwas-catalog-v1.0.3-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv saved." +wget -q ${RELEASE_URL}/gwas-catalog-unpublished-ancestries-v1.0.3.tsv \ + -O gwas-catalog-v1.0.3-unpublished-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv +logging "File gwas-catalog-v1.0.3-unpublished-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv saved." + + wget -q ${BASE_URL}/summary_statistics/harmonised_list.txt -O harmonised_list-r${YEAR}-${MONTH}-${DAY}.txt logging "File harmonised_list-r${YEAR}-${MONTH}-${DAY}.txt saved." logging "Copying files to GCP..." -gsutil -q cp file://$(pwd)/gwas_catalog_v1.0.2-associations_e${ENSEMBL}_r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ -gsutil -q cp file://$(pwd)/gwas-catalog-v1.0.3-studies-r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ -gsutil -q cp file://$(pwd)/gwas-catalog-v1.0.3-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ -gsutil -q cp file://$(pwd)/harmonised_list-r${YEAR}-${MONTH}-${DAY}.txt ${GCP_TARGET}/ +gsutil -mq cp file://$(pwd)/gwas_catalog_v1.0.2-associations_e${ENSEMBL}_r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ +gsutil -mq cp file://$(pwd)/gwas-catalog-v1.0.3-studies-r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ +gsutil -mq cp file://$(pwd)/gwas-catalog-v1.0.3-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ +gsutil -mq cp file://$(pwd)/harmonised_list-r${YEAR}-${MONTH}-${DAY}.txt ${GCP_TARGET}/ +gsutil -mq cp file://$(pwd)/gwas-catalog-v1.0.3-unpublished-studies-r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ +gsutil -mq cp file://$(pwd)/gwas-catalog-v1.0.3-unpublished-ancestries-r${YEAR}-${MONTH}-${DAY}.tsv ${GCP_TARGET}/ logging "Done."